diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4711de3..ab56630 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -49,12 +49,19 @@
 if(NOT SKBUILD)
   add_library(MultiNEAT SHARED ${SOURCE_FILES})
 
+  # target_include_directories(
+  #   MultiNEAT
+  #   PRIVATE
+  #   PUBLIC $
+  #   $
+  #   cereal/include)
+
   target_include_directories(
-    MultiNEAT
-    PRIVATE
-    PUBLIC $
-    $
-    cereal/include)
+    MultiNEAT
+    PRIVATE
+    PUBLIC $
+    $
+    $)
 
   # Not doing this anymore because not all package managers have it.
   # target_link_libraries(MultiNEAT PUBLIC cereal::cereal)
diff --git a/cereal b/cereal
index ebef1e9..a56bad8 160000
--- a/cereal
+++ b/cereal
@@ -1 +1 @@
-Subproject commit ebef1e929807629befafbb2918ea1a08c7194554
+Subproject commit a56bad8bbb770ee266e930c95d37fff2a5be7fea
diff --git a/src/Genome.cpp b/src/Genome.cpp
index bd17ead..47c73c9 100644
--- a/src/Genome.cpp
+++ b/src/Genome.cpp
@@ -291,7 +291,7 @@ namespace NEAT
 
         m_ID = a_ID;
 
         int t_innovnum = 1, t_nnum = 1;
-
+        // override seed_type if 0 hidden units are specified
         if ((a_SeedType == 1) && (a_NumHidden == 0))
         {
@@ -487,7 +487,7 @@ namespace NEAT
         // Start very minimally - connect a random input to each output
         // Also connect the bias to every output
         for (unsigned int i = 0; i < a_NumOutputs; i++)
-        {
+        {
            int t_inp_id = t_RNG.RandInt(1, a_NumInputs - 1);
            int t_bias_id = a_NumInputs;
            int t_outp_id = a_NumInputs + 1 + i;
@@ -807,7 +807,7 @@ namespace NEAT
    bool Genome::HasLoops()
    {
        NeuralNetwork net;
-       BuildPhenotype(net);
+       BuildCPPN(net);
 
        // convert the net to a Boost::Graph object
        Graph graph(NumNeurons());
@@ -837,7 +837,7 @@ namespace NEAT
 
 
    // This builds a fastnetwork structure out from the genome
-   void Genome::BuildPhenotype(NeuralNetwork &a_Net) const
+   void Genome::BuildCPPN(NeuralNetwork &a_Net) const
    {
        // first clear out the network
        a_Net.Clear();
@@ -989,7 +989,7 @@ namespace NEAT
        // Begin querying the CPPN
        // Create the neural network that will represent the CPPN
        NeuralNetwork t_temp_phenotype(true, rng);
-       BuildPhenotype(t_temp_phenotype);
+       BuildCPPN(t_temp_phenotype);
        t_temp_phenotype.Flush();
 
        // To ensure network relaxation
@@ -3669,7 +3669,7 @@ namespace NEAT
 
        NeuralNetwork t_temp_phenotype(true, rng);
 
-       BuildPhenotype(t_temp_phenotype);
+       BuildCPPN(t_temp_phenotype);
 
        // Find Inputs to Hidden connections.
        for (unsigned int i = 0; i < input_count; i++)
diff --git a/src/Genome.h b/src/Genome.h
index 93559b1..6f67417 100644
--- a/src/Genome.h
+++ b/src/Genome.h
@@ -294,7 +294,7 @@ namespace NEAT
        void SetOffspringAmount(double a_oa);
 
        // This builds a fastnetwork structure out from the genome
-       void BuildPhenotype(NeuralNetwork &net) const;
+       void BuildCPPN(NeuralNetwork &net) const;
 
        // Projects the phenotype's weights back to the genome
        void DerivePhenotypicChanges(NeuralNetwork &a_Net);
diff --git a/src/NeuralNetwork.cpp b/src/NeuralNetwork.cpp
index 1d45125..9ef70b0 100644
--- a/src/NeuralNetwork.cpp
+++ b/src/NeuralNetwork.cpp
@@ -263,10 +263,11 @@ NeuralNetwork::NeuralNetwork()
    // clean up other neuron data as well
    for (unsigned int i = 0; i < m_neurons.size(); i++)
    {
-       m_neurons[i].m_a = 1;
+       m_neurons[i].m_a = 0.5; // default 1
        m_neurons[i].m_b = 0;
-       m_neurons[i].m_timeconst = m_neurons[i].m_bias =
-           m_neurons[i].m_membrane_potential = 0;
+       m_neurons[i].m_timeconst = 0;
+       m_neurons[i].m_bias = 0;
+       m_neurons[i].m_membrane_potential = 0;
    }
    Clear();
 }
@@ -339,15 +340,15 @@ void NeuralNetwork::Activate()
    // this will happen.
    for (unsigned int i = 0; i < m_connections.size(); i++)
    {
-       m_neurons[m_connections[i].m_target_neuron_idx].m_activesum +=
-           m_connections[i].m_signal;
+       m_neurons[m_connections[i].m_target_neuron_idx].m_activesum += m_connections[i].m_signal;
    }
    // Now loop nodes_activesums, pass the signals through the activation function
    // and store the result back to nodes_activations
    // also skip inputs since they do not get an activation
-   for (unsigned int i = m_num_inputs; i < m_neurons.size(); i++)
+   for (unsigned int i = m_num_inputs; i < m_neurons.size(); i++)
    {
        double x = m_neurons[i].m_activesum;
+       // std::cout << "active sum, i.e x = " << x << std::endl;
        m_neurons[i].m_activesum = 0;
        // Apply the activation function
        double y = 0.0;
diff --git a/src/python/Genome.cpp b/src/python/Genome.cpp
index 3bd5ab8..4913b62 100644
--- a/src/python/Genome.cpp
+++ b/src/python/Genome.cpp
@@ -43,7 +43,7 @@ void export_Genome(pybind11::module_& mod) {
 
        .def("PrintAllTraits", &Genome::PrintAllTraits)
 
-       .def("BuildPhenotype", &Genome::BuildPhenotype)
+       .def("BuildCPPN", &Genome::BuildCPPN)
        .def("BuildHyperNEATPhenotype", &Genome::BuildHyperNEATPhenotype)
        .def("BuildESHyperNEATPhenotype", &Genome::BuildESHyperNEATPhenotype)
diff --git a/test_multineat.py b/test_multineat.py
new file mode 100644
index 0000000..bb801c5
--- /dev/null
+++ b/test_multineat.py
@@ -0,0 +1,5 @@
+import multineat
+
+genotype = multineat.Genome()
+
+print(genotype)
\ No newline at end of file
diff --git a/tests/python/test_raw_genome.py b/tests/python/test_raw_genome.py
index 0ca9cf1..a486861 100755
--- a/tests/python/test_raw_genome.py
+++ b/tests/python/test_raw_genome.py
@@ -65,7 +65,7 @@ def parameters():
        return params
 
    def activate_network(self, genome, _input=None):
-       genome.BuildPhenotype(self._net)
+       genome.BuildCPPN(self._net)
        _input = np.array([1, 2, 3], dtype=float) if _input is None else _input
        self._net.Input(_input)
        self._net.ActivateAllLayers()
diff --git a/tests/python/test_xor.py b/tests/python/test_xor.py
index b2020b4..e268122 100755
--- a/tests/python/test_xor.py
+++ b/tests/python/test_xor.py
@@ -14,7 +14,7 @@ class XorTest(unittest.TestCase):
    def test_xor(self):
        def evaluate(genome):
            net = NEAT.NeuralNetwork()
-           genome.BuildPhenotype(net)
+           genome.BuildCPPN(net)
 
            error = 0
diff --git a/tests/xor/main.cpp b/tests/xor/main.cpp
index e59499d..995478b 100644
--- a/tests/xor/main.cpp
+++ b/tests/xor/main.cpp
@@ -29,7 +29,7 @@ double xortest(Genome& g)
    double f = 0;
 
    NeuralNetwork net;
-   g.BuildPhenotype(net);
+   g.BuildCPPN(net);
    static const std::vector< std::vector< double > > inputs
    {
        {0.0,0.0,1.0},
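Usage sketch (illustrative, not part of the diff): after the rename, Python callers use BuildCPPN where they previously called BuildPhenotype. This is a minimal sketch assuming the module imports as multineat (as in the new test_multineat.py) and that the genome's network actually takes three inputs (as in test_raw_genome.py); a default-constructed Genome is empty, so in practice the genome would come from a Population or the full Genome constructor. Only calls that appear in this diff are used.

    import numpy as np
    import multineat

    genome = multineat.Genome()       # assumed: a genome whose network has 3 inputs
    net = multineat.NeuralNetwork()   # empty network, filled in by BuildCPPN
    genome.BuildCPPN(net)             # renamed from BuildPhenotype in this change
    net.Input(np.array([1, 2, 3], dtype=float))  # mirrors test_raw_genome.py
    net.ActivateAllLayers()           # propagate the inputs through every layer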