diff --git a/Cargo.toml b/Cargo.toml index a03470af..2528fe2e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "mwpf" -version = "0.0.4" +version = "0.1.1" authors = ["Yue Wu "] edition = "2021" license = "MIT" @@ -36,16 +36,21 @@ debug = true [features] # to enable a feature, use `--features xxx` -default = ["cli"] +default = ["f64_weight", "cli", "qecp_integrate", "slp", "pq"] +old_default = ["cli", "slp"] cli = ["pbr"] -r64_weight = [] # use Rational64 instead of BigRational as weight type -f64_weight = [] # use f64 instead of BigRational as weight type -u32_index = [] # use u32 instead of usize as index type, to save memory -python_binding = ["pyo3"] # bind to Python -wasm_binding = ["wasm-bindgen"] # bind to wasm -colorful = [] # enable colorful output in case terminal exists +r64_weight = [] # use Rational64 instead of BigRational as weight type +f64_weight = ["float_lp"] # use f64 instead of BigRational as weight type +f32_weight = ["float_lp"] # use f64 instead of BigRational as weight type +u32_index = [] # use u32 instead of usize as index type, to save memory +python_binding = ["pyo3"] # bind to Python +wasm_binding = ["wasm-bindgen"] # bind to wasm +colorful = [] # enable colorful output in case terminal exists qecp_integrate = ["qecp"] float_lp = ["highs"] +incr_lp = [] +pq = [] # use edge/vertex definitions in dual_module_pq +non-pq = [] # use edge/vertex definitions in dual_module_serial [dependencies] pyo3 = { version = "0.19.2", features = [ @@ -67,25 +72,34 @@ parking_lot = { version = "0.12.1", features = ["hardware-lock-elision"] } num-rational = "0.4.1" num-traits = "0.2.15" more-asserts = "0.3.1" -rand = "0.8.5" +rand = { version = "0.8.5", features = ["small_rng"] } prettytable-rs = "0.10.0" itertools = "0.11.0" cfg-if = "1.0.0" getrandom = { version = "0.2", features = ["js"] } wasm-bindgen = { version = "0.2.86", optional = true } lazy_static = "1.4.0" -slp = "0.1.11" -highs = { version = "1.6.1", optional = true } +slp = { path = "src/slp", optional = true } +highs = { path = "src/highs", optional = true } sugar = "0.2.0" maplit = "1.0.2" -qecp = { version = "0.2.4", optional = true, default-features = false, features = [ +qecp = { version = "0.2.5", optional = true, default-features = false, features = [ "fusion_blossom", ] } serde_variant = "0.1.3" +priority-queue = "2.0.3" +heapz = { path = "src/heapz" } +hashbrown = "0.14.5" +pheap = { path = "src/pheap" } +rayon = "1.7.0" +weak-table = "0.3.2" +petgraph = { version = "0.6.0", features = ["serde-1"] } +core_affinity = "0.8.0" +color-print = "0.3.6" [dev-dependencies] test-case = "3.1.0" [package.metadata.docs.rs] -rustdoc-args = ["--html-in-header", "katex-header.html"] # to run locally: `RUSTDOCFLAGS="--html-in-header katex-header.html" cargo doc --no-deps` +rustdoc-args = ["--html-in-header", "katex-header.html"] \ No newline at end of file diff --git a/flamegraph.svg b/flamegraph.svg new file mode 100644 index 00000000..2b086fe5 --- /dev/null +++ b/flamegraph.svg @@ -0,0 +1,491 @@ +Flame Graph Reset ZoomSearch libdyld.dylib`dyld4::LibSystemHelpers::getenv (1 samples, 0.01%)libsystem_kernel.dylib`__exit (1 samples, 0.01%)libsystem_malloc.dylib`free_medium (1 samples, 0.01%)libsystem_kernel.dylib`madvise (1 samples, 0.01%)libsystem_malloc.dylib`DYLD-STUB$$_platform_bzero (1 samples, 0.01%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (2 samples, 0.03%)libsystem_malloc.dylib`free (1 samples, 0.01%)libsystem_malloc.dylib`DYLD-STUB$$_platform_bzero (2 samples, 0.03%)libsystem_malloc.dylib`_nanov2_free (2 samples, 0.03%)libsystem_malloc.dylib`nanov2_madvise_block (2 samples, 0.03%)libsystem_malloc.dylib`nanov2_madvise_block_locked (2 samples, 0.03%)libsystem_kernel.dylib`madvise (2 samples, 0.03%)mwpf`<mwpf::mwpf_solver::SolverSerialJointSingleHair as mwpf::mwpf_solver::PrimalDualSolver>::clear (18 samples, 0.26%)mwpf`<mwpf::primal_module_serial::PrimalModuleSerial as mwpf::primal_module::PrimalModuleImpl>::clear (18 samples, 0.26%)mwpf`alloc::sync::Arc<T,A>::drop_slow (18 samples, 0.26%)mwpf`<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (18 samples, 0.26%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (18 samples, 0.26%)mwpf`alloc::sync::Arc<T,A>::drop_slow (15 samples, 0.21%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (13 samples, 0.19%)libsystem_platform.dylib`_platform_memset (2 samples, 0.03%)mwpf`<mwpf::dual_module_pq::DualModulePQ<Queue> as mwpf::dual_module::DualModuleImpl>::compute_maximum_update_length (1 samples, 0.01%)mwpf`<priority_queue::priority_queue::PriorityQueue<mwpf::dual_module_pq::Obstacle,core::cmp::Reverse<T>> as mwpf::dual_module_pq::FutureQueueMethods<T,mwpf::dual_module_pq::Obstacle>>::pop_event (1 samples, 0.01%)mwpf`indexmap::map::core::IndexMapCore<K,V>::swap_remove_index (1 samples, 0.01%)mwpf`<mwpf::plugin_single_hair::PluginSingleHair as mwpf::plugin::PluginImpl>::find_relaxers (1 samples, 0.01%)mwpf`mwpf::matrix::hair::HairView<M>::new (1 samples, 0.01%)mwpf`mwpf::matrix::echelon::Echelon<M>::force_update_echelon_info (1 samples, 0.01%)mwpf`mwpf::plugin::PluginManager::find_relaxer (2 samples, 0.03%)mwpf`mwpf::plugin::PluginEntry::execute (2 samples, 0.03%)mwpf`<mwpf::plugin_union_find::PluginUnionFind as mwpf::plugin::PluginImpl>::find_relaxers (1 samples, 0.01%)mwpf`mwpf::plugin_union_find::PluginUnionFind::find_single_relaxer (1 samples, 0.01%)mwpf`mwpf::relaxer::Relaxer::new_raw (1 samples, 0.01%)mwpf`<mwpf::primal_module_serial::PrimalModuleSerial as mwpf::primal_module::PrimalModuleImpl>::resolve_cluster_tune (3 samples, 0.04%)mwpf`mwpf::relaxer_optimizer::RelaxerOptimizer::optimize (1 samples, 0.01%)mwpf`highs::Model::solve (1 samples, 0.01%)mwpf`Highs::run (1 samples, 0.01%)mwpf`Highs::runPresolve (1 samples, 0.01%)mwpf`PresolveComponent::run (1 samples, 0.01%)mwpf`presolve::HPresolve::run (1 samples, 0.01%)mwpf`presolve::HPresolve::presolve (1 samples, 0.01%)mwpf`presolve::HPresolve::initialRowAndColPresolve (1 samples, 0.01%)mwpf`presolve::HPresolve::updateRowDualImpliedBounds (1 samples, 0.01%)libsystem_platform.dylib`_platform_memset (1 samples, 0.01%)mwpf`<mwpf::dual_module_pq::DualModulePQ<Queue> as mwpf::dual_module::DualModuleImpl>::get_edge_nodes (3 samples, 0.04%)mwpf`<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (3 samples, 0.04%)libsystem_platform.dylib`_platform_memmove (8 samples, 0.11%)libsystem_malloc.dylib`free_medium (1 samples, 0.01%)libsystem_kernel.dylib`madvise (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (1 samples, 0.01%)mwpf`<core::iter::adapters::map::Map<I,F> as core::iter::traits::iterator::Iterator>::fold (1 samples, 0.01%)libsystem_platform.dylib`_platform_memset (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (12 samples, 0.17%)mwpf`<std::hash::random::DefaultHasher as core::hash::Hasher>::write (10 samples, 0.14%)mwpf`core::hash::BuildHasher::hash_one (20 samples, 0.28%)libsystem_malloc.dylib`free_medium (11 samples, 0.16%)libsystem_kernel.dylib`madvise (11 samples, 0.16%)mwpf`<std::hash::random::DefaultHasher as core::hash::Hasher>::write (30 samples, 0.43%)mwpf`<itertools::unique_impl::Unique<I> as core::iter::traits::iterator::Iterator>::next (216 samples, 3.07%)mwp..mwpf`hashbrown::raw::RawTable<T,A>::reserve_rehash (99 samples, 1.41%)mwpf`core::hash::BuildHasher::hash_one (21 samples, 0.30%)mwpf`<mwpf::dual_module_pq::DualModulePQ<Queue> as mwpf::dual_module::DualModuleImpl>::get_edge_slack_tune (13 samples, 0.19%)mwpf`<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (247 samples, 3.52%)mwp..mwpf`alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (8 samples, 0.11%)mwpf`alloc::raw_vec::finish_grow (8 samples, 0.11%)libsystem_malloc.dylib`realloc (8 samples, 0.11%)libsystem_malloc.dylib`malloc_zone_realloc (8 samples, 0.11%)libsystem_platform.dylib`_platform_memmove (8 samples, 0.11%)mwpf`<itertools::unique_impl::Unique<I> as core::iter::traits::iterator::Iterator>::next (9 samples, 0.13%)mwpf`<mwpf::dual_module_pq::DualModulePQ<Queue> as mwpf::dual_module::DualModuleImpl>::get_edge_slack_tune (1 samples, 0.01%)mwpf`<alloc::collections::btree::dedup_sorted_iter::DedupSortedIter<K,V,I> as core::iter::traits::iterator::Iterator>::next (15 samples, 0.21%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (15 samples, 0.21%)mwpf`alloc::collections::btree::append::_<impl alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::bulk_push (31 samples, 0.44%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (2 samples, 0.03%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.23%)mwpf`<mwpf::invalid_subgraph::InvalidSubgraph as core::cmp::Ord>::cmp (86 samples, 1.22%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (60 samples, 0.85%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (10 samples, 0.14%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V> as core::iter::traits::collect::FromIterator<(K,V)>>::from_iter (560 samples, 7.97%)mwpf`<alloc..mwpf`core::slice::sort::merge_sort (264 samples, 3.76%)mwpf..mwpf`core::slice::sort::insertion_sort_shift_left (56 samples, 0.80%)mwpf`<mwpf::invalid_subgraph::InvalidSubgraph as core::cmp::Ord>::cmp (45 samples, 0.64%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (38 samples, 0.54%)mwpf`<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (1 samples, 0.01%)mwpf`alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (1 samples, 0.01%)mwpf`alloc::raw_vec::finish_grow (1 samples, 0.01%)libsystem_malloc.dylib`realloc (1 samples, 0.01%)libsystem_malloc.dylib`malloc_zone_realloc (1 samples, 0.01%)libsystem_malloc.dylib`szone_realloc (1 samples, 0.01%)libsystem_malloc.dylib`szone_malloc_should_clear (1 samples, 0.01%)libsystem_malloc.dylib`small_malloc_should_clear (1 samples, 0.01%)mwpf`<mwpf::matrix::tight::Tight<M> as mwpf::matrix::interface::MatrixTight>::update_edge_tightness (2 samples, 0.03%)mwpf`alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (1 samples, 0.01%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::cmp::PartialEq>::eq (5 samples, 0.07%)mwpf`<mwpf::dual_module_pq::DualModulePQ<Queue> as mwpf::dual_module::DualModuleImpl>::add_dual_node_tune (2 samples, 0.03%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (2 samples, 0.03%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::cmp::PartialEq>::eq (13 samples, 0.19%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (10 samples, 0.14%)mwpf`hashbrown::map::HashMap<K,V,S,A>::insert (17 samples, 0.24%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (4 samples, 0.06%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::cmp::PartialEq>::eq (321 samples, 4.57%)mwpf`..mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (287 samples, 4.09%)mwpf..mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (44 samples, 0.63%)mwpf`mwpf::dual_module::_<impl mwpf::pointers::ArcRwLock<mwpf::dual_module::DualModuleInterface>>::find_or_create_node_tune (391 samples, 5.57%)mwpf`mw..mwpf`mwpf::dual_module::_<impl mwpf::pointers::ArcRwLock<mwpf::dual_module::DualModuleInterface>>::find_node (367 samples, 5.22%)mwpf`m..mwpf`<std::hash::random::DefaultHasher as core::hash::Hasher>::write (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (3 samples, 0.04%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (3 samples, 0.04%)libsystem_malloc.dylib`_nanov2_free (2 samples, 0.03%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (9 samples, 0.13%)mwpf`alloc::sync::Arc<T,A>::drop_slow (6 samples, 0.09%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (6 samples, 0.09%)libsystem_malloc.dylib`nanov2_madvise_block (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_madvise_block_locked (1 samples, 0.01%)libsystem_kernel.dylib`madvise (1 samples, 0.01%)mwpf`<alloc::collections::btree::dedup_sorted_iter::DedupSortedIter<K,V,I> as core::iter::traits::iterator::Iterator>::next (6 samples, 0.09%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (4 samples, 0.06%)mwpf`alloc::collections::btree::map::BTreeMap<K,V,A>::bulk_build_from_sorted_iter (8 samples, 0.11%)mwpf`alloc::collections::btree::append::_<impl alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::bulk_push (8 samples, 0.11%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (2 samples, 0.03%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (11 samples, 0.16%)mwpf`<mwpf::invalid_subgraph::InvalidSubgraph as core::cmp::Ord>::cmp (16 samples, 0.23%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (13 samples, 0.19%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (3 samples, 0.04%)mwpf`<alloc::collections::btree::set::BTreeSet<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (60 samples, 0.85%)mwpf`core::slice::sort::merge_sort (52 samples, 0.74%)mwpf`core::slice::sort::insertion_sort_shift_left (25 samples, 0.36%)mwpf`<mwpf::invalid_subgraph::InvalidSubgraph as core::cmp::Ord>::cmp (22 samples, 0.31%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (18 samples, 0.26%)mwpf`<mwpf::matrix::tight::Tight<M> as mwpf::matrix::interface::MatrixTight>::update_edge_tightness (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_madvise_block (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_madvise_block_locked (1 samples, 0.01%)libsystem_kernel.dylib`madvise (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (4 samples, 0.06%)mwpf`alloc::sync::Arc<T,A>::drop_slow (4 samples, 0.06%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (4 samples, 0.06%)libsystem_platform.dylib`_platform_memset (1 samples, 0.01%)mwpf`core::ptr::drop_in_place<mwpf::relaxer_forest::RelaxerForest> (6 samples, 0.09%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (6 samples, 0.09%)mwpf`alloc::sync::Arc<T,A>::drop_slow (5 samples, 0.07%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (1 samples, 0.01%)mwpf`mwpf::matrix::echelon::Echelon<M>::force_update_echelon_info (18 samples, 0.26%)mwpf`<mwpf::matrix::tail::Tail<M> as mwpf::matrix::interface::MatrixView>::columns (2 samples, 0.03%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (1 samples, 0.01%)mwpf`<mwpf::matrix::tight::Tight<M> as mwpf::matrix::interface::MatrixTight>::update_edge_tightness (9 samples, 0.13%)libsystem_malloc.dylib`free_small (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (2 samples, 0.03%)libsystem_malloc.dylib`nanov2_malloc (1 samples, 0.01%)libsystem_malloc.dylib`_malloc_zone_malloc (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (3 samples, 0.04%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::clone::Clone>::clone::clone_subtree (5 samples, 0.07%)libsystem_malloc.dylib`nanov2_allocate_outlined (3 samples, 0.04%)libsystem_malloc.dylib`nanov2_find_block_and_allocate (3 samples, 0.04%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::clone::Clone>::clone::clone_subtree (13 samples, 0.19%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::clone::Clone>::clone::clone_subtree (12 samples, 0.17%)mwpf`__rdl_alloc (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (3 samples, 0.04%)libsystem_malloc.dylib`free (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (5 samples, 0.07%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (34 samples, 0.48%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (22 samples, 0.31%)libsystem_platform.dylib`_platform_memset (2 samples, 0.03%)mwpf`alloc::collections::btree::append::_<impl alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::bulk_push (4 samples, 0.06%)mwpf`<alloc::collections::btree::dedup_sorted_iter::DedupSortedIter<K,V,I> as core::iter::traits::iterator::Iterator>::next (4 samples, 0.06%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (3 samples, 0.04%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V> as core::convert::From<[(K,V) (15 samples, 0.21%) N]>>::from (15 samples, 0.21%)mwpf`core::slice::sort::insertion_sort_shift_left (11 samples, 0.16%)mwpf`<mwpf::invalid_subgraph::InvalidSubgraph as core::cmp::Ord>::cmp (10 samples, 0.14%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (5 samples, 0.07%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (1 samples, 0.01%)libsystem_platform.dylib`_platform_memmove (2 samples, 0.03%)mwpf`<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (5 samples, 0.07%)libsystem_malloc.dylib`szone_malloc_should_clear (1 samples, 0.01%)libsystem_malloc.dylib`small_malloc_should_clear (1 samples, 0.01%)libsystem_malloc.dylib`small_malloc_from_free_list (1 samples, 0.01%)libsystem_malloc.dylib`small_free_list_add_ptr (1 samples, 0.01%)libsystem_malloc.dylib`_malloc_zone_malloc (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (2 samples, 0.03%)mwpf`alloc::collections::btree::map::BTreeMap<K,V,A>::bulk_build_from_sorted_iter (11 samples, 0.16%)mwpf`alloc::collections::btree::append::_<impl alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::bulk_push (8 samples, 0.11%)mwpf`<alloc::collections::btree::set::BTreeSet<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (87 samples, 1.24%)mwpf`core::slice::sort::merge_sort (69 samples, 0.98%)mwpf`core::slice::sort::insertion_sort_shift_left (10 samples, 0.14%)mwpf`<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (7 samples, 0.10%)mwpf`__rdl_alloc (1 samples, 0.01%)libsystem_platform.dylib`_platform_memmove (8 samples, 0.11%)mwpf`__rdl_alloc (1 samples, 0.01%)mwpf`alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (11 samples, 0.16%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (2 samples, 0.03%)mwpf`alloc::collections::btree::map::BTreeMap<K,V,A>::insert (62 samples, 0.88%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (1 samples, 0.01%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (12 samples, 0.17%)mwpf`alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (9 samples, 0.13%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (14 samples, 0.20%)libsystem_platform.dylib`_platform_memmove (6 samples, 0.09%)mwpf`alloc::collections::btree::remove::_<impl alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::LeafOrInternal>,alloc::collections::btree::node::marker::KV>>::remove_kv_tracking (11 samples, 0.16%)mwpf`alloc::collections::btree::remove::_<impl alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::KV>>::remove_leaf_kv (4 samples, 0.06%)mwpf`alloc::collections::btree::node::BalancingContext<K,V>::bulk_steal_left (1 samples, 0.01%)mwpf`DYLD-STUB$$malloc (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_size (1 samples, 0.01%)mwpf`alloc::raw_vec::RawVec<T,A>::reserve_for_push (3 samples, 0.04%)mwpf`alloc::raw_vec::finish_grow (2 samples, 0.03%)libsystem_malloc.dylib`realloc (1 samples, 0.01%)libsystem_malloc.dylib`malloc_zone_realloc (1 samples, 0.01%)libsystem_malloc.dylib`szone_realloc (1 samples, 0.01%)libsystem_malloc.dylib`tiny_free_no_lock (1 samples, 0.01%)libsystem_malloc.dylib`_malloc_zone_malloc (4 samples, 0.06%)libsystem_malloc.dylib`free (2 samples, 0.03%)libsystem_malloc.dylib`nanov2_malloc (1 samples, 0.01%)libsystem_platform.dylib`_platform_memmove (16 samples, 0.23%)libsystem_malloc.dylib`nanov2_malloc (1 samples, 0.01%)libsystem_malloc.dylib`_malloc_zone_malloc (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_allocate_outlined (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (6 samples, 0.09%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::clone::Clone>::clone::clone_subtree (30 samples, 0.43%)libsystem_malloc.dylib`nanov2_allocate_outlined (15 samples, 0.21%)libsystem_malloc.dylib`nanov2_find_block_and_allocate (7 samples, 0.10%)libsystem_malloc.dylib`nanov2_allocate_from_block (1 samples, 0.01%)mwpf`DYLD-STUB$$malloc (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::clone::Clone>::clone::clone_subtree (51 samples, 0.73%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::clone::Clone>::clone::clone_subtree (49 samples, 0.70%)mwpf`__rdl_alloc (2 samples, 0.03%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (3 samples, 0.04%)libsystem_malloc.dylib`_nanov2_free (4 samples, 0.06%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (15 samples, 0.21%)libsystem_platform.dylib`_platform_memset (1 samples, 0.01%)mwpf`mwpf::invalid_subgraph::InvalidSubgraph::new_complete (588 samples, 8.37%)mwpf`mwpf::i..mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (13 samples, 0.19%)libsystem_malloc.dylib`nanov2_allocate_outlined (2 samples, 0.03%)libsystem_malloc.dylib`nanov2_find_block_and_allocate (2 samples, 0.03%)libsystem_malloc.dylib`nanov2_allocate_from_block (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (1 samples, 0.01%)mwpf`<mwpf::matrix::tail::Tail<M> as mwpf::matrix::interface::MatrixView>::columns (54 samples, 0.77%)mwpf`mwpf::matrix::echelon::Echelon<M>::force_update_echelon_info (1,244 samples, 17.71%)mwpf`mwpf::matrix::echelon::..mwpf`mwpf::matrix::row::ParityRow::xor_two_rows (6 samples, 0.09%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (4 samples, 0.06%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (2 samples, 0.03%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (3 samples, 0.04%)libsystem_malloc.dylib`_malloc_zone_malloc (1 samples, 0.01%)libsystem_malloc.dylib`malloc (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (1 samples, 0.01%)mwpf`alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (6 samples, 0.09%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (1 samples, 0.01%)mwpf`alloc::collections::btree::map::BTreeMap<K,V,A>::insert (41 samples, 0.58%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (2 samples, 0.03%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (3 samples, 0.04%)mwpf`mwpf::matrix::interface::MatrixTail::set_tail_edges (59 samples, 0.84%)mwpf`alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (3 samples, 0.04%)mwpf`mwpf::matrix::hair::HairView<M>::new (1,325 samples, 18.86%)mwpf`mwpf::matrix::hair::Hair..mwpf`mwpf::matrix::row::ParityRow::xor_two_rows (3 samples, 0.04%)libsystem_malloc.dylib`_malloc_zone_malloc (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (2 samples, 0.03%)libsystem_malloc.dylib`free (2 samples, 0.03%)libsystem_malloc.dylib`free_tiny (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (3 samples, 0.04%)libsystem_platform.dylib`_platform_memmove (8 samples, 0.11%)mwpf`__rdl_alloc (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (4 samples, 0.06%)libsystem_malloc.dylib`_szone_free (1 samples, 0.01%)libsystem_malloc.dylib`free_tiny (1 samples, 0.01%)libsystem_malloc.dylib`tiny_free_no_lock (1 samples, 0.01%)libsystem_malloc.dylib`tiny_free_list_add_ptr (1 samples, 0.01%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (19 samples, 0.27%)libsystem_platform.dylib`_platform_memset (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (2 samples, 0.03%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)mwpf`__rdl_alloc (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_allocate_outlined (2 samples, 0.03%)libsystem_malloc.dylib`nanov2_find_block_and_allocate (2 samples, 0.03%)libsystem_malloc.dylib`set_tiny_meta_header_in_use (1 samples, 0.01%)mwpf`alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (13 samples, 0.19%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (8 samples, 0.11%)libsystem_malloc.dylib`szone_malloc_should_clear (4 samples, 0.06%)libsystem_malloc.dylib`tiny_malloc_should_clear (3 samples, 0.04%)libsystem_malloc.dylib`tiny_malloc_from_free_list (2 samples, 0.03%)libsystem_malloc.dylib`nanov2_allocate_outlined (3 samples, 0.04%)libsystem_malloc.dylib`nanov2_find_block_and_allocate (2 samples, 0.03%)mwpf`<mwpf::plugin_single_hair::PluginSingleHair as mwpf::plugin::PluginImpl>::find_relaxers (2,692 samples, 38.32%)mwpf`<mwpf::plugin_single_hair::PluginSingleHair as mwpf::plug..mwpf`mwpf::relaxer::Relaxer::new_raw (438 samples, 6.23%)mwpf`mwp..mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (14 samples, 0.20%)libsystem_malloc.dylib`szone_malloc_should_clear (1 samples, 0.01%)libsystem_malloc.dylib`tiny_malloc_should_clear (1 samples, 0.01%)libsystem_malloc.dylib`tiny_malloc_from_free_list (1 samples, 0.01%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (1 samples, 0.01%)libsystem_platform.dylib`__bzero (1 samples, 0.01%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (1 samples, 0.01%)libsystem_malloc.dylib`free (1 samples, 0.01%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)mwpf`mwpf::invalid_subgraph::InvalidSubgraph::new_complete_ptr (10 samples, 0.14%)mwpf`mwpf::invalid_subgraph::InvalidSubgraph::new_complete (9 samples, 0.13%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (1 samples, 0.01%)libsystem_platform.dylib`_platform_memset (1 samples, 0.01%)mwpf`mwpf::matrix::echelon::Echelon<M>::force_update_echelon_info (5 samples, 0.07%)mwpf`<mwpf::matrix::tail::Tail<M> as mwpf::matrix::interface::MatrixView>::columns (3 samples, 0.04%)mwpf`DYLD-STUB$$memcpy (1 samples, 0.01%)mwpf`<mwpf::plugin_union_find::PluginUnionFind as mwpf::plugin::PluginImpl>::find_relaxers (22 samples, 0.31%)mwpf`mwpf::plugin_union_find::PluginUnionFind::find_single_relaxer (22 samples, 0.31%)mwpf`mwpf::relaxer::Relaxer::new_raw (6 samples, 0.09%)mwpf`__rdl_alloc (1 samples, 0.01%)mwpf`alloc::collections::btree::remove::_<impl alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::LeafOrInternal>,alloc::collections::btree::node::marker::KV>>::remove_kv_tracking (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (2 samples, 0.03%)mwpf`alloc::collections::btree::map::BTreeMap<K,V,A>::insert (1 samples, 0.01%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (3 samples, 0.04%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (5 samples, 0.07%)libsystem_malloc.dylib`free (1 samples, 0.01%)mwpf`DYLD-STUB$$free (1 samples, 0.01%)libsystem_malloc.dylib`DYLD-STUB$$_platform_bzero (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (2 samples, 0.03%)libsystem_malloc.dylib`nanov2_madvise_block (10 samples, 0.14%)libsystem_malloc.dylib`nanov2_madvise_block_locked (10 samples, 0.14%)libsystem_kernel.dylib`madvise (10 samples, 0.14%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (40 samples, 0.57%)mwpf`alloc::sync::Arc<T,A>::drop_slow (35 samples, 0.50%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (27 samples, 0.38%)libsystem_platform.dylib`_platform_memset (2 samples, 0.03%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)mwpf`mwpf::relaxer_forest::RelaxerForest::add (64 samples, 0.91%)mwpf`alloc::sync::Arc<T,A>::drop_slow (44 samples, 0.63%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (4 samples, 0.06%)libsystem_malloc.dylib`free_tiny (2 samples, 0.03%)libsystem_malloc.dylib`tiny_free_no_lock (2 samples, 0.03%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::cmp::PartialEq>::eq (13 samples, 0.19%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (8 samples, 0.11%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (9 samples, 0.13%)mwpf`<mwpf::invalid_subgraph::InvalidSubgraph as core::cmp::Ord>::cmp (27 samples, 0.38%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.30%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (5 samples, 0.07%)mwpf`<mwpf::invalid_subgraph::InvalidSubgraph as core::cmp::Ord>::cmp (20 samples, 0.28%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (18 samples, 0.26%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)mwpf`alloc::collections::btree::map::BTreeMap<K,V,A>::insert (27 samples, 0.38%)mwpf`alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (2 samples, 0.03%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (1 samples, 0.01%)mwpf`alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (1 samples, 0.01%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (3 samples, 0.04%)mwpf`core::iter::traits::iterator::Iterator::cmp_by (27 samples, 0.38%)mwpf`<mwpf::invalid_subgraph::InvalidSubgraph as core::cmp::Ord>::cmp (24 samples, 0.34%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (20 samples, 0.28%)mwpf`mwpf::relaxer::Relaxer::new_raw (19 samples, 0.27%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (1 samples, 0.01%)libsystem_malloc.dylib`szone_malloc_should_clear (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (6 samples, 0.09%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::cmp::PartialEq>::eq (9 samples, 0.13%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (8 samples, 0.11%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (2 samples, 0.03%)mwpf`<mwpf::relaxer::Relaxer as core::cmp::Ord>::cmp (44 samples, 0.63%)mwpf`core::iter::traits::iterator::Iterator::cmp_by (34 samples, 0.48%)mwpf`<mwpf::invalid_subgraph::InvalidSubgraph as core::cmp::Ord>::cmp (32 samples, 0.46%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (27 samples, 0.38%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::cmp::PartialEq>::eq (9 samples, 0.13%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (8 samples, 0.11%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (6 samples, 0.09%)mwpf`alloc::collections::btree::map::BTreeMap<K,V,A>::insert (40 samples, 0.57%)mwpf`<mwpf::relaxer::Relaxer as core::cmp::Ord>::cmp (40 samples, 0.57%)mwpf`core::iter::traits::iterator::Iterator::cmp_by (30 samples, 0.43%)mwpf`<mwpf::invalid_subgraph::InvalidSubgraph as core::cmp::Ord>::cmp (24 samples, 0.34%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.30%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (2 samples, 0.03%)mwpf`__rdl_alloc (1 samples, 0.01%)mwpf`alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (5 samples, 0.07%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (4 samples, 0.06%)libsystem_malloc.dylib`nanov2_allocate_outlined (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_find_block_and_allocate (1 samples, 0.01%)mwpf`mwpf::plugin::PluginManager::find_relaxer (3,195 samples, 45.48%)mwpf`mwpf::plugin::PluginManager::find_relaxermwpf`mwpf::plugin::PluginEntry::execute (3,101 samples, 44.14%)mwpf`mwpf::plugin::PluginEntry::executemwpf`mwpf::relaxer_forest::RelaxerForest::expand (312 samples, 4.44%)mwpf`..mwpf`mwpf::relaxer_forest::RelaxerForest::compute_expanded (312 samples, 4.44%)mwpf`..mwpf`mwpf::relaxer_forest::RelaxerForest::compute_expanded (182 samples, 2.59%)mw..mwpf`mwpf::relaxer::Relaxer::new_raw (81 samples, 1.15%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (2 samples, 0.03%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)libsystem_malloc.dylib`free (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (3 samples, 0.04%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::BTreeMap<K,V> as core::iter::traits::collect::FromIterator<(K,V)>>::from_iter (1 samples, 0.01%)mwpf`alloc::collections::btree::append::_<impl alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::bulk_push (1 samples, 0.01%)libsystem_malloc.dylib`szone_malloc_should_clear (1 samples, 0.01%)libsystem_malloc.dylib`tiny_malloc_should_clear (1 samples, 0.01%)libsystem_malloc.dylib`tiny_malloc_from_free_list (1 samples, 0.01%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (15 samples, 0.21%)mwpf`<mwpf::invalid_subgraph::InvalidSubgraph as core::cmp::Ord>::cmp (63 samples, 0.90%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (54 samples, 0.77%)libsystem_malloc.dylib`free_medium (4 samples, 0.06%)libsystem_kernel.dylib`madvise (4 samples, 0.06%)mwpf`HEkk::~HEkk (29 samples, 0.41%)mwpf`HSimplexNla::~HSimplexNla (25 samples, 0.36%)mwpf`HFactor::~HFactor (25 samples, 0.36%)libsystem_malloc.dylib`free_medium (25 samples, 0.36%)libsystem_kernel.dylib`madvise (25 samples, 0.36%)mwpf`Highs::~Highs (32 samples, 0.46%)mwpf`Highs::~Highs (32 samples, 0.46%)mwpf`HighsLp::~HighsLp (3 samples, 0.04%)libsystem_malloc.dylib`free_medium (3 samples, 0.04%)libsystem_kernel.dylib`madvise (3 samples, 0.04%)mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (22 samples, 0.31%)mwpf`alloc::collections::btree::map::BTreeMap<K,V,A>::insert (195 samples, 2.78%)mw..mwpf`<mwpf::invalid_subgraph::InvalidSubgraph as core::cmp::Ord>::cmp (172 samples, 2.45%)mw..mwpf`<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (148 samples, 2.11%)m..libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (4 samples, 0.06%)libsystem_malloc.dylib`nanov2_realloc (6 samples, 0.09%)libsystem_malloc.dylib`nanov2_allocate_outlined (2 samples, 0.03%)libsystem_malloc.dylib`_szone_free (1 samples, 0.01%)libsystem_malloc.dylib`free_small (1 samples, 0.01%)libsystem_malloc.dylib`small_free_list_add_ptr (1 samples, 0.01%)libsystem_malloc.dylib`free_tiny (1 samples, 0.01%)libsystem_malloc.dylib`tiny_free_no_lock (1 samples, 0.01%)libsystem_malloc.dylib`small_try_realloc_in_place (2 samples, 0.03%)libsystem_malloc.dylib`szone_malloc_should_clear (3 samples, 0.04%)libsystem_malloc.dylib`small_malloc_should_clear (3 samples, 0.04%)libsystem_malloc.dylib`small_malloc_from_free_list (3 samples, 0.04%)libsystem_malloc.dylib`small_free_list_add_ptr (1 samples, 0.01%)libsystem_malloc.dylib`szone_size (1 samples, 0.01%)libsystem_malloc.dylib`tiny_size (1 samples, 0.01%)libsystem_malloc.dylib`szone_realloc (10 samples, 0.14%)libsystem_malloc.dylib`tiny_free_no_lock (1 samples, 0.01%)libsystem_malloc.dylib`malloc_zone_realloc (24 samples, 0.34%)libsystem_platform.dylib`_platform_memmove (4 samples, 0.06%)libsystem_malloc.dylib`realloc (26 samples, 0.37%)libsystem_malloc.dylib`nanov2_realloc (1 samples, 0.01%)mwpf`alloc::raw_vec::RawVec<T,A>::reserve_for_push (29 samples, 0.41%)mwpf`alloc::raw_vec::finish_grow (27 samples, 0.38%)mwpf`__rdl_realloc (1 samples, 0.01%)mwpf`alloc::raw_vec::finish_grow (3 samples, 0.04%)libsystem_malloc.dylib`_malloc_zone_malloc (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (2 samples, 0.03%)libc++.1.dylib`std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>>::__assign_external (1 samples, 0.01%)mwpf`HighsLp::clear (1 samples, 0.01%)libc++.1.dylib`std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>>::__assign_external (1 samples, 0.01%)mwpf`HEkk::addCols (3 samples, 0.04%)mwpf`HEkk::clear (3 samples, 0.04%)mwpf`HighsSparseMatrix::clear (1 samples, 0.01%)mwpf`std::__1::vector<int, std::__1::allocator<int>>::assign (1 samples, 0.01%)mwpf`HighsSparseMatrix::clear (1 samples, 0.01%)mwpf`std::__1::vector<int, std::__1::allocator<int>>::assign (1 samples, 0.01%)libsystem_malloc.dylib`_malloc_zone_malloc (1 samples, 0.01%)mwpf`HighsSparseMatrix::~HighsSparseMatrix (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)libc++abi.dylib`DYLD-STUB$$malloc (1 samples, 0.01%)mwpf`Highs::addColsInterface (10 samples, 0.14%)mwpf`std::__1::vector<int, std::__1::allocator<int>>::assign (2 samples, 0.03%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)mwpf`HighsHessian::clear (1 samples, 0.01%)libc++.1.dylib`std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>>::__assign_external (1 samples, 0.01%)mwpf`highs::Model::add_col (13 samples, 0.19%)mwpf`Highs::addCol (13 samples, 0.19%)mwpf`Highs::addCols (13 samples, 0.19%)mwpf`PresolveComponentData::clear (2 samples, 0.03%)mwpf`HighsLp::clear (2 samples, 0.03%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)libsystem_malloc.dylib`_szone_free (1 samples, 0.01%)libsystem_malloc.dylib`free_small (2 samples, 0.03%)libsystem_malloc.dylib`small_free_list_add_ptr (1 samples, 0.01%)libsystem_malloc.dylib`_malloc_zone_malloc (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (2 samples, 0.03%)mwpf`<(ExtendA,ExtendB) as core::iter::traits::collect::Extend<(A,B)>>::extend (14 samples, 0.20%)mwpf`alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (5 samples, 0.07%)mwpf`alloc::raw_vec::finish_grow (1 samples, 0.01%)libsystem_malloc.dylib`szone_malloc_should_clear (1 samples, 0.01%)libsystem_malloc.dylib`small_malloc_should_clear (1 samples, 0.01%)libsystem_malloc.dylib`small_malloc_from_free_list (1 samples, 0.01%)libsystem_malloc.dylib`small_free_list_add_ptr (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)libsystem_malloc.dylib`free (1 samples, 0.01%)libsystem_platform.dylib`_platform_memmove (2 samples, 0.03%)libc++abi.dylib`operator new(unsigned long) (3 samples, 0.04%)libsystem_malloc.dylib`szone_malloc_should_clear (2 samples, 0.03%)libsystem_malloc.dylib`tiny_malloc_should_clear (1 samples, 0.01%)libsystem_malloc.dylib`tiny_malloc_from_free_list (1 samples, 0.01%)libsystem_malloc.dylib`tiny_free_list_add_ptr (1 samples, 0.01%)libsystem_malloc.dylib`_malloc_zone_malloc (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (4 samples, 0.06%)libsystem_malloc.dylib`free_small (1 samples, 0.01%)libsystem_malloc.dylib`free_tiny (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (2 samples, 0.03%)libsystem_platform.dylib`_platform_memset (1 samples, 0.01%)libc++.1.dylib`std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>>::__assign_external (1 samples, 0.01%)libc++.1.dylib`DYLD-STUB$$memmove (1 samples, 0.01%)libc++.1.dylib`DYLD-STUB$$strlen (1 samples, 0.01%)libc++.1.dylib`std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>>::__assign_external (1 samples, 0.01%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)mwpf`HEkk::clearEkkDataInfo (1 samples, 0.01%)mwpf`HSimplexNla::frozenBasisClearAllData (1 samples, 0.01%)libc++.1.dylib`std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>>::__assign_external (2 samples, 0.03%)mwpf`HighsLp::clear (3 samples, 0.04%)mwpf`HighsSparseMatrix::clear (1 samples, 0.01%)mwpf`std::__1::vector<int, std::__1::allocator<int>>::assign (1 samples, 0.01%)mwpf`HEkk::addRows (11 samples, 0.16%)mwpf`HEkk::clear (10 samples, 0.14%)mwpf`HighsSparseMatrix::clear (1 samples, 0.01%)mwpf`std::__1::vector<int, std::__1::allocator<int>>::assign (1 samples, 0.01%)libc++abi.dylib`DYLD-STUB$$malloc (1 samples, 0.01%)libc++abi.dylib`operator new(unsigned long) (2 samples, 0.03%)libsystem_malloc.dylib`szone_malloc_should_clear (2 samples, 0.03%)libsystem_malloc.dylib`small_malloc_should_clear (2 samples, 0.03%)libsystem_malloc.dylib`small_malloc_from_free_list (1 samples, 0.01%)libsystem_malloc.dylib`_malloc_zone_malloc (2 samples, 0.03%)mwpf`HighsSparseMatrix::HighsSparseMatrix (9 samples, 0.13%)libsystem_malloc.dylib`nanov2_malloc (1 samples, 0.01%)libsystem_malloc.dylib`free_medium (5 samples, 0.07%)libsystem_kernel.dylib`madvise (5 samples, 0.07%)libsystem_platform.dylib`_platform_memmove (5 samples, 0.07%)libsystem_platform.dylib`_platform_memset (3 samples, 0.04%)mwpf`HighsSparseMatrix::addRows (17 samples, 0.24%)mwpf`std::__1::vector<double, std::__1::allocator<double>>::__append (1 samples, 0.01%)libc++abi.dylib`operator new(unsigned long) (1 samples, 0.01%)libsystem_malloc.dylib`szone_malloc_should_clear (1 samples, 0.01%)libsystem_malloc.dylib`medium_malloc_should_clear (1 samples, 0.01%)libc++abi.dylib`DYLD-STUB$$free (1 samples, 0.01%)libc++abi.dylib`operator new(unsigned long) (3 samples, 0.04%)libsystem_malloc.dylib`szone_malloc_should_clear (3 samples, 0.04%)libsystem_malloc.dylib`tiny_malloc_should_clear (3 samples, 0.04%)libsystem_malloc.dylib`tiny_malloc_from_free_list (3 samples, 0.04%)libsystem_malloc.dylib`tiny_free_list_add_ptr (2 samples, 0.03%)libsystem_malloc.dylib`_malloc_zone_malloc (2 samples, 0.03%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)libsystem_malloc.dylib`free_tiny (2 samples, 0.03%)libsystem_malloc.dylib`tiny_free_no_lock (2 samples, 0.03%)libc++abi.dylib`DYLD-STUB$$operator new(unsigned long) (1 samples, 0.01%)libsystem_malloc.dylib`free_tiny (2 samples, 0.03%)libsystem_malloc.dylib`tiny_free_no_lock (2 samples, 0.03%)libsystem_malloc.dylib`tiny_free_list_add_ptr (1 samples, 0.01%)libsystem_platform.dylib`_platform_memset (1 samples, 0.01%)libc++abi.dylib`operator new(unsigned long) (1 samples, 0.01%)libsystem_malloc.dylib`szone_malloc_should_clear (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)mwpf`HighsHashTable<int, void>::growTable (30 samples, 0.43%)mwpf`bool HighsHashTable<int, void>::insert<HighsHashTableEntry<int, void>>(HighsHashTableEntry (21 samples, 0.30%)mwpf`assessMatrix(HighsLogOptions const&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>>, int, int, bool, std::__1::vector<int, std::__1::allocator<int>>&, std::__1::vector<int, std::__1::allocator<int>>&, std::__1::vector<int, std::__1::allocator<int>>&, std::__1::vector<double, std::__1::allocator (114 samples, 1.62%)mwpf`bool HighsHashTable<int, void>::insert<int> (70 samples, 1.00%)mwpf`bool HighsHashTable<int, void>::insert<HighsHashTableEntry<int, void>>(HighsHashTableEntry (2 samples, 0.03%)mwpf`assessMatrixDimensions(HighsLogOptions const&, int, bool, std::__1::vector<int, std::__1::allocator<int>> const&, std::__1::vector<int, std::__1::allocator<int>> const&, std::__1::vector<int, std::__1::allocator<int>> const&, std::__1::vector<double, std::__1::allocator (1 samples, 0.01%)mwpf`HighsSparseMatrix::assess(HighsLogOptions const&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator (120 samples, 1.71%)mwpf`bool HighsHashTable<int, void>::insert<int> (4 samples, 0.06%)mwpf`HighsSparseMatrix::clear (1 samples, 0.01%)mwpf`std::__1::vector<int, std::__1::allocator<int>>::assign (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (1 samples, 0.01%)mwpf`assessBounds(HighsOptions const&, char const*, int, HighsIndexCollection const&, std::__1::vector<double, std::__1::allocator<double>>&, std::__1::vector<double, std::__1::allocator (1 samples, 0.01%)mwpf`isMatrixDataNull(HighsLogOptions const&, int const*, int const*, double const*) (1 samples, 0.01%)mwpf`std::__1::vector<double, std::__1::allocator<double>>::__append (1 samples, 0.01%)mwpf`Highs::addRowsInterface (175 samples, 2.49%)mw..mwpf`std::__1::vector<int, std::__1::allocator<int>>::__append (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_malloc (1 samples, 0.01%)mwpf`HEkk::debugRetainedDataOk (1 samples, 0.01%)mwpf`Highs::returnFromHighs (2 samples, 0.03%)mwpf`Highs::forceHighsSolutionBasisSize (1 samples, 0.01%)libsystem_malloc.dylib`free_small (1 samples, 0.01%)libsystem_malloc.dylib`small_free_list_remove_ptr (1 samples, 0.01%)mwpf`HighsBasis::clear (1 samples, 0.01%)libc++.1.dylib`std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>>::__assign_external (1 samples, 0.01%)mwpf`HighsBasis::clear (2 samples, 0.03%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)libc++.1.dylib`DYLD-STUB$$strlen (1 samples, 0.01%)libsystem_platform.dylib`_platform_strlen (1 samples, 0.01%)mwpf`HighsLp::clear (4 samples, 0.06%)mwpf`HighsSparseMatrix::clear (1 samples, 0.01%)mwpf`std::__1::vector<int, std::__1::allocator<int>>::assign (1 samples, 0.01%)mwpf`HighsSparseMatrix::clear (1 samples, 0.01%)mwpf`PresolveComponentData::clear (8 samples, 0.11%)mwpf`presolve::HighsPostsolveStack::operator= (1 samples, 0.01%)mwpf`Highs::addRow (191 samples, 2.72%)mw..mwpf`Highs::addRows (191 samples, 2.72%)mw..mwpf`isRowDataNull(HighsLogOptions const&, double const*, double const*) (1 samples, 0.01%)mwpf`highs::Model::add_row (213 samples, 3.03%)mwp..mwpf`alloc::vec::in_place_collect::_<impl alloc::vec::spec_from_iter::SpecFromIter<T,I> for alloc::vec::Vec<T>>::from_iter (4 samples, 0.06%)mwpf`HEkk::initialiseSimplexLpRandomVectors (1 samples, 0.01%)mwpf`HEkk::moveLp (4 samples, 0.06%)mwpf`HighsRandom::drawUniform (3 samples, 0.04%)mwpf`HEkk::fullBtran(HVectorBase (1 samples, 0.01%)mwpf`HSimplexNla::btran(HVectorBase (1 samples, 0.01%)mwpf`HFactor::btranCall(HVectorBase (1 samples, 0.01%)mwpf`HVectorBase<double>::tight (1 samples, 0.01%)mwpf`HEkk::computeDual (13 samples, 0.19%)mwpf`HEkk::fullPrice(HVectorBase<double> const&, HVectorBase (12 samples, 0.17%)mwpf`HighsSparseMatrix::priceByColumn(bool, HVectorBase<double>&, HVectorBase (12 samples, 0.17%)mwpf`HEkk::computePrimalObjectiveValue (1 samples, 0.01%)mwpf`HEkk::computeSimplexPrimalInfeasible (1 samples, 0.01%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)mwpf`HFactor::buildFinish (13 samples, 0.19%)libsystem_platform.dylib`__bzero (4 samples, 0.06%)mwpf`HFactor::buildKernel (36 samples, 0.51%)mwpf`HEkk::computeFactor (67 samples, 0.95%)mwpf`HSimplexNla::invert (66 samples, 0.94%)mwpf`HFactor::build (66 samples, 0.94%)mwpf`HFactor::buildSimple (17 samples, 0.24%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)libsystem_platform.dylib`__bzero (16 samples, 0.23%)libsystem_platform.dylib`_platform_memmove (3 samples, 0.04%)mwpf`HSimplexNla::setup (22 samples, 0.31%)mwpf`HFactor::setupGeneral (21 samples, 0.30%)libsystem_platform.dylib`_platform_memset (1 samples, 0.01%)mwpf`HEkk::initialiseSimplexLpBasisAndFactor (90 samples, 1.28%)mwpf`HighsHashHelpers::sparse_combine (1 samples, 0.01%)mwpf`HighsSparseMatrix::collectAj(HVectorBase (1 samples, 0.01%)mwpf`HEkk::initialiseForSolve (132 samples, 1.88%)m..mwpf`HighsSparseMatrix::createRowwisePartitioned (26 samples, 0.37%)libsystem_platform.dylib`__bzero (6 samples, 0.09%)mwpf`HEkk::computeDual (3 samples, 0.04%)mwpf`HEkk::fullPrice(HVectorBase<double> const&, HVectorBase (3 samples, 0.04%)mwpf`HighsSparseMatrix::priceByColumn(bool, HVectorBase<double>&, HVectorBase (3 samples, 0.04%)mwpf`HEkk::initialiseBound (1 samples, 0.01%)mwpf`HEkk::rebuildRefactor (1 samples, 0.01%)mwpf`HEkk::factorSolveError (1 samples, 0.01%)mwpf`HVectorBase<double>::setup (1 samples, 0.01%)mwpf`std::__1::vector<double, std::__1::allocator<double>>::assign (1 samples, 0.01%)mwpf`HEkk::getValueScale(int, std::__1::vector<double, std::__1::allocator (1 samples, 0.01%)mwpf`HEkkDualRow::chooseFinal (2 samples, 0.03%)mwpf`HEkkDualRow::chooseFinalWorkGroupQuad (1 samples, 0.01%)mwpf`HEkkDual::chooseColumn(HVectorBase (3 samples, 0.04%)mwpf`HighsSimplexAnalysis::simplexTimerStart (1 samples, 0.01%)mwpf`HEkkDual::chooseRow (2 samples, 0.03%)mwpf`HEkkDualRHS::chooseNormal (2 samples, 0.03%)mwpf`HEkkDual::updateDual (1 samples, 0.01%)mwpf`HEkkDual::updateFtranBFRT (2 samples, 0.03%)mwpf`HSimplexNla::ftran(HVectorBase (2 samples, 0.03%)mwpf`HFactor::ftranCall(HVectorBase (2 samples, 0.03%)mwpf`HFactor::ftranU(HVectorBase (2 samples, 0.03%)mwpf`HEkkDual::updateFtranDSE(HVectorBase (1 samples, 0.01%)mwpf`HSimplexNla::ftranInScaledSpace(HVectorBase (1 samples, 0.01%)mwpf`HFactor::ftranCall(HVectorBase (1 samples, 0.01%)mwpf`solveHyper(int, int const*, int const*, double const*, int const*, int const*, int const*, double const*, HVectorBase<double>*) (1 samples, 0.01%)mwpf`HEkk::updateFactor(HVectorBase<double>*, HVectorBase (1 samples, 0.01%)mwpf`HSimplexNla::update(HVectorBase<double>*, HVectorBase (1 samples, 0.01%)mwpf`HFactor::update(HVectorBase<double>*, HVectorBase (1 samples, 0.01%)mwpf`HFactor::updateFT(HVectorBase<double>*, HVectorBase (1 samples, 0.01%)mwpf`HEkkDual::updatePivots (4 samples, 0.06%)mwpf`HighsSparseMatrix::update (3 samples, 0.04%)mwpf`HEkk::updateDualSteepestEdgeWeights(int, int, HVectorBase (1 samples, 0.01%)mwpf`HEkkDualRHS::updatePrimal(HVectorBase (1 samples, 0.01%)mwpf`HEkkDual::updatePrimal(HVectorBase (3 samples, 0.04%)mwpf`HSimplexNla::variableScaleFactor (1 samples, 0.01%)mwpf`HEkkDualRow::chooseMakepack(HVectorBase (1 samples, 0.01%)mwpf`HEkkDual::iterate (19 samples, 0.27%)mwpf`HighsSparseMatrix::collectAj(HVectorBase (1 samples, 0.01%)mwpf`HEkk::fullBtran(HVectorBase (1 samples, 0.01%)mwpf`HSimplexNla::btran(HVectorBase (1 samples, 0.01%)mwpf`HFactor::btranCall(HVectorBase (1 samples, 0.01%)mwpf`HFactor::btranU(HVectorBase (1 samples, 0.01%)mwpf`HEkk::computeDual (2 samples, 0.03%)mwpf`HEkk::fullPrice(HVectorBase<double> const&, HVectorBase (1 samples, 0.01%)mwpf`HighsSparseMatrix::priceByColumn(bool, HVectorBase<double>&, HVectorBase (1 samples, 0.01%)mwpf`HEkk::computeSimplexLpDualInfeasible (1 samples, 0.01%)mwpf`HEkk::getNonsingularInverse (1 samples, 0.01%)mwpf`HEkk::computeFactor (1 samples, 0.01%)mwpf`HSimplexNla::invert (1 samples, 0.01%)mwpf`HFactor::build (1 samples, 0.01%)mwpf`HFactor::buildSimple (1 samples, 0.01%)mwpf`HEkkDual::solvePhase1 (25 samples, 0.36%)mwpf`HEkkDual::rebuild (5 samples, 0.07%)mwpf`HighsSparseMatrix::collectAj(HVectorBase (1 samples, 0.01%)mwpf`HEkk::bailout (1 samples, 0.01%)libc++.1.dylib`std::__1::chrono::steady_clock::now (1 samples, 0.01%)libsystem_c.dylib`clock_gettime (1 samples, 0.01%)libsystem_c.dylib`clock_gettime_nsec_np (1 samples, 0.01%)libsystem_kernel.dylib`mach_continuous_time (1 samples, 0.01%)mwpf`HEkk::debugSimplex(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator (1 samples, 0.01%)mwpf`HSimplexNla::btran(HVectorBase (1 samples, 0.01%)mwpf`HFactor::btranCall(HVectorBase (1 samples, 0.01%)mwpf`HFactor::btranU(HVectorBase (1 samples, 0.01%)mwpf`HEkk::rebuildRefactor (2 samples, 0.03%)mwpf`HEkk::factorSolveError (2 samples, 0.03%)mwpf`HSimplexNla::ftran(HVectorBase (1 samples, 0.01%)mwpf`HFactor::ftranCall(HVectorBase (1 samples, 0.01%)mwpf`HFactor::ftranFT(HVectorBase (1 samples, 0.01%)mwpf`HEkkDual::cleanup (1 samples, 0.01%)mwpf`HEkk::computeDual (1 samples, 0.01%)mwpf`HEkk::fullPrice(HVectorBase<double> const&, HVectorBase (1 samples, 0.01%)mwpf`HighsSparseMatrix::priceByColumn(bool, HVectorBase<double>&, HVectorBase (1 samples, 0.01%)mwpf`HEkk::getValueScale(int, std::__1::vector<double, std::__1::allocator (8 samples, 0.11%)mwpf`HEkk::isBadBasisChange (3 samples, 0.04%)mwpf`HVectorBase<double>::clear (1 samples, 0.01%)mwpf`std::__1::vector<double, std::__1::allocator<double>>::assign (1 samples, 0.01%)mwpf`HEkk::tableauRowPrice(bool, HVectorBase<double> const&, HVectorBase (84 samples, 1.20%)mwpf`HighsSparseMatrix::priceByRowWithSwitch(bool, HVectorBase<double>&, HVectorBase (83 samples, 1.18%)mwpf`HighsSparseMatrix::priceByRowDenseResult(std::__1::vector<double, std::__1::allocator<double>>&, HVectorBase (72 samples, 1.02%)mwpf`HEkkDualRow::chooseFinalWorkGroupQuad (1 samples, 0.01%)mwpf`HEkkDualRow::chooseFinal (9 samples, 0.13%)mwpf`HighsSimplexAnalysis::simplexTimerStop (1 samples, 0.01%)mwpf`HighsSimplexAnalysis::simplexTimerStop (1 samples, 0.01%)mwpf`HEkkDual::chooseColumn(HVectorBase (95 samples, 1.35%)mwpf`nearestPowerOfTwoScale(double) (1 samples, 0.01%)mwpf`HEkkDualRHS::chooseNormal (23 samples, 0.33%)mwpf`HFactor::btranL(HVectorBase (23 samples, 0.33%)mwpf`HFactor::btranU(HVectorBase (29 samples, 0.41%)mwpf`HVectorBase<double>::tight (1 samples, 0.01%)mwpf`HFactor::btranCall(HVectorBase (57 samples, 0.81%)mwpf`solveHyper(int, int const*, int const*, double const*, int const*, int const*, int const*, double const*, HVectorBase<double>*) (4 samples, 0.06%)mwpf`HFactor::btranU(HVectorBase (1 samples, 0.01%)mwpf`HSimplexNla::btran(HVectorBase (61 samples, 0.87%)mwpf`HVectorBase<double>::reIndex (3 samples, 0.04%)mwpf`HEkkDual::chooseRow (85 samples, 1.21%)mwpf`HighsSimplexAnalysis::dualSteepestEdgeWeightError (1 samples, 0.01%)libsystem_m.dylib`log (1 samples, 0.01%)mwpf`HEkkDual::updateDual (8 samples, 0.11%)mwpf`HEkkDualRow::updateDual (8 samples, 0.11%)mwpf`HFactor::ftranFT(HVectorBase (24 samples, 0.34%)mwpf`HFactor::ftranL(HVectorBase (8 samples, 0.11%)mwpf`HFactor::ftranU(HVectorBase (61 samples, 0.87%)mwpf`HVectorBase<double>::pack (6 samples, 0.09%)mwpf`HFactor::ftranCall(HVectorBase (100 samples, 1.42%)mwpf`HVectorBase<double>::tight (1 samples, 0.01%)mwpf`HEkkDual::updateFtran (113 samples, 1.61%)mwpf`HSimplexNla::ftran(HVectorBase (112 samples, 1.59%)mwpf`HVectorBase<double>::reIndex (12 samples, 0.17%)mwpf`HEkkDual::updateFtranBFRT (1 samples, 0.01%)mwpf`HSimplexNla::ftran(HVectorBase (1 samples, 0.01%)mwpf`HFactor::ftranCall(HVectorBase (1 samples, 0.01%)mwpf`HFactor::ftranU(HVectorBase (1 samples, 0.01%)mwpf`HFactor::ftranFT(HVectorBase (10 samples, 0.14%)mwpf`HFactor::ftranL(HVectorBase (23 samples, 0.33%)mwpf`HFactor::ftranU(HVectorBase (65 samples, 0.93%)mwpf`HVectorBase<double>::tight (2 samples, 0.03%)mwpf`HFactor::ftranCall(HVectorBase (101 samples, 1.44%)mwpf`solveHyper(int, int const*, int const*, double const*, int const*, int const*, int const*, double const*, HVectorBase<double>*) (1 samples, 0.01%)mwpf`HEkkDual::updateFtranDSE(HVectorBase (110 samples, 1.57%)mwpf`HSimplexNla::ftranInScaledSpace(HVectorBase (110 samples, 1.57%)mwpf`HVectorBase<double>::reIndex (9 samples, 0.13%)libsystem_platform.dylib`_platform_memmove (3 samples, 0.04%)mwpf`HEkk::updateFactor(HVectorBase<double>*, HVectorBase (42 samples, 0.60%)mwpf`HSimplexNla::update(HVectorBase<double>*, HVectorBase (42 samples, 0.60%)mwpf`HFactor::update(HVectorBase<double>*, HVectorBase (42 samples, 0.60%)mwpf`HFactor::updateFT(HVectorBase<double>*, HVectorBase (39 samples, 0.56%)libsystem_platform.dylib`_platform_memset (1 samples, 0.01%)mwpf`HEkkDual::updatePivots (79 samples, 1.12%)mwpf`HighsSparseMatrix::update (37 samples, 0.53%)mwpf`HEkk::updateDualSteepestEdgeWeights(int, int, HVectorBase (12 samples, 0.17%)mwpf`HEkkDualRHS::updateInfeasList(HVectorBase (3 samples, 0.04%)mwpf`HEkkDualRHS::updatePrimal(HVectorBase (4 samples, 0.06%)mwpf`HEkkDual::updatePrimal(HVectorBase (24 samples, 0.34%)mwpf`HSimplexNla::basicColScaleFactor (4 samples, 0.06%)mwpf`HEkkDualRow::chooseMakepack(HVectorBase (14 samples, 0.20%)mwpf`HEkkDualRow::choosePossible (24 samples, 0.34%)mwpf`HVectorBase<double>::norm2 (1 samples, 0.01%)mwpf`HighsSimplexAnalysis::simplexTimerStop (1 samples, 0.01%)mwpf`HEkkDual::iterate (572 samples, 8.14%)mwpf`HEkkDu..mwpf`HighsSparseMatrix::collectAj(HVectorBase (6 samples, 0.09%)mwpf`HEkk::fullBtran(HVectorBase (2 samples, 0.03%)mwpf`HSimplexNla::btran(HVectorBase (2 samples, 0.03%)mwpf`HFactor::btranCall(HVectorBase (2 samples, 0.03%)mwpf`HFactor::btranU(HVectorBase (2 samples, 0.03%)mwpf`HEkk::computeDual (11 samples, 0.16%)mwpf`HEkk::fullPrice(HVectorBase<double> const&, HVectorBase (8 samples, 0.11%)mwpf`HighsSparseMatrix::priceByColumn(bool, HVectorBase<double>&, HVectorBase (8 samples, 0.11%)mwpf`HFactor::ftranL(HVectorBase (1 samples, 0.01%)mwpf`HFactor::ftranCall(HVectorBase (4 samples, 0.06%)mwpf`HFactor::ftranU(HVectorBase (3 samples, 0.04%)mwpf`HSimplexNla::ftran(HVectorBase (5 samples, 0.07%)mwpf`HVectorBase<double>::reIndex (1 samples, 0.01%)mwpf`HEkk::computePrimal (8 samples, 0.11%)mwpf`HVectorBase<double>::setup (1 samples, 0.01%)mwpf`std::__1::vector<int, std::__1::allocator<int>>::assign (1 samples, 0.01%)mwpf`HEkk::computeSimplexDualInfeasible (2 samples, 0.03%)mwpf`HFactor::buildFinish (4 samples, 0.06%)mwpf`HFactor::buildKernel (89 samples, 1.27%)mwpf`HEkk::getNonsingularInverse (111 samples, 1.58%)mwpf`HEkk::computeFactor (111 samples, 1.58%)mwpf`HSimplexNla::invert (111 samples, 1.58%)mwpf`HFactor::build (111 samples, 1.58%)mwpf`HFactor::buildSimple (18 samples, 0.26%)libsystem_platform.dylib`_platform_memset (1 samples, 0.01%)mwpf`HSimplexNla::btran(HVectorBase (1 samples, 0.01%)mwpf`HFactor::btranCall(HVectorBase (1 samples, 0.01%)mwpf`HFactor::btranU(HVectorBase (1 samples, 0.01%)mwpf`HEkk::factorSolveError (2 samples, 0.03%)mwpf`HSimplexNla::ftran(HVectorBase (1 samples, 0.01%)mwpf`HFactor::ftranCall(HVectorBase (1 samples, 0.01%)mwpf`HFactor::ftranU(HVectorBase (1 samples, 0.01%)mwpf`HEkk::rebuildRefactor (3 samples, 0.04%)mwpf`HighsSparseMatrix::collectAj(HVectorBase (1 samples, 0.01%)mwpf`HEkkDual::solve (742 samples, 10.56%)mwpf`HEkkDual::..mwpf`HEkkDual::solvePhase2 (713 samples, 10.15%)mwpf`HEkkDual::..mwpf`HEkkDual::rebuild (136 samples, 1.94%)m..mwpf`HEkkDual::correctDualInfeasibilities (1 samples, 0.01%)mwpf`HEkk::computeDual (2 samples, 0.03%)mwpf`HEkk::fullPrice(HVectorBase<double> const&, HVectorBase (1 samples, 0.01%)mwpf`HighsSparseMatrix::priceByColumn(bool, HVectorBase<double>&, HVectorBase (1 samples, 0.01%)mwpf`HEkk::computePrimal (1 samples, 0.01%)mwpf`HEkkPrimal::solve (4 samples, 0.06%)mwpf`HEkkPrimal::solvePhase2 (4 samples, 0.06%)mwpf`HEkkPrimal::rebuild (4 samples, 0.06%)mwpf`HEkkPrimal::correctPrimal (1 samples, 0.01%)mwpf`HighsLp::HighsLp (14 samples, 0.20%)mwpf`HighsSparseMatrix::HighsSparseMatrix (13 samples, 0.19%)mwpf`HSimplexNla::debugCheckData(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator (23 samples, 0.33%)mwpf`HighsLp::~HighsLp (2 samples, 0.03%)libsystem_malloc.dylib`free_medium (2 samples, 0.03%)libsystem_kernel.dylib`madvise (2 samples, 0.03%)mwpf`HEkk::solve (903 samples, 12.85%)mwpf`HEkk::solvemwpf`HighsSimplexAnalysis::setup(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator (1 samples, 0.01%)mwpf`initialiseScatterData(int, HighsScatterData&) (1 samples, 0.01%)mwpf`std::__1::vector<double, std::__1::allocator<double>>::__append (1 samples, 0.01%)libc++abi.dylib`operator new(unsigned long) (1 samples, 0.01%)libsystem_malloc.dylib`nanov2_allocate_outlined (1 samples, 0.01%)mwpf`HighsHashHelpers::sparse_combine (2 samples, 0.03%)mwpf`Highs::callSolveLp(HighsLp&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator (965 samples, 13.74%)mwpf`Highs::callSolve..mwpf`solveLp(HighsLpSolverObject&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator (965 samples, 13.74%)mwpf`solveLp(HighsLpS..mwpf`solveLpSimplex(HighsLpSolverObject&) (965 samples, 13.74%)mwpf`solveLpSimplex(H..mwpf`considerScaling(HighsOptions const&, HighsLp&) (56 samples, 0.80%)mwpf`HighsSparseMatrix::range (56 samples, 0.80%)mwpf`Highs::runPostsolve (10 samples, 0.14%)mwpf`calculateRowValuesQuad(HighsLp const&, HighsSolution&, int) (10 samples, 0.14%)mwpf`PresolveComponent::init (1 samples, 0.01%)mwpf`HighsLp::operator= (1 samples, 0.01%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)libsystem_m.dylib`frexp (17 samples, 0.24%)libsystem_m.dylib`ldexp (6 samples, 0.09%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)mwpf`DYLD-STUB$$frexp (14 samples, 0.20%)mwpf`DYLD-STUB$$ldexp (7 samples, 0.10%)mwpf`HighsHashHelpers::sparse_combine (193 samples, 2.75%)mw..mwpf`HighsLinearSumBounds::remove (34 samples, 0.48%)mwpf`HighsLinearSumBounds::updatedImplVarLower (10 samples, 0.14%)mwpf`HighsMatrixSlice<HighsTripletTreeSliceInOrder>::iterator::operator++ (9 samples, 0.13%)mwpf`presolve::HPresolve::changeImplColLower (2 samples, 0.03%)mwpf`presolve::HPresolve::markChangedRow (1 samples, 0.01%)mwpf`presolve::HPresolve::markChangedCol (3 samples, 0.04%)mwpf`presolve::HPresolve::markChangedRow (8 samples, 0.11%)mwpf`HighsLinearSumBounds::updatedVarLower (8 samples, 0.11%)mwpf`presolve::HPresolve::markChangedRow (3 samples, 0.04%)mwpf`presolve::HPresolve::removeRowSingletons (14 samples, 0.20%)mwpf`presolve::HPresolve::singletonRow (3 samples, 0.04%)mwpf`presolve::HPresolve::markChangedRow (2 samples, 0.03%)mwpf`HighsMatrixSlice<HighsTripletTreeSliceInOrder>::iterator::iterator (7 samples, 0.10%)libsystem_malloc.dylib`_malloc_zone_malloc (1 samples, 0.01%)mwpf`presolve::HPresolve::storeRow (11 samples, 0.16%)mwpf`HighsMatrixSlice<HighsTripletTreeSliceInOrder>::iterator::operator++ (2 samples, 0.03%)mwpf`presolve::HPresolve::markChangedCol (1 samples, 0.01%)mwpf`presolve::HPresolve::unlink (124 samples, 1.77%)mwpf`presolve::HPresolve::markChangedRow (2 samples, 0.03%)mwpf`std::__1::__hash_table<std::__1::__hash_value_type<unsigned long long, int>, std::__1::__unordered_map_hasher<unsigned long long, std::__1::__hash_value_type<unsigned long long, int>, std::__1::hash<unsigned long long>, std::__1::equal_to<unsigned long long>, true>, std::__1::__unordered_map_equal<unsigned long long, std::__1::__hash_value_type<unsigned long long, int>, std::__1::equal_to<unsigned long long>, std::__1::hash<unsigned long long>, true>, std::__1::allocator<std::__1::__hash_value_type<unsigned long long, int>>>::__node_insert_multi(std::__1::__hash_node<std::__1::__hash_value_type (1 samples, 0.01%)mwpf`std::__1::__hash_table<std::__1::__hash_value_type<unsigned long long, int>, std::__1::__unordered_map_hasher<unsigned long long, std::__1::__hash_value_type<unsigned long long, int>, std::__1::hash<unsigned long long>, std::__1::equal_to<unsigned long long>, true>, std::__1::__unordered_map_equal<unsigned long long, std::__1::__hash_value_type<unsigned long long, int>, std::__1::equal_to<unsigned long long>, std::__1::hash<unsigned long long>, true>, std::__1::allocator<std::__1::__hash_value_type<unsigned long long, int>>>::__node_insert_multi_prepare(unsigned long, std::__1::__hash_value_type (1 samples, 0.01%)mwpf`presolve::HPresolve::detectParallelRowsAndCols (448 samples, 6.38%)mwpf`pre..mwpf`std::__1::__hash_table<std::__1::__hash_value_type<unsigned long long, int>, std::__1::__unordered_map_hasher<unsigned long long, std::__1::__hash_value_type<unsigned long long, int>, std::__1::hash<unsigned long long>, std::__1::equal_to<unsigned long long>, true>, std::__1::__unordered_map_equal<unsigned long long, std::__1::__hash_value_type<unsigned long long, int>, std::__1::equal_to<unsigned long long>, std::__1::hash<unsigned long long>, true>, std::__1::allocator<std::__1::__hash_value_type<unsigned long long, int>>>::__node_insert_multi(std::__1::__hash_const_iterator<std::__1::__hash_node<std::__1::__hash_value_type<unsigned long long, int>, void*>*>, std::__1::__hash_node<std::__1::__hash_value_type (2 samples, 0.03%)mwpf`std::__1::__hash_table<std::__1::__hash_value_type<unsigned long long, int>, std::__1::__unordered_map_hasher<unsigned long long, std::__1::__hash_value_type<unsigned long long, int>, std::__1::hash<unsigned long long>, std::__1::equal_to<unsigned long long>, true>, std::__1::__unordered_map_equal<unsigned long long, std::__1::__hash_value_type<unsigned long long, int>, std::__1::equal_to<unsigned long long>, std::__1::hash<unsigned long long>, true>, std::__1::allocator<std::__1::__hash_value_type<unsigned long long, int>>>::__node_insert_multi_prepare(unsigned long, std::__1::__hash_value_type (1 samples, 0.01%)mwpf`HighsMatrixSlice<HighsTripletTreeSlicePreOrder>::iterator::operator++ (4 samples, 0.06%)mwpf`HighsLinearSumBounds::getResidualSumLowerOrig (6 samples, 0.09%)mwpf`HighsMatrixSlice<HighsTripletTreeSlicePreOrder>::iterator::operator++ (4 samples, 0.06%)mwpf`HighsLinearSumBounds::updatedImplVarUpper (6 samples, 0.09%)mwpf`presolve::HPresolve::changeImplColUpper (5 samples, 0.07%)mwpf`presolve::HPresolve::markChangedRow (1 samples, 0.01%)mwpf`presolve::HPresolve::rowPresolve (44 samples, 0.63%)mwpf`presolve::HPresolve::updateColImpliedBounds (32 samples, 0.46%)mwpf`presolve::HPresolve::markChangedRow (2 samples, 0.03%)mwpf`presolve::HPresolve::fastPresolveLoop (49 samples, 0.70%)mwpf`presolve::HPresolve::presolveChangedRows (49 samples, 0.70%)mwpf`presolve::HPresolve::updateColImpliedBounds (1 samples, 0.01%)mwpf`presolve::HPresolve::findNonzero (50 samples, 0.71%)mwpf`HighsLinearSumBounds::add (20 samples, 0.28%)mwpf`presolve::HPresolve::fromCSC(std::__1::vector<double, std::__1::allocator<double>> const&, std::__1::vector<int, std::__1::allocator<int>> const&, std::__1::vector<int, std::__1::allocator (35 samples, 0.50%)mwpf`presolve::HPresolve::link (15 samples, 0.21%)mwpf`HighsLinearSumBounds::getResidualSumLowerOrig (8 samples, 0.11%)mwpf`presolve::HPresolve::colPresolve (24 samples, 0.34%)mwpf`presolve::HPresolve::updateRowDualImpliedBounds (11 samples, 0.16%)libc++.1.dylib`std::__1::chrono::steady_clock::now (2 samples, 0.03%)libsystem_c.dylib`clock_gettime (2 samples, 0.03%)libsystem_c.dylib`clock_gettime_nsec_np (2 samples, 0.03%)libsystem_kernel.dylib`mach_continuous_time (2 samples, 0.03%)mwpf`HighsLinearSumBounds::updatedImplVarLower (4 samples, 0.06%)mwpf`presolve::HPresolve::changeImplColLower (5 samples, 0.07%)mwpf`presolve::HPresolve::rowPresolve (15 samples, 0.21%)mwpf`presolve::HPresolve::updateColImpliedBounds (13 samples, 0.19%)mwpf`presolve::HPresolve::markChangedRow (4 samples, 0.06%)mwpf`presolve::HPresolve::initialRowAndColPresolve (47 samples, 0.67%)mwpf`presolve::HPresolve::updateRowDualImpliedBounds (8 samples, 0.11%)mwpf`presolve::HPresolve::link (2 samples, 0.03%)mwpf`presolve::HPresolve::shrinkProblem (11 samples, 0.16%)mwpf`presolve::HPresolve::toCSC(std::__1::vector<double, std::__1::allocator<double>>&, std::__1::vector<int, std::__1::allocator<int>>&, std::__1::vector<int, std::__1::allocator (12 samples, 0.17%)mwpf`presolve::HPresolve::presolve (900 samples, 12.81%)mwpf`presolve::HPre..mwpf`presolve::HPresolve::unlink (8 samples, 0.11%)mwpf`presolve::HPresolve::shrinkProblem (1 samples, 0.01%)mwpf`presolve::HPresolve::run (907 samples, 12.91%)mwpf`presolve::HPre..mwpf`presolve::HPresolve::toCSC(std::__1::vector<double, std::__1::allocator<double>>&, std::__1::vector<int, std::__1::allocator<int>>&, std::__1::vector<int, std::__1::allocator (6 samples, 0.09%)libsystem_platform.dylib`__bzero (7 samples, 0.10%)libsystem_platform.dylib`_platform_memmove (4 samples, 0.06%)mwpf`HighsLinearSumBounds::add (95 samples, 1.35%)mwpf`presolve::HPresolve::link (46 samples, 0.65%)mwpf`std::__1::enable_if<__is_cpp17_forward_iterator<std::__1::__wrap_iter<int*>>::value && is_constructible<int, std::__1::iterator_traits<std::__1::__wrap_iter<int*>>::reference>::value, std::__1::__wrap_iter<int*>>::type std::__1::vector<int, std::__1::allocator<int>>::insert<std::__1::__wrap_iter<int*>>(std::__1::__wrap_iter<int const*>, std::__1::__wrap_iter<int*>, std::__1::__wrap_iter (1 samples, 0.01%)mwpf`presolve::HPresolve::fromCSC(std::__1::vector<double, std::__1::allocator<double>> const&, std::__1::vector<int, std::__1::allocator<int>> const&, std::__1::vector<int, std::__1::allocator (157 samples, 2.23%)m..mwpf`std::__1::vector<int, std::__1::allocator<int>>::insert(std::__1::__wrap_iter (3 samples, 0.04%)mwpf`presolve::HPresolve::setInput (160 samples, 2.28%)m..mwpf`presolve::HPresolve::link (3 samples, 0.04%)mwpf`Highs::runPresolve (1,077 samples, 15.33%)mwpf`Highs::runPresolvemwpf`PresolveComponent::run (1,076 samples, 15.32%)mwpf`PresolveComponent:..mwpf`presolve::HPresolve::~HPresolve (9 samples, 0.13%)libsystem_malloc.dylib`free_medium (9 samples, 0.13%)libsystem_kernel.dylib`madvise (9 samples, 0.13%)mwpf`HighsOptions::HighsOptions (1 samples, 0.01%)mwpf`HighsOptions::initRecords (1 samples, 0.01%)libsystem_platform.dylib`__bzero (1 samples, 0.01%)mwpf`HighsOptions::~HighsOptions (1 samples, 0.01%)mwpf`OptionRecordBool::~OptionRecordBool (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)mwpf`HighsSparseMatrix::assessSmallValues (1 samples, 0.01%)libc++abi.dylib`operator new(unsigned long) (1 samples, 0.01%)libsystem_malloc.dylib`szone_malloc_should_clear (1 samples, 0.01%)mwpf`HighsSparseMatrix::ensureColwise (20 samples, 0.28%)libsystem_malloc.dylib`free_medium (1 samples, 0.01%)libsystem_kernel.dylib`madvise (1 samples, 0.01%)mwpf`Highs::run (2,076 samples, 29.55%)mwpf`Highs::runmwpf`OptionRecordString::~OptionRecordString (1 samples, 0.01%)mwpf`highs::Model::solve (2,079 samples, 29.59%)mwpf`highs::Model::solvemwpf`HighsSparseMatrix::hasLargeValue (3 samples, 0.04%)libsystem_c.dylib`signal__ (1 samples, 0.01%)libsystem_kernel.dylib`__sigaction (1 samples, 0.01%)mwpf`HighsInfo::initRecords (1 samples, 0.01%)mwpf`InfoRecordInt::InfoRecordInt(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>>, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator (1 samples, 0.01%)libc++.1.dylib`std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>>::basic_string(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator (1 samples, 0.01%)libc++.1.dylib`std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>>& std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char>>::__assign_no_alias<true> (1 samples, 0.01%)mwpf`highs::Problem<MATRIX>::try_optimise (4 samples, 0.06%)mwpf`Highs_create (4 samples, 0.06%)mwpf`Highs::Highs (4 samples, 0.06%)mwpf`HighsOptions::initRecords (2 samples, 0.03%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)mwpf`<mwpf::primal_module_serial::PrimalModuleSerial as mwpf::primal_module::PrimalModuleImpl>::resolve_cluster_tune (6,941 samples, 98.80%)mwpf`<mwpf::primal_module_serial::PrimalModuleSerial as mwpf::primal_module::PrimalModuleImpl>::resolve_cluster_tunemwpf`mwpf::relaxer_optimizer::RelaxerOptimizer::optimize (2,790 samples, 39.72%)mwpf`mwpf::relaxer_optimizer::RelaxerOptimizer::optimizemwpf`mwpf::relaxer::Relaxer::new_raw (50 samples, 0.71%)libsystem_platform.dylib`_platform_memmove (1 samples, 0.01%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (1 samples, 0.01%)libsystem_malloc.dylib`_nanov2_free (1 samples, 0.01%)mwpf`mwpf::dual_module::DualModuleImpl::get_conflicts_tune (39 samples, 0.56%)mwpf`alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (2 samples, 0.03%)mwpf`mwpf::primal_module_serial::PrimalModuleSerial::union (9 samples, 0.13%)mwpf`<mwpf::matrix::basic::BasicMatrix as mwpf::matrix::interface::MatrixBasic>::add_constraint (1 samples, 0.01%)mwpf`<mwpf::primal_module_serial::PrimalModuleSerial as mwpf::primal_module::PrimalModuleImpl>::resolve_tune (6,996 samples, 99.59%)mwpf`<mwpf::primal_module_serial::PrimalModuleSerial as mwpf::primal_module::PrimalModuleImpl>::resolve_tunemwpf`std::time::Instant::elapsed (1 samples, 0.01%)mwpf`std::sys::unix::time::Timespec::now (1 samples, 0.01%)libsystem_c.dylib`clock_gettime (1 samples, 0.01%)libsystem_c.dylib`clock_gettime_nsec_np (1 samples, 0.01%)mwpf`<mwpf::mwpf_solver::SolverSerialPlugins as mwpf::mwpf_solver::PrimalDualSolver>::solve_visualizer (7,001 samples, 99.66%)mwpf`<mwpf::mwpf_solver::SolverSerialPlugins as mwpf::mwpf_solver::PrimalDualSolver>::solve_visualizermwpf`mwpf::primal_module::PrimalModuleImpl::solve_visualizer (7,001 samples, 99.66%)mwpf`mwpf::primal_module::PrimalModuleImpl::solve_visualizermwpf`mwpf::primal_module_serial::PrimalModuleSerial::union (1 samples, 0.01%)dyld`start (7,023 samples, 99.97%)dyld`startmwpf`main (7,022 samples, 99.96%)mwpf`mainmwpf`std::rt::lang_start (7,022 samples, 99.96%)mwpf`std::rt::lang_startmwpf`std::rt::lang_start_internal (7,022 samples, 99.96%)mwpf`std::rt::lang_start_internalmwpf`std::rt::lang_start::_{{closure}} (7,022 samples, 99.96%)mwpf`std::rt::lang_start::_{{closure}}mwpf`std::sys_common::backtrace::__rust_begin_short_backtrace (7,022 samples, 99.96%)mwpf`std::sys_common::backtrace::__rust_begin_short_backtracemwpf`mwpf::main (7,022 samples, 99.96%)mwpf`mwpf::mainmwpf`mwpf::cli::Cli::run (7,022 samples, 99.96%)mwpf`mwpf::cli::Cli::runmwpf`mwpf::dual_module::_<impl mwpf::pointers::ArcRwLock<mwpf::dual_module::DualModuleInterface>>::clear (2 samples, 0.03%)mwpf`hashbrown::raw::RawTable<T,A>::clear (2 samples, 0.03%)mwpf`alloc::sync::Arc<T,A>::drop_slow (2 samples, 0.03%)mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (2 samples, 0.03%)libsystem_malloc.dylib`nanov2_madvise_block (2 samples, 0.03%)libsystem_malloc.dylib`nanov2_madvise_block_locked (2 samples, 0.03%)libsystem_kernel.dylib`madvise (2 samples, 0.03%)all (7,025 samples, 100%)libsystem_kernel.dylib`__exit (2 samples, 0.03%) \ No newline at end of file diff --git a/flamegraph_long.svg b/flamegraph_long.svg new file mode 100644 index 00000000..ad94cc7b --- /dev/null +++ b/flamegraph_long.svg @@ -0,0 +1,491 @@ +Flame Graph Reset ZoomSearch mwpf`clap_builder::builder::command::Command::get_matches_from (1 samples, 14.29%)mwpf`clap_builder::bui..mwpf`clap_builder::builder::command::Command::_do_parse (1 samples, 14.29%)mwpf`clap_builder::bui..mwpf`clap_builder::parser::parser::Parser::get_matches_with (1 samples, 14.29%)mwpf`clap_builder::par..mwpf`clap_builder::parser::parser::Parser::get_matches_with (1 samples, 14.29%)mwpf`clap_builder::par..mwpf`clap_builder::parser::parser::Parser::parse_opt_value (1 samples, 14.29%)mwpf`clap_builder::par..mwpf`clap_builder::parser::parser::Parser::resolve_pending (1 samples, 14.29%)mwpf`clap_builder::par..mwpf`clap_builder::parser::parser::Parser::react (1 samples, 14.29%)mwpf`clap_builder::par..mwpf`clap_builder::parser::parser::Parser::push_arg_values (1 samples, 14.29%)mwpf`clap_builder::par..mwpf`<P as clap_builder::builder::value_parser::AnyValueParser>::parse_ref_ (1 samples, 14.29%)mwpf`<P as clap_builde..mwpf`serde_json::de::from_trait (1 samples, 14.29%)mwpf`serde_json::de::f..mwpf`<mwpf::primal_module_serial::PrimalModuleSerial as mwpf::primal_module::PrimalModuleImpl>::resolve (1 samples, 14.29%)mwpf`<mwpf::primal_mod..mwpf`mwpf::plugin::PluginManager::find_relaxer (1 samples, 14.29%)mwpf`mwpf::plugin::Plu..mwpf`mwpf::matrix::echelon::Echelon<M>::force_update_echelon_info (1 samples, 14.29%)mwpf`mwpf::matrix::ech..mwpf`<mwpf::matrix::tail::Tail<M> as mwpf::matrix::interface::MatrixView>::columns (1 samples, 14.29%)mwpf`<mwpf::matrix::ta..mwpf`<mwpf::matrix::tight::Tight<M> as mwpf::matrix::interface::MatrixView>::columns (1 samples, 14.29%)mwpf`<mwpf::matrix::ti..mwpf`<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (1 samples, 14.29%)mwpf`<alloc::collectio..mwpf`alloc::sync::Arc<T,A>::drop_slow (1 samples, 14.29%)mwpf`alloc::sync::Arc<..mwpf`alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (1 samples, 14.29%)mwpf`alloc::collection..mwpf`mwpf::plugin::PluginManager::find_relaxer (2 samples, 28.57%)mwpf`mwpf::plugin::PluginManager::find_relaxermwpf`mwpf::plugin::PluginEntry::execute (1 samples, 14.29%)mwpf`mwpf::plugin::Plu..mwpf`<mwpf::plugin_single_hair::PluginSingleHair as mwpf::plugin::PluginImpl>::find_relaxers (1 samples, 14.29%)mwpf`<mwpf::plugin_sin..mwpf`mwpf::invalid_subgraph::InvalidSubgraph::new_complete (1 samples, 14.29%)mwpf`mwpf::invalid_sub..mwpf`<mwpf::primal_module_serial::PrimalModuleSerial as mwpf::primal_module::PrimalModuleImpl>::resolve_cluster_tune (3 samples, 42.86%)mwpf`<mwpf::primal_module_serial::PrimalModuleSerial as mwpf::primal_m..mwpf`mwpf::relaxer_optimizer::RelaxerOptimizer::optimize (1 samples, 14.29%)mwpf`mwpf::relaxer_opt..mwpf`highs::Model::solve (1 samples, 14.29%)mwpf`highs::Model::sol..mwpf`Highs::run (1 samples, 14.29%)mwpf`Highs::runmwpf`Highs::runPresolve (1 samples, 14.29%)mwpf`Highs::runPresolvemwpf`PresolveComponent::run (1 samples, 14.29%)mwpf`PresolveComponent..mwpf`presolve::HPresolve::run (1 samples, 14.29%)mwpf`presolve::HPresol..mwpf`presolve::HPresolve::presolve (1 samples, 14.29%)mwpf`presolve::HPresol..mwpf`presolve::HPresolve::initialRowAndColPresolve (1 samples, 14.29%)mwpf`presolve::HPresol..mwpf`presolve::HPresolve::rowPresolve (1 samples, 14.29%)mwpf`presolve::HPresol..libc++.1.dylib`std::__1::chrono::steady_clock::now (1 samples, 14.29%)libc++.1.dylib`std::__..libsystem_c.dylib`clock_gettime (1 samples, 14.29%)libsystem_c.dylib`cloc..libsystem_c.dylib`clock_gettime_nsec_np (1 samples, 14.29%)libsystem_c.dylib`cloc..libsystem_kernel.dylib`mach_continuous_time (1 samples, 14.29%)libsystem_kernel.dylib..mwpf`<mwpf::plugin_single_hair::PluginSingleHair as mwpf::plugin::PluginImpl>::find_relaxers (1 samples, 14.29%)mwpf`<mwpf::plugin_sin..mwpf`mwpf::matrix::echelon::Echelon<M>::force_update_echelon_info (1 samples, 14.29%)mwpf`mwpf::matrix::ech..mwpf`<mwpf::matrix::tail::Tail<M> as mwpf::matrix::interface::MatrixView>::columns (1 samples, 14.29%)mwpf`<mwpf::matrix::ta..all (7 samples, 100%)dyld`start (7 samples, 100.00%)dyld`startmwpf`main (7 samples, 100.00%)mwpf`mainmwpf`std::rt::lang_start (7 samples, 100.00%)mwpf`std::rt::lang_startmwpf`std::rt::lang_start_internal (7 samples, 100.00%)mwpf`std::rt::lang_start_internalmwpf`std::rt::lang_start::_{{closure}} (7 samples, 100.00%)mwpf`std::rt::lang_start::_{{closure}}mwpf`std::sys_common::backtrace::__rust_begin_short_backtrace (7 samples, 100.00%)mwpf`std::sys_common::backtrace::__rust_begin_short_backtracemwpf`mwpf::main (7 samples, 100.00%)mwpf`mwpf::mainmwpf`mwpf::cli::Cli::run (6 samples, 85.71%)mwpf`mwpf::cli::Cli::runmwpf`<mwpf::mwpf_solver::SolverSerialPlugins as mwpf::mwpf_solver::PrimalDualSolver>::solve_visualizer (6 samples, 85.71%)mwpf`<mwpf::mwpf_solver::SolverSerialPlugins as mwpf::mwpf_solver::PrimalDualSolver>::solve_visualizermwpf`mwpf::primal_module::PrimalModuleImpl::solve_visualizer (6 samples, 85.71%)mwpf`mwpf::primal_module::PrimalModuleImpl::solve_visualizermwpf`<mwpf::primal_module_serial::PrimalModuleSerial as mwpf::primal_module::PrimalModuleImpl>::resolve_tune (2 samples, 28.57%)mwpf`<mwpf::primal_module_serial::PrimalModule..mwpf`<mwpf::primal_module_serial::PrimalModuleSerial as mwpf::primal_module::PrimalModuleImpl>::resolve_cluster_tune (2 samples, 28.57%)mwpf`<mwpf::primal_module_serial::PrimalModule..mwpf`mwpf::plugin::PluginManager::find_relaxer (2 samples, 28.57%)mwpf`mwpf::plugin::PluginManager::find_relaxermwpf`mwpf::plugin::PluginEntry::execute (2 samples, 28.57%)mwpf`mwpf::plugin::PluginEntry::executemwpf`<mwpf::plugin_union_find::PluginUnionFind as mwpf::plugin::PluginImpl>::find_relaxers (1 samples, 14.29%)mwpf`<mwpf::plugin_uni..mwpf`mwpf::plugin_union_find::PluginUnionFind::find_single_relaxer (1 samples, 14.29%)mwpf`mwpf::plugin_unio..mwpf`mwpf::invalid_subgraph::InvalidSubgraph::new_complete_ptr (1 samples, 14.29%)mwpf`mwpf::invalid_sub..mwpf`mwpf::invalid_subgraph::InvalidSubgraph::new_complete (1 samples, 14.29%)mwpf`mwpf::invalid_sub.. \ No newline at end of file diff --git a/src/bin/aps2024_demo.rs b/src/bin/aps2024_demo.rs.save similarity index 98% rename from src/bin/aps2024_demo.rs rename to src/bin/aps2024_demo.rs.save index e170e3a5..f5b64be3 100644 --- a/src/bin/aps2024_demo.rs +++ b/src/bin/aps2024_demo.rs.save @@ -40,7 +40,7 @@ fn debug_demo() { primal_module.growing_strategy = GrowingStrategy::SingleCluster; primal_module.plugins = Arc::new(vec![]); primal_module.solve_visualizer(&interface_ptr, syndrome_pattern, &mut dual_module, Some(&mut visualizer)); - let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module); + let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module, 0); visualizer .snapshot_combined( "subgraph".to_string(), @@ -110,7 +110,7 @@ fn simple_demo() { primal_module.growing_strategy = GrowingStrategy::SingleCluster; primal_module.plugins = Arc::new(vec![]); primal_module.solve_visualizer(&interface_ptr, syndrome_pattern, &mut dual_module, Some(&mut visualizer)); - let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module); + let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module, 0); visualizer .snapshot_combined( "subgraph".to_string(), @@ -178,7 +178,7 @@ fn challenge_demo() { }), ]); primal_module.solve_visualizer(&interface_ptr, syndrome_pattern, &mut dual_module, Some(&mut visualizer)); - let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module); + let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module, 0); visualizer .snapshot_combined( "subgraph".to_string(), @@ -321,7 +321,7 @@ fn surface_code_example() { }), ]); primal_module.solve_visualizer(&interface_ptr, syndrome_pattern, &mut dual_module, Some(&mut visualizer)); - let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module); + let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module, seed); visualizer .snapshot_combined( "subgraph".to_string(), @@ -367,7 +367,7 @@ fn triangle_color_code_example() { }), ]); primal_module.solve_visualizer(&interface_ptr, syndrome_pattern, &mut dual_module, Some(&mut visualizer)); - let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module); + let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module, seed); visualizer .snapshot_combined( "subgraph".to_string(), @@ -417,7 +417,7 @@ fn small_color_code_example() { }), ]); primal_module.solve_visualizer(&interface_ptr, syndrome_pattern, &mut dual_module, Some(&mut visualizer)); - let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module); + let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module, seed); visualizer .snapshot_combined( "subgraph".to_string(), @@ -472,7 +472,7 @@ fn circuit_level_example() { ]); primal_module.config.timeout = timeout; primal_module.solve_visualizer(&interface_ptr, syndrome_pattern, &mut dual_module, Some(&mut visualizer)); - let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module); + let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module, 0); visualizer .snapshot_combined( "subgraph".to_string(), diff --git a/src/bin/test_1.rs.save b/src/bin/test_1.rs.save new file mode 100644 index 00000000..c906babf --- /dev/null +++ b/src/bin/test_1.rs.save @@ -0,0 +1,60 @@ +use mwpf::{dual_module_pq::DualModulePQ, example_codes::CodeCapacityTailoredCode, plugin::{PluginImpl, RepeatStrategy}, plugin_single_hair::PluginSingleHair, plugin_union_find::PluginUnionFind}; + +pub fn main() { + primal_module_serial_basic_3_improved_with_dual_pq_impl_m(); +} + +fn primal_module_serial_basic_3_improved_with_dual_pq_impl_m() { + // cargo test primal_module_serial_basic_3_improved_with_dual_pq_impl_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_3_improved_with_dual_pq_impl_m.json".to_string(); + let defect_vertices = vec![14, 15, 16, 17, 22, 25, 28, 31, 36, 37, 38, 39]; + let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); + primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( + code, + visualize_filename, + defect_vertices, + 5, + vec![ + PluginUnionFind::entry(), + PluginSingleHair::entry_with_strategy(RepeatStrategy::Once), + ], + GrowingStrategy::ModeBased, + ); +} + +pub fn primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( + code: impl ExampleCode, + visualize_filename: String, + defect_vertices: Vec, + final_dual: Weight, + plugins: PluginVec, + growing_strategy: GrowingStrategy, +) -> ( + DualModuleInterfacePtr, + PrimalModuleSerial, + impl DualModuleImpl + MWPSVisualizer, +) { + println!("{defect_vertices:?}"); + let visualizer = { + let visualizer = Visualizer::new( + Some(visualize_data_folder() + visualize_filename.as_str()), + code.get_positions(), + true, + ) + .unwrap(); + print_visualize_link(visualize_filename.clone()); + visualizer + }; + // create dual module + let model_graph = code.get_model_graph(); + primal_module_serial_basic_standard_syndrome_optional_viz( + code, + defect_vertices, + final_dual, + plugins, + growing_strategy, + DualModulePQ::>::new_empty(&model_graph.initializer), + model_graph, + Some(visualizer), + ) +} \ No newline at end of file diff --git a/src/cli.rs b/src/cli.rs index 188cb7c1..c48be8b4 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -9,6 +9,8 @@ use clap::{Parser, Subcommand, ValueEnum}; use more_asserts::assert_le; use num_traits::FromPrimitive; use pbr::ProgressBar; +use rand::rngs::SmallRng; +use rand::RngCore; use rand::{thread_rng, Rng, SeedableRng}; use serde::Serialize; use serde_variant::to_variant_name; @@ -99,6 +101,12 @@ pub struct BenchmarkParameters { /// skip some iterations, useful when debugging #[clap(long, default_value_t = 0)] starting_iteration: usize, + /// apply deterministic seed for debugging purpose + #[clap(long, action)] + apply_deterministic_seed: Option, + /// single seed for debugging purposes + #[clap(long, action)] + single_seed: Option, } #[derive(Subcommand, Clone, Debug)] @@ -142,6 +150,10 @@ pub enum ExampleCodeType { CodeCapacityTailoredCode, /// read from error pattern file, generated using option `--primal-dual-type error-pattern-logger` ErrorPatternReader, + /// code constructed by QEC-Playground, pass configurations using `--code-config` + #[cfg(feature = "qecp_integrate")] + #[serde(rename = "qec-playground-code")] + QECPlaygroundCode, } #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum, Serialize, Debug)] @@ -221,45 +233,45 @@ impl TypedValueParser for SerdeJsonParser { } } -impl MatrixSpeedClass { - pub fn run(&self, parameters: MatrixSpeedParameters, samples: Vec, bool)>>) { - match *self { - MatrixSpeedClass::EchelonTailTight => { - let mut matrix = Echelon::>>::new(); - for edge_index in 0..parameters.width { - matrix.add_tight_variable(edge_index); - } - Self::run_on_matrix_interface(&matrix, samples) - } - MatrixSpeedClass::EchelonTight => { - let mut matrix = Echelon::>::new(); - for edge_index in 0..parameters.width { - matrix.add_tight_variable(edge_index); - } - Self::run_on_matrix_interface(&matrix, samples) - } - MatrixSpeedClass::Echelon => { - let mut matrix = Echelon::::new(); - for edge_index in 0..parameters.width { - matrix.add_variable(edge_index); - } - Self::run_on_matrix_interface(&matrix, samples) - } - } - } +// impl MatrixSpeedClass { +// pub fn run(&self, parameters: MatrixSpeedParameters, samples: Vec, bool)>>) { +// match *self { +// MatrixSpeedClass::EchelonTailTight => { +// let mut matrix = Echelon::>>::new(); +// for edge_index in 0..parameters.width { +// matrix.add_tight_variable(edge_index); +// } +// Self::run_on_matrix_interface(&matrix, samples) +// } +// MatrixSpeedClass::EchelonTight => { +// let mut matrix = Echelon::>::new(); +// for edge_index in 0..parameters.width { +// matrix.add_tight_variable(edge_index); +// } +// Self::run_on_matrix_interface(&matrix, samples) +// } +// MatrixSpeedClass::Echelon => { +// let mut matrix = Echelon::::new(); +// for edge_index in 0..parameters.width { +// matrix.add_variable(edge_index); +// } +// Self::run_on_matrix_interface(&matrix, samples) +// } +// } +// } - pub fn run_on_matrix_interface(matrix: &M, samples: Vec, bool)>>) { - for parity_checks in samples.iter() { - let mut matrix = matrix.clone(); - for (vertex_index, (incident_edges, parity)) in parity_checks.iter().enumerate() { - matrix.add_constraint(vertex_index, incident_edges, *parity); - } - // for a MatrixView, visiting the columns and rows is sufficient to update its internal state - matrix.columns(); - matrix.rows(); - } - } -} +// pub fn run_on_matrix_interface(matrix: &M, samples: Vec, bool)>>) { +// for parity_checks in samples.iter() { +// let mut matrix = matrix.clone(); +// for (vertex_index, (incident_edges, parity)) in parity_checks.iter().enumerate() { +// matrix.add_constraint(vertex_index, incident_edges, *parity); +// } +// // for a MatrixView, visiting the columns and rows is sufficient to update its internal state +// matrix.columns(); +// matrix.rows(); +// } +// } +// } impl Cli { pub fn run(self) { @@ -283,6 +295,8 @@ impl Cli { print_syndrome_pattern, starting_iteration, print_error_pattern, + apply_deterministic_seed, + single_seed, }) => { // whether to disable progress bar, useful when running jobs in background let disable_progress_bar = env::var("DISABLE_PROGRESS_BAR").is_ok(); @@ -298,7 +312,6 @@ impl Cli { let initializer = code.get_initializer(); let mut primal_dual_solver = primal_dual_type.build(&initializer, &*code, primal_dual_config); let mut result_verifier = verifier.build(&initializer); - let mut benchmark_profiler = BenchmarkProfiler::new(noisy_measurements, benchmark_profiler_output); // prepare progress bar display let mut pb = if !disable_progress_bar { let mut pb = ProgressBar::on(std::io::stderr(), total_rounds as u64); @@ -310,10 +323,59 @@ impl Cli { } None }; - let mut rng = thread_rng(); + + if let Some(seed) = single_seed { + let (syndrome_pattern, error_pattern) = code.generate_random_errors(seed); + if print_syndrome_pattern { + println!("syndrome_pattern: {:?}", syndrome_pattern); + } + if print_error_pattern { + println!("error_pattern: {:?}", error_pattern); + } + // create a new visualizer each round + let mut visualizer = None; + if enable_visualizer { + let new_visualizer = Visualizer::new( + Some(visualize_data_folder() + static_visualize_data_filename().as_str()), + code.get_positions(), + true, + ) + .unwrap(); + visualizer = Some(new_visualizer); + } + + let begin_time = std::time::Instant::now(); + primal_dual_solver.solve_visualizer(&syndrome_pattern, visualizer.as_mut(), seed); // FIXME: for release, remove the seed that is passed in for debugging purposes + + // solver load the defect vertices from their indices + result_verifier.verify( + &mut primal_dual_solver, + &syndrome_pattern, + &error_pattern, + visualizer.as_mut(), + seed, + ); + primal_dual_solver.clear(); // also count the clear operation + let end_time = std::time::Instant::now(); + let resolve_time = (end_time - begin_time).as_millis(); + println!("resolve time {:?}", resolve_time); + + return; + } + + let mut benchmark_profiler = BenchmarkProfiler::new(noisy_measurements, benchmark_profiler_output); + // let mut rng = thread_rng(); + thread_rng().gen::(); + let mut seed = match apply_deterministic_seed { + Some(seed) => seed, + None => thread_rng().gen::(), + }; + let mut rng = SmallRng::seed_from_u64(seed); + // println!("OG_s: {:?}", seed); for round in (starting_iteration as u64)..(total_rounds as u64) { pb.as_mut().map(|pb| pb.set(round)); - let seed = if use_deterministic_seed { round } else { rng.gen() }; + seed = if use_deterministic_seed { round } else { rng.next_u64() }; + // println!("NEW rng seed: {:?}", seed); let (syndrome_pattern, error_pattern) = code.generate_random_errors(seed); if print_syndrome_pattern { println!("syndrome_pattern: {:?}", syndrome_pattern); @@ -333,16 +395,18 @@ impl Cli { visualizer = Some(new_visualizer); } benchmark_profiler.begin(&syndrome_pattern, &error_pattern); - primal_dual_solver.solve_visualizer(&syndrome_pattern, visualizer.as_mut()); + primal_dual_solver.solve_visualizer(&syndrome_pattern, visualizer.as_mut(), seed); // FIXME: for release, remove the seed that is passed in for debugging purposes benchmark_profiler.event("decoded".to_string()); result_verifier.verify( &mut primal_dual_solver, &syndrome_pattern, &error_pattern, visualizer.as_mut(), + seed, ); benchmark_profiler.event("verified".to_string()); primal_dual_solver.clear(); // also count the clear operation + benchmark_profiler.end(Some(&*primal_dual_solver)); if let Some(pb) = pb.as_mut() { if pb_message.is_empty() { @@ -359,6 +423,8 @@ impl Cli { } println!(); } + + eprintln!("total resolve time {:?}", benchmark_profiler.sum_round_time); } Commands::MatrixSpeed(parameters) => { let MatrixSpeedParameters { @@ -384,7 +450,7 @@ impl Cli { samples.push(parity_checks); } // call the matrix operation - matrix_type.run(parameters, samples); + // matrix_type.run(parameters, samples); } Commands::Test { command } => match command { TestCommands::Common => { @@ -525,6 +591,8 @@ impl ExampleCodeType { Box::new(CodeCapacityColorCode::new(d, p, max_weight)) } Self::ErrorPatternReader => Box::new(ErrorPatternReader::new(code_config)), + #[cfg(feature = "qecp_integrate")] + Self::QECPlaygroundCode => Box::new(QECPlaygroundCode::new(d, p, code_config)), } } } @@ -569,8 +637,9 @@ trait ResultVerifier { &mut self, primal_dual_solver: &mut Box, syndrome_pattern: &SyndromePattern, - error_pattern: &Subgraph, + error_pattern: &Vec, visualizer: Option<&mut Visualizer>, + seed: u64, ); } @@ -581,8 +650,9 @@ impl ResultVerifier for VerifierNone { &mut self, _primal_dual_solver: &mut Box, _syndrome_pattern: &SyndromePattern, - _error_pattern: &Subgraph, + _error_pattern: &Vec, _visualizer: Option<&mut Visualizer>, + _seed: u64, ) { } } @@ -596,8 +666,9 @@ impl ResultVerifier for VerifierFusionSerial { &mut self, _primal_dual_solver: &mut Box, _syndrome_pattern: &SyndromePattern, - _error_pattern: &Subgraph, + _error_pattern: &Vec, _visualizer: Option<&mut Visualizer>, + _seed: u64, ) { println!("{}", self.initializer.vertex_num); unimplemented!() @@ -614,30 +685,36 @@ impl ResultVerifier for VerifierActualError { &mut self, primal_dual_solver: &mut Box, syndrome_pattern: &SyndromePattern, - error_pattern: &Subgraph, + error_pattern: &Vec, visualizer: Option<&mut Visualizer>, + seed: u64, ) { if !syndrome_pattern.erasures.is_empty() { unimplemented!() } - let actual_weight = Rational::from_usize(self.initializer.get_subgraph_total_weight(error_pattern)).unwrap(); - let (subgraph, weight_range) = primal_dual_solver.subgraph_range_visualizer(visualizer); + let actual_weight = if error_pattern.is_empty() && !syndrome_pattern.defect_vertices.is_empty() { + // error pattern is not generated by the simulator + Rational::from_usize(usize::MAX).unwrap() + } else { + self.initializer.get_subgraph_index_total_weight(error_pattern) + }; + let (subgraph, weight_range) = primal_dual_solver.subgraph_range_visualizer(visualizer, seed); assert!( self.initializer .matches_subgraph_syndrome(&subgraph, &syndrome_pattern.defect_vertices), - "bug: the result subgraph does not match the syndrome" + "bug: the result subgraph does not match the syndrome || the seed is {seed:?}" ); assert_le!( weight_range.lower, actual_weight, - "bug: the lower bound of weight range is larger than the actual weight" + "bug: the lower bound of weight range is larger than the actual weight || the seed is {seed:?}" ); if self.is_strict { - let subgraph_weight = Rational::from_usize(self.initializer.get_subgraph_total_weight(&subgraph)).unwrap(); + let subgraph_weight = self.initializer.get_subgraph_total_weight(&subgraph); assert_le!(subgraph_weight, actual_weight, "it's not a minimum-weight parity subgraph: the actual error pattern has smaller weight, range: {weight_range:?}"); assert_eq!( weight_range.lower, weight_range.upper, - "the weight range must be optimal: lower = upper" + "the weight range must be optimal: lower = upper || the seed is {seed:?}" ); } } diff --git a/src/decoding_hypergraph.rs b/src/decoding_hypergraph.rs index ea92c6ba..ac9c9e4e 100644 --- a/src/decoding_hypergraph.rs +++ b/src/decoding_hypergraph.rs @@ -1,3 +1,5 @@ +use weak_table::PtrWeakHashSet; + use crate::matrix::*; use crate::model_hypergraph::*; use crate::util::*; @@ -5,6 +7,11 @@ use crate::visualize::*; use std::collections::{BTreeSet, HashSet}; use std::sync::Arc; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak}; + #[derive(Debug, Clone)] pub struct DecodingHyperGraph { /// model graph @@ -19,13 +26,13 @@ pub struct DecodingHyperGraph { impl DecodingHyperGraph { pub fn new(model_graph: Arc, syndrome_pattern: Arc) -> Self { - let mut decoding_graph = Self { + let decoding_graph = Self { model_graph, syndrome_pattern: syndrome_pattern.clone(), defect_vertices_hashset: HashSet::new(), erasures_hashset: HashSet::new(), }; - decoding_graph.set_syndrome(syndrome_pattern); + // decoding_graph.set_syndrome(syndrome_pattern); decoding_graph } @@ -54,47 +61,58 @@ impl DecodingHyperGraph { Self::new(model_graph, Arc::new(SyndromePattern::new_vertices(defect_vertices))) } - pub fn find_valid_subgraph(&self, edges: &BTreeSet, vertices: &BTreeSet) -> Option { - let mut matrix = Echelon::::new(); - for &edge_index in edges.iter() { - matrix.add_variable(edge_index); - } - - for &vertex_index in vertices.iter() { - let incident_edges = self.get_vertex_neighbors(vertex_index); - let parity = self.is_vertex_defect(vertex_index); - matrix.add_constraint(vertex_index, incident_edges, parity); - } - matrix.get_solution() - } - - pub fn find_valid_subgraph_auto_vertices(&self, edges: &BTreeSet) -> Option { - self.find_valid_subgraph(edges, &self.get_edges_neighbors(edges)) - } - - pub fn is_valid_cluster(&self, edges: &BTreeSet, vertices: &BTreeSet) -> bool { - self.find_valid_subgraph(edges, vertices).is_some() - } - - pub fn is_valid_cluster_auto_vertices(&self, edges: &BTreeSet) -> bool { - self.find_valid_subgraph_auto_vertices(edges).is_some() - } - - pub fn is_vertex_defect(&self, vertex_index: VertexIndex) -> bool { - self.defect_vertices_hashset.contains(&vertex_index) - } - - pub fn get_edge_neighbors(&self, edge_index: EdgeIndex) -> &Vec { - self.model_graph.get_edge_neighbors(edge_index) - } - - pub fn get_vertex_neighbors(&self, vertex_index: VertexIndex) -> &Vec { - self.model_graph.get_vertex_neighbors(vertex_index) - } - - pub fn get_edges_neighbors(&self, edges: &BTreeSet) -> BTreeSet { - self.model_graph.get_edges_neighbors(edges) - } + // pub fn find_valid_subgraph(&self, edges: &BTreeSet, vertices: &BTreeSet) -> Option { + // let mut matrix = Echelon::::new(); + // for edge_index in edges.iter() { + // matrix.add_variable(edge_index.downgrade()); + // } + + // for vertex_index in vertices.iter() { + // // let incident_edges = self.get_vertex_neighbors(vertex_index); + // // let parity = self.is_vertex_defect(vertex_index); + // let incident_edges = &vertex_index.read_recursive().edges; + // let parity = vertex_index.read_recursive().is_defect; + // matrix.add_constraint(vertex_index.downgrade(), &incident_edges, parity); + // } + // matrix.get_solution() + // } + + // pub fn find_valid_subgraph_auto_vertices(&self, edges: &BTreeSet) -> Option { + // let mut vertices: BTreeSet = BTreeSet::new(); + // for edge_ptr in edges.iter() { + // // let local_vertices = &edge_ptr.read_recursive().vertices; + // let local_vertices = &edge_ptr.get_vertex_neighbors(); + // for vertex in local_vertices { + // vertices.insert(vertex.upgrade_force()); + // } + // } + + // self.find_valid_subgraph(edges, &vertices) + // } + + // pub fn is_valid_cluster(&self, edges: &BTreeSet, vertices: &BTreeSet) -> bool { + // self.find_valid_subgraph(edges, vertices).is_some() + // } + + // pub fn is_valid_cluster_auto_vertices(&self, edges: &BTreeSet) -> bool { + // self.find_valid_subgraph_auto_vertices(edges).is_some() + // } + + // pub fn is_vertex_defect(&self, vertex_index: VertexIndex) -> bool { + // self.defect_vertices_hashset.contains(&vertex_index) + // } + + // pub fn get_edge_neighbors(&self, edge_index: EdgeIndex) -> &Vec { + // self.model_graph.get_edge_neighbors(edge_index) + // } + + // pub fn get_vertex_neighbors(&self, vertex_index: VertexIndex) -> &Vec { + // self.model_graph.get_vertex_neighbors(vertex_index) + // } + + // pub fn get_edges_neighbors(&self, edges: &BTreeSet) -> BTreeSet { + // self.model_graph.get_edges_neighbors(edges) + // } } impl MWPSVisualizer for DecodingHyperGraph { diff --git a/src/dual_module.rs b/src/dual_module.rs index 244e86d7..c730005e 100644 --- a/src/dual_module.rs +++ b/src/dual_module.rs @@ -3,17 +3,81 @@ //! Generics for dual modules //! + +use color_print::cprint; +use color_print::cprintln; + +use crate::dual_module_parallel::*; +use crate::dual_module_pq::{FutureQueueMethods, Obstacle}; +use std::ops::DerefMut; use crate::decoding_hypergraph::*; use crate::derivative::Derivative; use crate::invalid_subgraph::*; use crate::model_hypergraph::*; -use crate::num_traits::{One, ToPrimitive, Zero}; +use crate::num_traits::{FromPrimitive, One, Signed, ToPrimitive, Zero}; +use crate::ordered_float::OrderedFloat; use crate::pointers::*; +use crate::primal_module::Affinity; +use crate::primal_module_serial::{PrimalClusterPtr, PrimalModuleSerialNodeWeak}; +use crate::relaxer_optimizer::OptimizerResult; use crate::util::*; use crate::visualize::*; +use crate::matrix::*; + +use std::collections::BTreeMap; use std::collections::{BTreeSet, HashMap}; use std::sync::Arc; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; + +// this is not effecitively doing much right now due to the My (Leo's) desire for ultra performance (inlining function > branches) +#[derive(Default, Debug, Clone)] +pub enum DualModuleMode { + /// Mode 1 + #[default] + Search, // Searching for a solution + + /// Mode 2 + Tune, // Tuning for the optimal solution +} + +impl DualModuleMode { + pub fn new() -> Self { + Self::default() + } + + pub fn advance(&mut self) { + match self { + Self::Search => *self = Self::Tune, + Self::Tune => panic!("dual module mode is already in tune mode"), + } + } + + pub fn reset(&mut self) { + *self = Self::Search; + } +} + +// Each dual_module impl should have mode and affinity_map, hence these methods should be shared +// Note: Affinity Map is not implemented in this branch, but a different file/branch (there incurs performance overhead) +#[macro_export] +macro_rules! add_shared_methods { + () => { + /// Returns a reference to the mode field. + fn mode(&self) -> &DualModuleMode { + &self.mode + } + + /// Returns a mutable reference to the mode field. + fn mode_mut(&mut self) -> &mut DualModuleMode { + &mut self.mode + } + }; +} + pub struct DualNode { /// the index of this dual node, helps to locate internal details of this dual node pub index: NodeIndex, @@ -25,11 +89,13 @@ pub struct DualNode { /// the pointer to the global time /// Note: may employ some unsafe features while being sound in performance-critical cases /// and can remove option when removing dual_module_serial - global_time: Option>, + pub global_time: Option>, /// the last time this dual_node is synced/updated with the global time pub last_updated_time: Rational, /// dual variable's value at the last updated time pub dual_variable_at_last_updated_time: Rational, + /// the corresponding PrimalModuleSerialNode + pub primal_module_serial_node: Option, } impl DualNode { @@ -74,12 +140,14 @@ impl std::fmt::Debug for DualNodePtr { let global_time = dual_node.global_time.as_ref().unwrap_or(&new).read_recursive(); write!( f, - "\n\t\tindex: {}, global_time: {:?}, dual_variable: {}\n\t\tdual_variable_at_last_updated_time: {}, last_updated_time: {}", + "\n\t\tindex: {}, global_time: {:?}, grow_rate: {:?}, dual_variable: {}\n\t\tdual_variable_at_last_updated_time: {}, last_updated_time: {}\n\timpacted_edges: {:?}\n", dual_node.index, global_time, + dual_node.grow_rate, dual_node.get_dual_variable(), dual_node.dual_variable_at_last_updated_time, - dual_node.last_updated_time + dual_node.last_updated_time, + dual_node.invalid_subgraph.hair.iter().map(|e| e.read_recursive().edge_index).collect::>(), ) } } @@ -90,15 +158,12 @@ impl std::fmt::Debug for DualNodeWeak { } } -impl Ord for DualNodePtr { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.read_recursive().index.cmp(&other.read_recursive().index) - } -} - -impl PartialOrd for DualNodePtr { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) +impl DualNodePtr { + /// we mainly use the vertex_index from this function to run bfs to find the partition unit responsible for this dual node + pub fn get_representative_vertex(&self) -> VertexPtr { + let dual_node = self.read_recursive(); + let defect_vertex = dual_node.invalid_subgraph.vertices.first().unwrap(); + defect_vertex.clone() } } @@ -111,8 +176,8 @@ pub struct DualModuleInterface { pub nodes: Vec, /// given an invalid subgraph, find its corresponding dual node pub hashmap: HashMap, NodeIndex>, - /// the decoding graph - pub decoding_graph: DecodingHyperGraph, + // /// the decoding graph + // pub decoding_graph: DecodingHyperGraph, } pub type DualModuleInterfacePtr = ArcRwLock; @@ -142,9 +207,73 @@ pub enum MaxUpdateLength { /// non-zero maximum update length ValidGrow(Rational), /// conflicting growth, violating the slackness constraint - Conflicting(EdgeIndex), + Conflicting(EdgePtr), /// hitting 0 dual variable while shrinking, only happens when `grow_rate` < 0 - ShrinkProhibited(DualNodePtr), + /// note: Using OrderedDualNodePtr since we can compare without acquiring the lock, for enabling btreeset/hashset/pq etc. with lower overhead + ShrinkProhibited(OrderedDualNodePtr), +} + +/// a pair of node index and dual node pointer, used for comparison without acquiring the lock +/// useful for when inserting into sets +#[derive(Derivative, PartialEq, Eq, Clone, Debug)] +pub struct OrderedDualNodePtr { + pub index: NodeIndex, + pub ptr: DualNodePtr, +} + +impl OrderedDualNodePtr { + pub fn new(index: NodeIndex, ptr: DualNodePtr) -> Self { + Self { index, ptr } + } +} +impl PartialOrd for OrderedDualNodePtr { + fn partial_cmp(&self, other: &Self) -> Option { + // Some(self.index.cmp(&other.index)) + Some(self.cmp(other)) + } +} +impl Ord for OrderedDualNodePtr { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + // self.index.cmp(&other.index) + let ptr1 = Arc::as_ptr(self.ptr.ptr()); + let ptr2 = Arc::as_ptr(other.ptr.ptr()); + // https://doc.rust-lang.org/reference/types/pointer.html + // "When comparing raw pointers they are compared by their address, rather than by what they point to." + ptr1.cmp(&ptr2) + } +} + +#[derive(Derivative, PartialEq, Eq, Clone, Debug)] +pub struct OrderedDualNodeWeak { + pub index: NodeIndex, + pub weak_ptr: DualNodeWeak, +} + +impl OrderedDualNodeWeak { + pub fn new(index: NodeIndex, weak_ptr: DualNodeWeak) -> Self { + Self { index, weak_ptr } + } + + pub fn upgrade_force(&self) -> OrderedDualNodePtr { + OrderedDualNodePtr::new(self.index, self.weak_ptr.upgrade_force()) + } +} +impl PartialOrd for OrderedDualNodeWeak { + fn partial_cmp(&self, other: &Self) -> Option { + // Some(self.index.cmp(&other.index)) + Some(self.cmp(other)) + } +} +impl Ord for OrderedDualNodeWeak { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + // the old version of cmp is to compare their indices only + // compare the pointer address + let ptr1 = Arc::as_ptr(self.weak_ptr.upgrade_force().ptr()); + let ptr2 = Arc::as_ptr(other.weak_ptr.upgrade_force().ptr()); + // https://doc.rust-lang.org/reference/types/pointer.html + // "When comparing raw pointers they are compared by their address, rather than by what they point to." + ptr1.cmp(&ptr2) + } } #[derive(Derivative, Clone)] @@ -200,9 +329,214 @@ pub trait DualModuleImpl { /// note that a negative growth should be implemented by reversing the speed of each dual node fn grow(&mut self, length: Rational); - fn get_edge_nodes(&self, edge_index: EdgeIndex) -> Vec; - fn get_edge_slack(&self, edge_index: EdgeIndex) -> Rational; - fn is_edge_tight(&self, edge_index: EdgeIndex) -> bool; + fn get_edge_nodes(&self, edge_ptr: EdgePtr) -> Vec; + fn get_edge_slack(&self, edge_ptr: EdgePtr) -> Rational; + fn is_edge_tight(&self, edge_ptr: EdgePtr) -> bool; + + /* New tuning-related methods */ + /// mode mangements + fn mode(&self) -> &DualModuleMode; + fn mode_mut(&mut self) -> &mut DualModuleMode; + fn advance_mode(&mut self) { + eprintln!("this dual_module does not implement different modes"); + } + fn reset_mode(&mut self) { + *self.mode_mut() = DualModuleMode::default(); + } + + /// "set_grow_rate", but in tuning phase + fn set_grow_rate_tune(&mut self, dual_node_ptr: &DualNodePtr, grow_rate: Rational) { + eprintln!("this dual_module does not implement tuning"); + self.set_grow_rate(dual_node_ptr, grow_rate) + } + + /// "add_dual_node", but in tuning phase + fn add_dual_node_tune(&mut self, dual_node_ptr: &DualNodePtr) { + eprintln!("this dual_module does not implement tuning"); + self.add_dual_node(dual_node_ptr); + } + + /// syncing all possible states (dual_variable and edge_weights) with global time, so global_time can be discarded later + fn sync(&mut self) { + panic!("this dual_module does not have global time and does not need to sync"); + } + + /// grow a specific edge on the spot + fn grow_edge(&self, _edge_ptr: EdgePtr, _amount: &Rational) { + panic!("this dual_module doesn't support edge growth"); + } + + /// `is_edge_tight` but in tuning phase + fn is_edge_tight_tune(&self, edge_ptr: EdgePtr) -> bool { + eprintln!("this dual_module does not implement tuning"); + self.is_edge_tight(edge_ptr) + } + + /// `get_edge_slack` but in tuning phase + fn get_edge_slack_tune(&self, edge_ptr: EdgePtr) -> Rational { + eprintln!("this dual_module does not implement tuning"); + self.get_edge_slack(edge_ptr) + } + + /* miscs */ + + /// print all the states for the current dual module + fn debug_print(&self) { + println!("this dual_module doesn't support debug print"); + } + + /* affinity */ + + /// calculate affinity based on the following metric + /// Clusters with larger primal-dual gaps will receive high affinity because working on those clusters + /// will often reduce the gap faster. However, clusters with a large number of dual variables, vertices, + /// and hyperedges will receive a lower affinity + fn calculate_cluster_affinity(&mut self, _cluster: PrimalClusterPtr) -> Option { + eprintln!("not implemented, skipping"); + Some(OrderedFloat::from(100.0)) + } + + fn get_conflicts_tune( + &self, + optimizer_result: OptimizerResult, + // dual_node_deltas: BTreeMap, + dual_node_deltas: BTreeMap, + ) -> BTreeSet { + let mut conflicts: BTreeSet = BTreeSet::new(); + match optimizer_result { + OptimizerResult::EarlyReturned => { + // if early returned, meaning optimizer didn't optimize, but simply should find current conflicts and return + // for (dual_node_ptr, grow_rate) in dual_node_deltas.into_iter() { + for (dual_node_ptr, (grow_rate, _)) in dual_node_deltas.into_iter() { + let node_ptr_read = dual_node_ptr.ptr.read_recursive(); + if grow_rate.is_negative() && node_ptr_read.dual_variable_at_last_updated_time.is_zero() { + conflicts.insert(MaxUpdateLength::ShrinkProhibited(OrderedDualNodePtr::new( + node_ptr_read.index, + dual_node_ptr.ptr.clone(), + ))); + } + for edge_ptr in node_ptr_read.invalid_subgraph.hair.iter() { + if grow_rate.is_positive() && self.is_edge_tight_tune(edge_ptr.clone()) { + conflicts.insert(MaxUpdateLength::Conflicting( edge_ptr.clone() )); + } + } + } + } + OptimizerResult::Skipped => { + // if skipped, should check if is growable, if not return the conflicts that leads to that conclusion + // for (dual_node_ptr, grow_rate) in dual_node_deltas.into_iter() { + for (dual_node_ptr, (grow_rate, cluster_index)) in dual_node_deltas.into_iter() { + // check if the single direction is growable + let mut actual_grow_rate = Rational::from_usize(std::usize::MAX).unwrap(); + let node_ptr_read = dual_node_ptr.ptr.read_recursive(); + for edge_ptr in node_ptr_read.invalid_subgraph.hair.iter() { + actual_grow_rate = std::cmp::min(actual_grow_rate, self.get_edge_slack_tune(edge_ptr.clone())); + } + if actual_grow_rate.is_zero() { + // if not, return the current conflicts + for edge_ptr in node_ptr_read.invalid_subgraph.hair.iter() { + if grow_rate.is_positive() && self.is_edge_tight_tune(edge_ptr.clone()) { + conflicts.insert(MaxUpdateLength::Conflicting( edge_ptr.clone())); + } + } + if grow_rate.is_negative() && node_ptr_read.dual_variable_at_last_updated_time.is_zero() { + conflicts.insert(MaxUpdateLength::ShrinkProhibited(OrderedDualNodePtr::new( + node_ptr_read.index, + dual_node_ptr.ptr.clone(), + ))); + } + } else { + // if yes, grow and return new conflicts + drop(node_ptr_read); + let mut node_ptr_write = dual_node_ptr.ptr.write(); + for edge_ptr in node_ptr_write.invalid_subgraph.hair.iter() { + self.grow_edge(edge_ptr.clone(), &actual_grow_rate); + #[cfg(feature = "incr_lp")] + self.update_edge_cluster_weights(*edge_index, cluster_index, actual_grow_rate); // note: comment out if not using cluster-based + if actual_grow_rate.is_positive() && self.is_edge_tight_tune(edge_ptr.clone()) { + conflicts.insert(MaxUpdateLength::Conflicting(edge_ptr.clone())); + } + } + node_ptr_write.dual_variable_at_last_updated_time += actual_grow_rate.clone(); + if actual_grow_rate.is_negative() && node_ptr_write.dual_variable_at_last_updated_time.is_zero() { + conflicts.insert(MaxUpdateLength::ShrinkProhibited(OrderedDualNodePtr::new( + node_ptr_write.index, + dual_node_ptr.ptr.clone(), + ))); + } + } + } + } + _ => { + // in other cases, optimizer should have optimized, so we should apply the deltas and return the new conflicts + let mut edge_deltas: BTreeMap = BTreeMap::new(); + // for (dual_node_ptr, grow_rate) in dual_node_deltas.into_iter() { + for (dual_node_ptr, (grow_rate, cluster_index)) in dual_node_deltas.into_iter() { + // update the dual node and check for conflicts + let mut node_ptr_write = dual_node_ptr.ptr.write(); + node_ptr_write.dual_variable_at_last_updated_time += grow_rate.clone(); + if grow_rate.is_negative() && node_ptr_write.dual_variable_at_last_updated_time.is_zero() { + conflicts.insert(MaxUpdateLength::ShrinkProhibited(OrderedDualNodePtr::new( + node_ptr_write.index, + dual_node_ptr.ptr.clone(), + ))); + } + + // calculate the total edge deltas + for edge_ptr in node_ptr_write.invalid_subgraph.hair.iter() { + match edge_deltas.entry(edge_ptr.clone()) { + std::collections::btree_map::Entry::Vacant(v) => { + v.insert(grow_rate.clone()); + } + std::collections::btree_map::Entry::Occupied(mut o) => { + let current = o.get_mut(); + *current += grow_rate.clone(); + } + } + #[cfg(feature = "incr_lp")] + self.update_edge_cluster_weights(*edge_index, cluster_index, grow_rate.clone()); + // note: comment out if not using cluster-based + } + } + + // apply the edge deltas and check for conflicts + for (edge_ptr, grow_rate) in edge_deltas.into_iter() { + if grow_rate.is_zero() { + continue; + } + self.grow_edge(edge_ptr.clone(), &grow_rate); + if grow_rate.is_positive() && self.is_edge_tight_tune(edge_ptr.clone()) { + conflicts.insert(MaxUpdateLength::Conflicting(edge_ptr.clone())); + } + } + } + } + conflicts + } + + /// get the edge free weight, for each edge what is the weight that are free to use by the given participating dual variables + fn get_edge_free_weight( + &self, + edge_ptr: EdgePtr, + participating_dual_variables: &hashbrown::HashSet, + ) -> Rational; + + #[cfg(feature = "incr_lp")] + fn update_edge_cluster_weights(&self, edge_index: EdgeIndex, cluster_index: NodeIndex, grow_rate: Rational); + + #[cfg(feature = "incr_lp")] + fn get_edge_free_weight_cluster(&self, edge_index: EdgeIndex, cluster_index: NodeIndex) -> Rational; + + #[cfg(feature = "incr_lp")] + fn update_edge_cluster_weights_union( + &self, + dual_node_ptr: &DualNodePtr, + drained_cluster_index: NodeIndex, + absorbing_cluster_index: NodeIndex, + ); + + fn get_vertex_ptr(&self, vertex_index: VertexIndex) -> VertexPtr; + fn get_edge_ptr(&self, edge_index: EdgeIndex) -> EdgePtr; } impl MaxUpdateLength { @@ -226,6 +560,13 @@ impl MaxUpdateLength { } impl GroupMaxUpdateLength { + pub fn len (&mut self) -> usize{ + return match self { + GroupMaxUpdateLength::Unbounded => 0, + GroupMaxUpdateLength::ValidGrow(_) => 1, + GroupMaxUpdateLength::Conflicts(conflicts) => conflicts.len(), + } + } pub fn add(&mut self, max_update_length: MaxUpdateLength) { match self { Self::Unbounded => { @@ -287,37 +628,83 @@ impl GroupMaxUpdateLength { Self::Conflicts(conflicts) => conflicts.last(), } } + + pub fn extend(&mut self, other: Self) { + match self { + Self::Conflicts(conflicts) => { + if let Self::Conflicts(other_conflicts) = other { + conflicts.extend(other_conflicts); + } // only add conflicts + }, + Self::Unbounded => { + match other { + Self::Unbounded => {} // do nothing + Self::ValidGrow(length) => *self = Self::ValidGrow(length), + Self::Conflicts(mut other_list) => { + let mut list = Vec::::new(); + std::mem::swap(&mut list, &mut other_list); + *self = Self::Conflicts(list); + } + } + }, + Self::ValidGrow(current_length) => match other { + Self::Conflicts(mut other_list) => { + let mut list = Vec::::new(); + std::mem::swap(&mut list, &mut other_list); + *self = Self::Conflicts(list); + } + Self::Unbounded => {} // do nothing + Self::ValidGrow(length) => { + *current_length = std::cmp::min(current_length.clone(), length); + } + } + } + } } impl DualModuleInterfacePtr { - pub fn new(model_graph: Arc) -> Self { + pub fn new() -> Self { Self::new_value(DualModuleInterface { nodes: Vec::new(), hashmap: HashMap::new(), - decoding_graph: DecodingHyperGraph::new(model_graph, Arc::new(SyndromePattern::new_empty())), + // decoding_graph: DecodingHyperGraph::new(model_graph, Arc::new(SyndromePattern::new_empty())), }) } /// a dual module interface MUST be created given a concrete implementation of the dual module - pub fn new_load(decoding_graph: DecodingHyperGraph, dual_module_impl: &mut impl DualModuleImpl) -> Self { - let interface_ptr = Self::new(decoding_graph.model_graph.clone()); - interface_ptr.load(decoding_graph.syndrome_pattern, dual_module_impl); + pub fn new_load(syndrome_pattern: Arc, dual_module_impl: &mut impl DualModuleImpl) -> Self { + let interface_ptr = Self::new(); + interface_ptr.load(syndrome_pattern, dual_module_impl); interface_ptr } + // the defect_vertices here are local vertices pub fn load(&self, syndrome_pattern: Arc, dual_module_impl: &mut impl DualModuleImpl) { - self.write().decoding_graph.set_syndrome(syndrome_pattern.clone()); + // self.write().decoding_graph.set_syndrome(syndrome_pattern.clone()); for vertex_idx in syndrome_pattern.defect_vertices.iter() { self.create_defect_node(*vertex_idx, dual_module_impl); } } + // // the defect_vertices here are local vertices + // pub fn load_ptr( + // &self, + // syndrome_pattern: Arc, + // dual_module_ptr: &mut DualModuleParallelUnitPtr,) + // where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, + // { + // // self.write().decoding_graph.set_syndrome(syndrome_pattern.clone()); + // for vertex_idx in syndrome_pattern.defect_vertices.iter() { + // self.create_defect_node(*vertex_idx, dual_module_ptr.write().deref_mut()); + // } + // } + pub fn sum_dual_variables(&self) -> Rational { let interface = self.read_recursive(); let mut sum = Rational::zero(); for dual_node_ptr in interface.nodes.iter() { let dual_node = dual_node_ptr.read_recursive(); - sum += dual_node.get_dual_variable(); + sum += dual_node.get_dual_variable().floor(); } sum } @@ -337,12 +724,15 @@ impl DualModuleInterfacePtr { /// make it private; use `load` instead fn create_defect_node(&self, vertex_idx: VertexIndex, dual_module: &mut impl DualModuleImpl) -> DualNodePtr { let interface = self.read_recursive(); - let mut internal_vertices = BTreeSet::new(); - internal_vertices.insert(vertex_idx); + // let mut internal_vertices = BTreeSet::new(); + // internal_vertices.insert(vertex_idx); + let vertex_ptr = dual_module.get_vertex_ptr(vertex_idx); // this is okay because create_defect_node is only called upon local defect vertices, so we won't access index out of range + vertex_ptr.write().is_defect = true; // we change the is_defect to true, since is_defect is initialized as false for all vertex pointers + let mut vertices = BTreeSet::new(); + vertices.insert(vertex_ptr); let invalid_subgraph = Arc::new(InvalidSubgraph::new_complete( - vec![vertex_idx].into_iter().collect(), - BTreeSet::new(), - &interface.decoding_graph, + &vertices, + &BTreeSet::new() )); let node_index = interface.nodes.len() as NodeIndex; let node_ptr = DualNodePtr::new_value(DualNode { @@ -352,14 +742,18 @@ impl DualModuleInterfacePtr { dual_variable_at_last_updated_time: Rational::zero(), global_time: None, last_updated_time: Rational::zero(), + primal_module_serial_node: None, // to be filled in when initializing a primalnode }); + let cloned_node_ptr = node_ptr.clone(); drop(interface); let mut interface = self.write(); - interface.nodes.push(node_ptr); + interface.nodes.push(node_ptr.clone()); interface.hashmap.insert(invalid_subgraph, node_index); drop(interface); + // println!("node created in `create_defect_node`: {:?}", node_ptr.clone()); dual_module.add_defect_node(&cloned_node_ptr); + cloned_node_ptr } @@ -374,6 +768,7 @@ impl DualModuleInterfacePtr { } pub fn create_node(&self, invalid_subgraph: Arc, dual_module: &mut impl DualModuleImpl) -> DualNodePtr { + // cprintln!("create_node"); debug_assert!( self.find_node(&invalid_subgraph).is_none(), "do not create the same node twice" @@ -388,10 +783,41 @@ impl DualModuleInterfacePtr { dual_variable_at_last_updated_time: Rational::zero(), global_time: None, last_updated_time: Rational::zero(), + primal_module_serial_node: None, // to be filled in when initializing a primalnode }); interface.nodes.push(node_ptr.clone()); drop(interface); dual_module.add_dual_node(&node_ptr); + + node_ptr + } + + /// `create_node` for tuning + pub fn create_node_tune( + &self, + invalid_subgraph: Arc, + dual_module: &mut impl DualModuleImpl, + ) -> DualNodePtr { + debug_assert!( + self.find_node(&invalid_subgraph).is_none(), + "do not create the same node twice" + ); + let mut interface = self.write(); + let node_index = interface.nodes.len() as NodeIndex; + interface.hashmap.insert(invalid_subgraph.clone(), node_index); + let node_ptr = DualNodePtr::new_value(DualNode { + index: node_index, + invalid_subgraph, + grow_rate: Rational::zero(), + dual_variable_at_last_updated_time: Rational::zero(), + global_time: None, + last_updated_time: Rational::zero(), + primal_module_serial_node: None, // to be filled in when initializing a primalnode + }); + interface.nodes.push(node_ptr.clone()); + drop(interface); + dual_module.add_dual_node_tune(&node_ptr); + node_ptr } @@ -406,27 +832,77 @@ impl DualModuleInterfacePtr { None => (false, self.create_node(invalid_subgraph.clone(), dual_module)), } } + + /// `find_or_create_node` for tuning + pub fn find_or_create_node_tune( + &self, + invalid_subgraph: &Arc, + dual_module: &mut impl DualModuleImpl, + ) -> (bool, DualNodePtr) { + match self.find_node(invalid_subgraph) { + Some(node_ptr) => (true, node_ptr), + None => (false, self.create_node_tune(invalid_subgraph.clone(), dual_module)), + } + } + + pub fn is_valid_cluster_auto_vertices(&self, edges: &BTreeSet) -> bool { + self.find_valid_subgraph_auto_vertices(edges).is_some() + } + + pub fn find_valid_subgraph_auto_vertices(&self, edges: &BTreeSet) -> Option { + let mut vertices: BTreeSet = BTreeSet::new(); + for edge_ptr in edges.iter() { + let local_vertices = &edge_ptr.get_vertex_neighbors(); + for vertex in local_vertices { + vertices.insert(vertex.upgrade_force()); + } + } + + self.find_valid_subgraph(edges, &vertices) + } + + pub fn find_valid_subgraph(&self, edges: &BTreeSet, vertices: &BTreeSet) -> Option { + let mut matrix = Echelon::::new(); + for edge_index in edges.iter() { + matrix.add_variable(edge_index.downgrade()); + } + + for vertex_index in vertices.iter() { + let incident_edges = &vertex_index.read_recursive().edges; + let parity = vertex_index.read_recursive().is_defect; + matrix.add_constraint(vertex_index.downgrade(), &incident_edges, parity); + } + matrix.get_solution() + } } // shortcuts for easier code writing at debugging impl DualModuleInterfacePtr { - pub fn create_node_vec(&self, edges: &[EdgeIndex], dual_module: &mut impl DualModuleImpl) -> DualNodePtr { + pub fn create_node_vec(&self, edges: &[EdgeWeak], dual_module: &mut impl DualModuleImpl) -> DualNodePtr { + let strong_edges = edges.iter() + .filter_map(|weak_edge| weak_edge.upgrade()) + .collect(); let invalid_subgraph = Arc::new(InvalidSubgraph::new( - edges.iter().cloned().collect(), - &self.read_recursive().decoding_graph, + &strong_edges )); self.create_node(invalid_subgraph, dual_module) } pub fn create_node_complete_vec( &self, - vertices: &[VertexIndex], - edges: &[EdgeIndex], + vertices: &[VertexWeak], + edges: &[EdgeWeak], dual_module: &mut impl DualModuleImpl, ) -> DualNodePtr { + let strong_edges = edges.iter() + .filter_map(|weak_edge| weak_edge.upgrade()) + .collect(); + let strong_vertices = vertices.iter() + .filter_map(|weak_vertex| weak_vertex.upgrade()) + .collect(); + let invalid_subgraph = Arc::new(InvalidSubgraph::new_complete( - vertices.iter().cloned().collect(), - edges.iter().cloned().collect(), - &self.read_recursive().decoding_graph, + &strong_vertices, + &strong_edges )); self.create_node(invalid_subgraph, dual_module) } @@ -438,10 +914,13 @@ impl MWPSVisualizer for DualModuleInterfacePtr { let mut dual_nodes = Vec::::new(); for dual_node_ptr in interface.nodes.iter() { let dual_node = dual_node_ptr.read_recursive(); + let edges: Vec = dual_node.invalid_subgraph.edges.iter().map(|e|e.upgradable_read().edge_index).collect(); + let vertices: Vec = dual_node.invalid_subgraph.vertices.iter().map(|e|e.upgradable_read().vertex_index).collect(); + let hair: Vec = dual_node.invalid_subgraph.hair.iter().map(|e|e.upgradable_read().edge_index).collect(); dual_nodes.push(json!({ - if abbrev { "e" } else { "edges" }: dual_node.invalid_subgraph.edges, - if abbrev { "v" } else { "vertices" }: dual_node.invalid_subgraph.vertices, - if abbrev { "h" } else { "hair" }: dual_node.invalid_subgraph.hair, + if abbrev { "e" } else { "edges" }: edges, + if abbrev { "v" } else { "vertices" }: vertices, + if abbrev { "h" } else { "hair" }: hair, if abbrev { "d" } else { "dual_variable" }: dual_node.get_dual_variable().to_f64(), if abbrev { "dn" } else { "dual_variable_numerator" }: dual_node.get_dual_variable().numer().to_i64(), if abbrev { "dd" } else { "dual_variable_denominator" }: dual_node.get_dual_variable().denom().to_i64(), diff --git a/src/dual_module_parallel.rs b/src/dual_module_parallel.rs new file mode 100644 index 00000000..d9d8be68 --- /dev/null +++ b/src/dual_module_parallel.rs @@ -0,0 +1,2198 @@ +/// Parallel Implementation of Dual Module PQ +/// + + + +use super::dual_module_pq::*; +use crate::{add_shared_methods, dual_module::*}; +use super::pointers::*; +use super::util::*; +use super::visualize::*; +use crate::dual_module::DualModuleImpl; +use crate::rayon::prelude::*; +use crate::serde_json; +use crate::weak_table::PtrWeakHashSet; +use chrono::offset; +use hashbrown::HashMap; +use serde::{Serialize, Deserialize}; +use std::sync::{Arc, Mutex}; +use std::collections::BTreeSet; +use std::collections::HashSet; +use crate::primal_module::Affinity; +use crate::primal_module_serial::PrimalClusterPtr; +use crate::num_traits::{ToPrimitive, Zero}; +use crate::ordered_float::OrderedFloat; +use std::collections::VecDeque; +use std::cmp::Ordering; + + +pub struct DualModuleParallelUnit +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, { + pub unit_index: usize, + /// The corresponding serial_module, in this case, the serial module with priority queue implementation + pub serial_module: DualModulePQ, + /// * The serial units being fused with this serial unit. + /// * For non-boundary unit, the initial state of this vector contains the DualModuleParallelUnit of the boundary unit (aka + /// the unit formed by the boundary vertices of this unit). When more than one such boundary vertices units are present at initialization, + /// we should insert them based on their respective orientation in the time-space chunk block. + /// * For boundary unit, the initial state of this vector is the non-boundary unit it connects to. + /// * When we fuse 2 DualModuleParallelUnit, we could only fuse a non-boundary unit with a boundary unit + pub adjacent_parallel_units: Vec>, + /// Whether this unit is a boundary unit + pub is_boundary_unit: bool, + /// partition info + pub partition_info: Arc, + /// owning_range + pub owning_range: VertexRange, + pub enable_parallel_execution: bool, + /// should think a bit more about whether having this makes sense + /// the current mode of the dual module + /// note: currently does not have too much functionality + mode: DualModuleMode, +} + +pub type DualModuleParallelUnitPtr = ArcRwLock>; +pub type DualModuleParallelUnitWeak = WeakRwLock>; + +impl std::fmt::Debug for DualModuleParallelUnitPtr +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let unit = self.read_recursive(); + write!(f, "{}", unit.unit_index) + } +} + +impl std::fmt::Debug for DualModuleParallelUnitWeak +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + self.upgrade_force().fmt(f) + } +} + +impl Ord for DualModuleParallelUnitPtr +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + fn cmp(&self, other: &Self) -> Ordering { + // compare the pointer address + let ptr1 = Arc::as_ptr(self.ptr()); + let ptr2 = Arc::as_ptr(other.ptr()); + // https://doc.rust-lang.org/reference/types/pointer.html + // "When comparing raw pointers they are compared by their address, rather than by what they point to." + ptr1.cmp(&ptr2) + } +} + +impl PartialOrd for DualModuleParallelUnitPtr +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for DualModuleParallelUnitWeak +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + fn cmp(&self, other: &Self) -> Ordering { + // compare the pointer address + let ptr1 = Arc::as_ptr(self.upgrade_force().ptr()); + let ptr2 = Arc::as_ptr(other.upgrade_force().ptr()); + // https://doc.rust-lang.org/reference/types/pointer.html + // "When comparing raw pointers they are compared by their address, rather than by what they point to." + // println!("ptr1: {:?}", ptr1); + // println!("ptr2: {:?}", ptr2); + ptr1.cmp(&ptr2) + } +} + +impl PartialOrd for DualModuleParallelUnitWeak +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Clone for DualModuleParallelUnit +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + fn clone(&self) -> Self { + Self { + unit_index: self.unit_index.clone(), + serial_module: self.serial_module.clone(), + adjacent_parallel_units: self.adjacent_parallel_units.clone(), + is_boundary_unit: self.is_boundary_unit.clone(), + partition_info: self.partition_info.clone(), + owning_range: self.owning_range.clone(), + enable_parallel_execution: self.enable_parallel_execution.clone(), + mode: self.mode.clone(), + } + } +} + +pub struct DualModuleParallel +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + /// the set of all DualModuleParallelUnits, one for each partition + /// we set the read-write lock + pub units: Vec>, + /// configuration such as thread_pool_size + pub config: DualModuleParallelConfig, + /// partition information + pub partition_info: Arc, + /// thread pool used to execute async functions in parallel + pub thread_pool: Arc, + // /// an empty sync requests queue just to implement the trait + // pub empty_sync_request: Vec, + + /// a dynamic (to-be-update) undirected graph (DAG) to keep track of the relationship between different partition units, assumed to be acylic if we partition + /// along the time axis, but could be cyclic depending on the partition and fusion strategy + pub dag_partition_units: BTreeSet<(usize, usize, bool)>, // (unit_index0, unit_index1, is_fused) + /// partitioned initializers, used in both primal and dual parallel modules + pub partitioned_initializers: Vec, + + /// should think more about whether having this makes sense + /// the current mode of the dual module + /// note: currently does not have too much functionality + mode: DualModuleMode, +} + + + + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct DualModuleParallelConfig { + /// enable async execution of dual operations; only used when calling top-level operations, not used in individual units + #[serde(default = "dual_module_parallel_default_configs::thread_pool_size")] + pub thread_pool_size: usize, + /// enable parallel execution of a fused dual module + #[serde(default = "dual_module_parallel_default_configs::enable_parallel_execution")] + pub enable_parallel_execution: bool, +} + +impl Default for DualModuleParallelConfig { + fn default() -> Self { + serde_json::from_value(json!({})).unwrap() + } +} + +pub mod dual_module_parallel_default_configs { + pub fn thread_pool_size() -> usize { + 0 + } // by default to the number of CPU cores + pub fn enable_parallel_execution() -> bool { + false + } // by default disabled: parallel execution may cause too much context switch, yet not much speed benefit +} + + +impl DualModuleParallel +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + #[allow(clippy::unnecessary_cast)] + pub fn new_config( + initializer: &SolverInitializer, + partition_info: &PartitionInfo, + config: DualModuleParallelConfig + ) -> Self + { + // automatic reference counter for partition info + let partition_info = Arc::new(partition_info.clone()); + + // build thread pool + let mut thread_pool_builder = rayon::ThreadPoolBuilder::new(); + if config.thread_pool_size != 0 { + thread_pool_builder = thread_pool_builder.num_threads(config.thread_pool_size); + } + let thread_pool = thread_pool_builder.build().expect("creating thread pool failed"); + + // build partition initializer + let mut units = vec![]; + let unit_count = partition_info.units.len(); + let mut partitioned_initializers: Vec = (0..unit_count).map(|unit_index| { + let unit_partition_info = &partition_info.units[unit_index]; + let owning_range = &unit_partition_info.owning_range; + let boundary_vertices = &unit_partition_info.boundary_vertices; + + PartitionedSolverInitializer { + unit_index, + vertex_num: initializer.vertex_num, + edge_num: initializer.weighted_edges.len(), + owning_range: *owning_range, + weighted_edges: vec![], + boundary_vertices: boundary_vertices.clone(), + is_boundary_unit: unit_partition_info.is_boundary_unit, + defect_vertices: partition_info.config.defect_vertices.clone(), + // boundary_vertices: unit_partition_info.boundary_vertices.clone(), + // adjacent_partition_units: unit_partition_info.adjacent_partition_units.clone(), + // owning_interface: Some(partition_units[unit_index].downgrade()), + } + }).collect(); + + // now we assign each edge to its unique partition + // println!("edge num: {}", initializer.weighted_edges.len()); + for (edge_index, hyper_edge) in initializer.weighted_edges.iter().enumerate() { + let mut vertices_unit_indices: HashMap> = HashMap::new(); + let mut boundary_vertices_adjacent_units_index: HashMap> = HashMap::new(); // key: unit_index; value: all vertex indices belong to this unit + let mut exist_boundary_vertex = false; + let mut exist_boundary_unit_index = 0; + for vertex_index in hyper_edge.vertices.iter() { + let unit_index = partition_info.vertex_to_owning_unit.get(vertex_index).unwrap(); + let unit = &partition_info.units[*unit_index]; + if unit.is_boundary_unit { + exist_boundary_vertex = true; + exist_boundary_unit_index = unit.unit_index; + if let Some(x) = boundary_vertices_adjacent_units_index.get_mut(unit_index) { + x.push(*vertex_index); + } else { + let mut vertices = vec![]; + vertices.push(*vertex_index); + boundary_vertices_adjacent_units_index.insert(*unit_index, vertices.clone()); + } + } else { + if let Some(x) = vertices_unit_indices.get_mut(unit_index) { + x.push(*vertex_index); + } else { + let mut vertices = vec![]; + vertices.push(*vertex_index); + vertices_unit_indices.insert(*unit_index, vertices.clone()); + } + } + } + + // println!("hyper_edge index: {edge_index}"); + // println!("vertices_unit_indices: {vertices_unit_indices:?}"); + // println!("boundary vertices adjacent unit indices: {boundary_vertices_adjacent_units_index:?}"); + + + // if all vertices are the boundary vertices + if vertices_unit_indices.len() == 0 { + // we add the hyperedge to the boundary unit + let unit_index = boundary_vertices_adjacent_units_index.keys().next().unwrap(); + partitioned_initializers[*unit_index].weighted_edges.push((hyper_edge.clone(), edge_index)); + } else { + let first_vertex_unit_index = *vertices_unit_indices.keys().next().unwrap(); + let all_vertex_from_same_unit = vertices_unit_indices.len() == 1; // whether the rest (exluding boundary vertices) are from the same unit + if !exist_boundary_vertex { + // all within owning range of one unit (since for the vertices to span multiple units, one of them has to be the boundary vertex) + // we assume that for vertices of a hyperedge, if there aren't any boundary vertices among them, they must belong to the same partition unit + assert!(all_vertex_from_same_unit, "For the vertices of hyperedge {}, there does not exist boundary vertex but all the vertices do not belong to the same unit", edge_index); + // since all vertices this hyperedge connects to belong to the same unit, we can assign this hyperedge to that partition unit + partitioned_initializers[first_vertex_unit_index].weighted_edges.push((hyper_edge.clone(), edge_index)); + } else { + // there exist boundary vertex (among the vertices this hyper_edge connects to), the rest vertices span multiple units + // println!("vertices span multiple units"); + if all_vertex_from_same_unit { + let mut hyper_edge_clone = hyper_edge.clone(); + hyper_edge_clone.connected_to_boundary_vertex = true; + partitioned_initializers[first_vertex_unit_index].weighted_edges.push((hyper_edge_clone, edge_index)); + + // if vertices_unit_indices.get(&first_vertex_unit_index).unwrap().len() == 1 { + // // insert this edge to the non-boundary unit + // // println!("edge_index: {:?}, unit_index: {:?}", edge_index, first_vertex_unit_index); + // let mut hyper_edge_clone = hyper_edge.clone(); + // hyper_edge_clone.connected_to_boundary_vertex = true; + // partitioned_initializers[first_vertex_unit_index].weighted_edges.push((hyper_edge_clone, edge_index)); + // } else if vertices_unit_indices.get(&first_vertex_unit_index).unwrap().len() > 1 { + // // insert this edge to the boundary unit + // partitioned_initializers[exist_boundary_unit_index].weighted_edges.push((hyper_edge.clone(), edge_index)); + // } else { + // panic!("cannot find the corresponding vertices in unit"); + // } + } else { + // println!("exist boundary vertices, vertices unit indices {vertices_unit_indices:?}"); + // if the vertices of this hyperedge (excluding the boundary vertices) belong to 2 different partition unit + // sanity check: there really are only 2 unique partition units + // let mut sanity_check = HashSet::new(); + // for (_vertex_index, vertex_unit_index) in &vertices_unit_indices { + // sanity_check.insert(vertex_unit_index); + // } + // assert!(sanity_check.len() == 2, "there are fewer than 2 or more than 2 partition units"); + + // we create new hyperedge with the boundary vertex + verticies exlusive for one partition unit + for (unit_index, vertices) in vertices_unit_indices.iter_mut() { + if let Some(boundary_vertices) = boundary_vertices_adjacent_units_index.get(unit_index) { + vertices.extend(boundary_vertices); + } + } + + // now we add the boundary vertices in + for (unit_index, vertices) in vertices_unit_indices.iter() { + let mut hyper_edge_new = HyperEdge::new(vertices.clone(), hyper_edge.weight); + hyper_edge_new.connected_to_boundary_vertex = true; + partitioned_initializers[*unit_index].weighted_edges.push((hyper_edge_new, edge_index)); + } + } + } + } + } + + // now that we are done with assigning hyperedge to its unique partitions, we proceed to initialize DualModuleParallelUnit for every partition + // print function for check during dev + // println!("partitioned_initializers: {:?}", partitioned_initializers); + thread_pool.scope(|_| { + (0..unit_count) + .into_par_iter() + .map(|unit_index| { + // println!("unit_index: {unit_index}"); + let mut dual_module: DualModulePQ = DualModulePQ::new_partitioned(&partitioned_initializers[unit_index]); + + DualModuleParallelUnitPtr::new_value(DualModuleParallelUnit { + unit_index, + partition_info: Arc::clone(&partition_info), + owning_range: partition_info.units[unit_index].owning_range, + serial_module: dual_module, + enable_parallel_execution: config.enable_parallel_execution, + adjacent_parallel_units: vec![], + is_boundary_unit: partition_info.units[unit_index].is_boundary_unit, + mode: DualModuleMode::default(), + }) + + }) + .collect_into_vec(&mut units); + }); + + + for boundary_unit_index in partition_info.config.partitions.len()..unit_count { + let unit = units[boundary_unit_index].read_recursive(); + for (index, vertex_ptr) in unit.serial_module.vertices.iter().enumerate() { + let vertex_index = vertex_ptr.read_recursive().vertex_index; + let mut vertex = vertex_ptr.write(); + // fill in the `mirrored_vertices` of vertcies for boundary-unit + for adjacent_unit_index in partition_info.units[boundary_unit_index].adjacent_parallel_units.iter() { + let adjacent_unit = units[*adjacent_unit_index].read_recursive(); + let mut offset_corresponding_mirrored_vertex = adjacent_unit.owning_range.len(); + for adjacent_boundary_index_range in partitioned_initializers[*adjacent_unit_index].boundary_vertices.iter() { + if adjacent_boundary_index_range.contains(vertex_index) { + break; + } else { + offset_corresponding_mirrored_vertex += adjacent_boundary_index_range.len(); + } + } + + let corresponding_mirrored_vertex = &adjacent_unit.serial_module.vertices[offset_corresponding_mirrored_vertex + index]; + vertex.mirrored_vertices.push(corresponding_mirrored_vertex.downgrade()); + } + + // fill in the `mirrored_vertices` of vertices for non-boundary-unit + + for adjacent_unit_index in partition_info.units[boundary_unit_index].adjacent_parallel_units.iter() { + let adjacent_unit = units[*adjacent_unit_index].read_recursive(); + let mut offset_corresponding_mirrored_vertex = adjacent_unit.owning_range.len(); + for adjacent_boundary_index_range in partitioned_initializers[*adjacent_unit_index].boundary_vertices.iter() { + if adjacent_boundary_index_range.contains(vertex_index) { + break; + } else { + offset_corresponding_mirrored_vertex += adjacent_boundary_index_range.len(); + } + } + + // println!("offset_corresponding_mirrored_vertex: {:?}", offset_corresponding_mirrored_vertex); + let corresponding_mirrored_vertex_ptr = &adjacent_unit.serial_module.vertices[offset_corresponding_mirrored_vertex + index]; + let mut corresponding_mirrored_vertex = corresponding_mirrored_vertex_ptr.write(); + for vertex_ptr0 in vertex.mirrored_vertices.iter() { + if !vertex_ptr0.eq(&corresponding_mirrored_vertex_ptr.downgrade()) { + corresponding_mirrored_vertex.mirrored_vertices.push(vertex_ptr0.clone()); + } + } + corresponding_mirrored_vertex.mirrored_vertices.push(vertex_ptr.downgrade()); + } + + } + drop(unit); + } + + // // debug print + // for vertex_ptr in units[2].read_recursive().serial_module.vertices.iter() { + // let vertex = vertex_ptr.read_recursive(); + // println!("vertex {:?} in unit 2, mirrored vertices: {:?}, incident edges: {:?}", vertex.vertex_index, vertex.mirrored_vertices, vertex.edges); + // } + + + // for (edge, edge_index) in partitioned_initializers[2].weighted_edges.iter() { + // println!("edge index: {:?}", edge_index); + // } + + // now we are initializing dag_partition_units + let mut dag_partition_units = BTreeSet::new(); + let graph = &partition_info.config.dag_partition_units; + for edge_index in graph.edge_indices() { + let (source, target) = graph.edge_endpoints(edge_index).unwrap(); + dag_partition_units.insert((source.index(), target.index(), false)); + } + + Self { + units, + config, + partition_info, + thread_pool: Arc::new(thread_pool), + dag_partition_units, + partitioned_initializers, + mode: DualModuleMode::default(), + } + } + + /// find the parallel unit that handles this dual node, should be unique + pub fn find_handling_parallel_unit(&self, dual_node_ptr: &DualNodePtr) -> DualModuleParallelUnitPtr { + let defect_ptr = dual_node_ptr.get_representative_vertex(); + let owning_unit_index = self.partition_info.vertex_to_owning_unit.get(&defect_ptr.read_recursive().vertex_index); + match owning_unit_index { + Some(x) => { + let owning_unit_ptr = self.units[*x].clone(); + return owning_unit_ptr; + }, + None => { + panic!("This dual node {} is not contained in any partition, we cannot find a parallel unit that handles this dual node.", defect_ptr.read_recursive().vertex_index) + }} + } + + // statically fuse all units + pub fn static_fuse_all(&mut self) { + // we need to fill in the adjacent_parallel_units here + for unit_index in 0..self.units.len() { + let mut unit = self.units[unit_index].write(); + // println!("for unit {:?}", unit_index); + for adjacent_unit_index in &self.partition_info.units[unit_index].adjacent_parallel_units { + // println!("adjacent_parallel_unit: {:?}", adjacent_unit_index); + let pointer = &self.units[*adjacent_unit_index]; + unit.adjacent_parallel_units.push(pointer.clone()); + // println!("adjacent_parallel_unit ptr: {:?}", Arc::as_ptr(pointer.clone().ptr())); + } + } + + // we also need to change the is_fusion of all vertices to true. There might be a faster way to do this, e.g. have this unit store the info + // instead of each individual vertex + for unit_index in 0..self.units.len() { + let unit = self.units[unit_index].read_recursive(); + for vertex_ptr in unit.serial_module.vertices.iter() { + let mut vertex = vertex_ptr.write(); + vertex.fusion_done = true; + } + } + + + } +} + +impl DualModuleImpl for DualModuleParallel +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + /// create a new dual module with empty syndrome + fn new_empty(initializer: &SolverInitializer) -> Self { + Self::new_config(initializer, + &PartitionConfig::new(initializer.vertex_num).info(), + DualModuleParallelConfig::default(),) + } + + /// clear all growth and existing dual nodes, prepared for the next decoding + #[inline(never)] + fn clear(&mut self) { + self.thread_pool.scope(|_| { + self.units.par_iter().enumerate().for_each(|(unit_index, unit_ptr)| { + let mut unit = unit_ptr.write(); + unit.clear(); // to be implemented in DualModuleParallelUnit + }) + }) + } + + /// add defect node + fn add_defect_node(&mut self, dual_node_ptr: &DualNodePtr) { + let mut unit_ptr = self.find_handling_parallel_unit(dual_node_ptr); + self.thread_pool.scope(|_| { + let mut unit = unit_ptr.write(); + unit.add_defect_node(dual_node_ptr); + }) + } + + /// add corresponding dual node, note that the `internal_vertices` and `hair_edges` are not set + fn add_dual_node(&mut self, dual_node_ptr: &DualNodePtr) { + let mut unit_ptr = self.find_handling_parallel_unit(dual_node_ptr); + self.thread_pool.scope(|_| { + let mut unit = unit_ptr.write(); + unit.add_dual_node(dual_node_ptr); + }) + } + + /// update grow rate + fn set_grow_rate(&mut self, dual_node_ptr: &DualNodePtr, grow_rate: Rational) { + let mut unit_ptr = self.find_handling_parallel_unit(dual_node_ptr); + self.thread_pool.scope(|_| { + let mut unit = unit_ptr.write(); + unit.set_grow_rate(dual_node_ptr, grow_rate); // to be implemented in DualModuleParallelUnit + }) + } + + /// An optional function that helps to break down the implementation of [`DualModuleImpl::compute_maximum_update_length`] + /// check the maximum length to grow (shrink) specific dual node, if length is 0, give the reason of why it cannot further grow (shrink). + /// if `simultaneous_update` is true, also check for the peer node according to [`DualNode::grow_state`]. + fn compute_maximum_update_length_dual_node( + &mut self, + dual_node_ptr: &DualNodePtr, + simultaneous_update: bool, + ) -> MaxUpdateLength { + let mut unit_ptr = self.find_handling_parallel_unit(dual_node_ptr); + self.thread_pool.scope(|_| { + let mut unit = unit_ptr.write(); + unit.compute_maximum_update_length_dual_node(dual_node_ptr, simultaneous_update) // to be implemented in DualModuleParallelUnit + }) + } + + /// check the maximum length to grow (shrink) for all nodes, return a list of conflicting reason and a single number indicating the maximum rate to grow: + /// this number will be 0 if any conflicting reason presents + fn compute_maximum_update_length(&mut self) -> GroupMaxUpdateLength { + // self.thread_pool.scope(|_| { + // let results: Vec<_> = self + // .units + // .par_iter() + // .filter_map(|unit_ptr| { + // // let mut unit = unit_ptr.write(); + // let mut group_max_update_length = GroupMaxUpdateLength::new(); + // unit_ptr.bfs_compute_maximum_update_length(&mut group_max_update_length); + // Some(group_max_update_length) + // }) + // .collect(); + // let mut group_max_update_length = GroupMaxUpdateLength::new(); + // for local_group_max_update_length in results.into_iter() { + // group_max_update_length.extend(local_group_max_update_length); + // } + // group_max_update_length + // }) + // let unit_ptr = &self.units[0]; + + let mut group_max_update_length = GroupMaxUpdateLength::new(); + let unit_ptr = &self.units[0]; + unit_ptr.bfs_compute_maximum_update_length(&mut group_max_update_length); + group_max_update_length + // Some(group_max_update_length) + } + + /// An optional function that can manipulate individual dual node, not necessarily supported by all implementations + fn grow_dual_node(&mut self, dual_node_ptr: &DualNodePtr, length: Rational) { + let mut unit_ptr = self.find_handling_parallel_unit(dual_node_ptr); + self.thread_pool.scope(|_| { + let mut unit = unit_ptr.write(); + unit.grow_dual_node(dual_node_ptr, length) // to be implemented in DualModuleParallelUnit + }) + } + + /// grow a specific length globally, length must be positive. + /// note that a negative growth should be implemented by reversing the speed of each dual node + fn grow(&mut self, length: Rational) { + let unit = &self.units[0]; + unit.bfs_grow(length.clone()); + // for unit_ptr in self.units.iter() { + // unit_ptr.bfs_grow(length.clone()); + // } + // self.thread_pool.scope(|_| { + // self.units.par_iter().for_each(|unit_ptr| { + // unit_ptr.bfs_grow(length.clone()); // to be implemented in DualModuleParallelUnit + // }); + // }) + // self.thread_pool.scope(|_| { + // self.units.par_iter().for_each(|unit_ptr| { + // let mut unit = unit_ptr.write(); + // unit.grow(length.clone()); // to be implemented in DualModuleParallelUnit + // }); + // }) + } + + /// come back later to fix the owning_edge_range contains + fn get_edge_nodes(&self, edge_ptr: EdgePtr) -> Vec { + edge_ptr.read_recursive() + .dual_nodes + .iter() + .map(|x| x.upgrade_force().ptr) + .collect() + } + fn get_edge_slack(&self, edge_ptr: EdgePtr) -> Rational { + let edge = edge_ptr.read_recursive(); + let unit_ptr = &self.units[edge.unit_index.unwrap()]; + let mut unit = unit_ptr.write(); + unit.get_edge_slack(edge_ptr.clone()) + + // unimplemented!() + // let edge = edge_ptr.read_recursive(); + // edge.weight.clone() + // - (self.global_time.read_recursive().clone() - edge.last_updated_time.clone()) * edge.grow_rate.clone() + // - edge.growth_at_last_updated_time.clone() + } + fn is_edge_tight(&self, edge_ptr: EdgePtr) -> bool { + self.get_edge_slack(edge_ptr).is_zero() + } + + /* New tuning-related methods */ + // tuning mode shared methods + add_shared_methods!(); + + /// syncing all possible states (dual_variable and edge_weights) with global time, so global_time can be discarded later + fn sync(&mut self) { + self.thread_pool.scope(|_| { + self.units.par_iter().for_each(|unit_ptr| { + let mut unit = unit_ptr.write(); + unit.sync(); // to be implemented in DualModuleParallelUnit + }); + }) + } + + /// grow a specific edge on the spot + fn grow_edge(&self, edge_ptr: EdgePtr, amount: &Rational) { + let mut edge = edge_ptr.write(); + edge.growth_at_last_updated_time += amount; + } + + /// `is_edge_tight` but in tuning phase + fn is_edge_tight_tune(&self, edge_ptr: EdgePtr) -> bool { + let edge = edge_ptr.read_recursive(); + edge.weight == edge.growth_at_last_updated_time + } + + /// `get_edge_slack` but in tuning phase + fn get_edge_slack_tune(&self, edge_ptr: EdgePtr) -> Rational { + let edge = edge_ptr.read_recursive(); + edge.weight.clone() - edge.growth_at_last_updated_time.clone() + } + + /// change mode, clear queue as queue is no longer needed. also sync to get rid off the need for global time + fn advance_mode(&mut self) { + unimplemented!() + // self.mode_mut().advance(); + // self.obstacle_queue.clear(); + // self.sync(); + } + + /* miscs */ + + /// print all the states for the current dual module + fn debug_print(&self) { + println!("this dual_module doesn't support debug print"); + } + + /* affinity */ + + /// calculate affinity based on the following metric + /// Clusters with larger primal-dual gaps will receive high affinity because working on those clusters + /// will often reduce the gap faster. However, clusters with a large number of dual variables, vertices, + /// and hyperedges will receive a lower affinity + fn calculate_cluster_affinity(&mut self, cluster: PrimalClusterPtr) -> Option { + let mut start = 0.0; + let cluster = cluster.read_recursive(); + start -= cluster.edges.len() as f64 + cluster.nodes.len() as f64; + + let mut weight = Rational::zero(); + for edge_ptr in cluster.edges.iter() { + // let edge_ptr = self.edges[edge_index].read_recursive(); + let edge = edge_ptr.read_recursive(); + weight += &edge.weight - &edge.growth_at_last_updated_time; + } + for node in cluster.nodes.iter() { + let dual_node = node.read_recursive().dual_node_ptr.clone(); + weight -= &dual_node.read_recursive().dual_variable_at_last_updated_time; + } + if weight.is_zero() { + return None; + } + start += weight.to_f64().unwrap(); + Some(OrderedFloat::from(start)) + } + + /// get the edge free weight, for each edge what is the weight that are free to use by the given participating dual variables + fn get_edge_free_weight( + &self, + edge_ptr: EdgePtr, + participating_dual_variables: &hashbrown::HashSet, + ) -> Rational { + // let edge = self.edges[edge_index as usize].read_recursive(); + let edge = edge_ptr.read_recursive(); + let mut free_weight = edge.weight.clone(); + for dual_node in edge.dual_nodes.iter() { + if participating_dual_variables.contains(&dual_node.index) { + continue; + } + let dual_node = dual_node.upgrade_force(); + free_weight -= &dual_node.ptr.read_recursive().dual_variable_at_last_updated_time; + } + + free_weight + } + + /// exist for testing purposes + fn get_vertex_ptr(&self, vertex_index: VertexIndex) -> VertexPtr { + for unit in self.units.iter() { + if unit.read_recursive().owning_range.contains(vertex_index) { + return unit.read_recursive().get_vertex_ptr(vertex_index); + } + } + panic!("none of the units in DualModuleParallel contain vertex_index, cannot find the corresponding vertex pointer"); + } + + /// exist for testing purposes + fn get_edge_ptr(&self, edge_index: EdgeIndex) -> EdgePtr { + for unit in self.units.iter() { + if unit.read_recursive().owning_range.contains(edge_index) { + return unit.read_recursive().get_edge_ptr(edge_index); + } + } + panic!("none of the units in DualModuleParallel contain vertex_index, cannot find the corresponding vertex pointer"); + } +} + +impl DualModuleParallelUnitPtr +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + /// check the maximum length to grow (shrink) for all nodes, return a list of conflicting reason and a single number indicating the maximum rate to grow: + /// this number will be 0 if any conflicting reason presents + pub fn compute_maximum_update_length(&mut self) -> GroupMaxUpdateLength { + let mut group_max_update_length = GroupMaxUpdateLength::new(); + self.bfs_compute_maximum_update_length(&mut group_max_update_length); + group_max_update_length + } + + /// grow a specific length globally, length must be positive. + /// note that a negative growth should be implemented by reversing the speed of each dual node + pub fn grow(&mut self, length: Rational) { + // println!("grow by length: {:?}", length); + self.bfs_grow(length.clone()); + } + +} + +impl DualModuleImpl for DualModuleParallelUnit +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + /// create a new dual module with empty syndrome + fn new_empty(initializer: &SolverInitializer) -> Self { + // tentative, but in the future, I need to modify this so that I can create a new PartitionUnit and fuse it with an existing bigger block + panic!("creating parallel unit directly from initializer is forbidden, use `DualModuleParallel::new` instead"); + } + + /// clear all growth and existing dual nodes, prepared for the next decoding + fn clear(&mut self) { + self.serial_module.clear(); + } + + /// add defect node + fn add_defect_node(&mut self, dual_node_ptr: &DualNodePtr) { + self.serial_module.add_defect_node(dual_node_ptr); + } + + /// add corresponding dual node, note that the `internal_vertices` and `hair_edges` are not set + fn add_dual_node(&mut self, dual_node_ptr: &DualNodePtr) { + self.serial_module.add_dual_node(dual_node_ptr); + } + + /// update grow rate + fn set_grow_rate(&mut self, dual_node_ptr: &DualNodePtr, grow_rate: Rational) { + self.serial_module.set_grow_rate(dual_node_ptr, grow_rate); + } + + /// An optional function that helps to break down the implementation of [`DualModuleImpl::compute_maximum_update_length`] + /// check the maximum length to grow (shrink) specific dual node, if length is 0, give the reason of why it cannot further grow (shrink). + /// if `simultaneous_update` is true, also check for the peer node according to [`DualNode::grow_state`]. + fn compute_maximum_update_length_dual_node( + &mut self, + dual_node_ptr: &DualNodePtr, + simultaneous_update: bool, + ) -> MaxUpdateLength { + self.serial_module + .compute_maximum_update_length_dual_node(dual_node_ptr, simultaneous_update) + + // updating dual node index is performed in fuse fn + // // we only update the max_update_length for the units involed in fusion + } + + /// check the maximum length to grow (shrink) for all nodes, return a list of conflicting reason and a single number indicating the maximum rate to grow: + /// this number will be 0 if any conflicting reason presents + fn compute_maximum_update_length(&mut self) -> GroupMaxUpdateLength { + // we should not need this, refer to the `compute_maximum_update_length()` implementation in DualModuleParallelUnitPtr + unimplemented!() + // println!("unit compute max update length"); + // let mut group_max_update_length = GroupMaxUpdateLength::new(); + // self.bfs_compute_maximum_update_length(&mut group_max_update_length); + + // // // we only update the group_max_update_length for the units involed in fusion + // // if self.involved_in_fusion { + // // group_max_update_length.update(); + // // } + // group_max_update_length + } + + // /// An optional function that can manipulate individual dual node, not necessarily supported by all implementations + // fn grow_dual_node(&mut self, dual_node_ptr: &DualNodePtr, length: Rational) { + // let defect_vertex = dual_node_ptr.get_representative_vertex(); + // println!("grow_dual_node: defect vertex found from dual node ptr is {}", defect_vertex.read_recursive().vertex_index); + // let mut visited: HashSet = HashSet::new(); + // self.dfs_grow_dual_node(dual_node_ptr, length, defect_vertex, &mut visited); + // } + + /// grow a specific length globally, length must be positive. + /// note that a negative growth should be implemented by reversing the speed of each dual node + fn grow(&mut self, length: Rational) { + // we should not need this, refer to the `grow()` implementation in DualModuleParallelUnitPtr + unimplemented!() + // let x = &*self; + // // let dual_module_unit: ArcRwLock> = ArcRwLock::new_value(x.clone()); + // let dual_module_unit = std::ptr::addr_of!(self); + // dual_module_unit.bfs_grow(length); + // self.bfs_grow(length); + } + + fn get_edge_nodes(&self, edge_ptr: EdgePtr) -> Vec { + self.serial_module.get_edge_nodes(edge_ptr) + } + fn get_edge_slack(&self, edge_ptr: EdgePtr) -> Rational { + self.serial_module.get_edge_slack(edge_ptr) + } + fn is_edge_tight(&self, edge_ptr: EdgePtr) -> bool { + self.serial_module.is_edge_tight(edge_ptr) + } + + /* New tuning-related methods */ + /// mode mangements + // tuning mode shared methods + // self.write().serial_module.add_shared_methods!(); + /// Returns a reference to the mode field. + fn mode(&self) -> &DualModuleMode { + &self.mode + } + + /// Returns a mutable reference to the mode field. + fn mode_mut(&mut self) -> &mut DualModuleMode { + &mut self.mode + } + + fn advance_mode(&mut self) { + self.serial_module.advance_mode(); + } + + /// syncing all possible states (dual_variable and edge_weights) with global time, so global_time can be discarded later + fn sync(&mut self) { + self.serial_module.sync(); + } + + /// grow a specific edge on the spot + fn grow_edge(&self, edge_ptr: EdgePtr, amount: &Rational) { + self.serial_module.grow_edge(edge_ptr, amount); + } + + /// `is_edge_tight` but in tuning phase + fn is_edge_tight_tune(&self, edge_ptr: EdgePtr) -> bool { + self.serial_module.is_edge_tight_tune(edge_ptr) + } + + /// `get_edge_slack` but in tuning phase + fn get_edge_slack_tune(&self, edge_ptr: EdgePtr) -> Rational { + self.serial_module.get_edge_slack_tune(edge_ptr) + } + + /* miscs */ + + /// print all the states for the current dual module + fn debug_print(&self) { + self.serial_module.debug_print(); + } + + /* affinity */ + + /// calculate affinity based on the following metric + /// Clusters with larger primal-dual gaps will receive high affinity because working on those clusters + /// will often reduce the gap faster. However, clusters with a large number of dual variables, vertices, + /// and hyperedges will receive a lower affinity + fn calculate_cluster_affinity(&mut self, cluster: PrimalClusterPtr) -> Option { + self.serial_module.calculate_cluster_affinity(cluster) + } + + /// get the edge free weight, for each edge what is the weight that are free to use by the given participating dual variables + fn get_edge_free_weight( + &self, + edge_ptr: EdgePtr, + participating_dual_variables: &hashbrown::HashSet, + ) -> Rational { + self.serial_module.get_edge_free_weight(edge_ptr, participating_dual_variables) + } + + /// exist for testing purposes + fn get_vertex_ptr(&self, vertex_index: VertexIndex) -> VertexPtr { + let local_vertex_index = vertex_index - self.owning_range.start(); + self.serial_module.get_vertex_ptr(local_vertex_index) + } + + /// exist for testing purposes + fn get_edge_ptr(&self, edge_index: EdgeIndex) -> EdgePtr { + let local_edge_index = edge_index - self.owning_range.start(); + self.serial_module.get_edge_ptr(local_edge_index) + } +} + + + +// impl DualModuleImpl for DualModuleParallelUnitPtr +// where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +// { +// /// create a new dual module with empty syndrome +// fn new_empty(initializer: &SolverInitializer) -> Self { +// // tentative, but in the future, I need to modify this so that I can create a new PartitionUnit and fuse it with an existing bigger block +// panic!("creating parallel unit directly from initializer is forbidden, use `DualModuleParallel::new` instead"); +// } + +// /// clear all growth and existing dual nodes, prepared for the next decoding +// fn clear(&mut self) { +// self.write().serial_module.clear(); +// } + +// /// add defect node +// fn add_defect_node(&mut self, dual_node_ptr: &DualNodePtr) { +// self.write().serial_module.add_defect_node(dual_node_ptr); +// } + +// /// add corresponding dual node, note that the `internal_vertices` and `hair_edges` are not set +// fn add_dual_node(&mut self, dual_node_ptr: &DualNodePtr) { +// self.write().serial_module.add_dual_node(dual_node_ptr); +// } + +// /// update grow rate +// fn set_grow_rate(&mut self, dual_node_ptr: &DualNodePtr, grow_rate: Rational) { +// self.write().serial_module.set_grow_rate(dual_node_ptr, grow_rate); +// } + +// /// An optional function that helps to break down the implementation of [`DualModuleImpl::compute_maximum_update_length`] +// /// check the maximum length to grow (shrink) specific dual node, if length is 0, give the reason of why it cannot further grow (shrink). +// /// if `simultaneous_update` is true, also check for the peer node according to [`DualNode::grow_state`]. +// fn compute_maximum_update_length_dual_node( +// &mut self, +// dual_node_ptr: &DualNodePtr, +// simultaneous_update: bool, +// ) -> MaxUpdateLength { +// self.write().serial_module +// .compute_maximum_update_length_dual_node(dual_node_ptr, simultaneous_update) + +// // updating dual node index is performed in fuse fn +// // // we only update the max_update_length for the units involed in fusion +// } + +// /// check the maximum length to grow (shrink) for all nodes, return a list of conflicting reason and a single number indicating the maximum rate to grow: +// /// this number will be 0 if any conflicting reason presents +// fn compute_maximum_update_length(&mut self) -> GroupMaxUpdateLength { +// // we should not need this, refer to the `compute_maximum_update_length()` implementation in DualModuleParallelUnitPtr +// unimplemented!() +// // println!("unit compute max update length"); +// // let mut group_max_update_length = GroupMaxUpdateLength::new(); +// // self.bfs_compute_maximum_update_length(&mut group_max_update_length); + +// // // // we only update the group_max_update_length for the units involed in fusion +// // // if self.involved_in_fusion { +// // // group_max_update_length.update(); +// // // } +// // group_max_update_length +// } + +// // /// An optional function that can manipulate individual dual node, not necessarily supported by all implementations +// // fn grow_dual_node(&mut self, dual_node_ptr: &DualNodePtr, length: Rational) { +// // let defect_vertex = dual_node_ptr.get_representative_vertex(); +// // println!("grow_dual_node: defect vertex found from dual node ptr is {}", defect_vertex.read_recursive().vertex_index); +// // let mut visited: HashSet = HashSet::new(); +// // self.dfs_grow_dual_node(dual_node_ptr, length, defect_vertex, &mut visited); +// // } + +// /// grow a specific length globally, length must be positive. +// /// note that a negative growth should be implemented by reversing the speed of each dual node +// fn grow(&mut self, length: Rational) { +// // we should not need this, refer to the `grow()` implementation in DualModuleParallelUnitPtr +// unimplemented!() +// // let x = &*self; +// // // let dual_module_unit: ArcRwLock> = ArcRwLock::new_value(x.clone()); +// // let dual_module_unit = std::ptr::addr_of!(self); +// // dual_module_unit.bfs_grow(length); +// // self.bfs_grow(length); +// } + +// fn get_edge_nodes(&self, edge_ptr: EdgePtr) -> Vec { +// self.read_recursive().serial_module.get_edge_nodes(edge_ptr) +// } +// fn get_edge_slack(&self, edge_ptr: EdgePtr) -> Rational { +// self.read_recursive().serial_module.get_edge_slack(edge_ptr) +// } +// fn is_edge_tight(&self, edge_ptr: EdgePtr) -> bool { +// self.read_recursive().serial_module.is_edge_tight(edge_ptr) +// } + +// /* New tuning-related methods */ +// /// mode mangements +// // tuning mode shared methods +// // self.write().serial_module.add_shared_methods!(); +// /// Returns a reference to the mode field. +// fn mode(&self) -> &DualModuleMode { +// &self.read_recursive().mode +// } + +// /// Returns a mutable reference to the mode field. +// fn mode_mut(&mut self) -> &mut DualModuleMode { +// &mut self.read_recursive().mode +// } + +// fn advance_mode(&mut self) { +// self.write().serial_module.advance_mode(); +// } + +// /// syncing all possible states (dual_variable and edge_weights) with global time, so global_time can be discarded later +// fn sync(&mut self) { +// self.write().serial_module.sync(); +// } + +// /// grow a specific edge on the spot +// fn grow_edge(&self, edge_ptr: EdgePtr, amount: &Rational) { +// self.write().serial_module.grow_edge(edge_ptr, amount); +// } + +// /// `is_edge_tight` but in tuning phase +// fn is_edge_tight_tune(&self, edge_ptr: EdgePtr) -> bool { +// self.read_recursive().serial_module.is_edge_tight_tune(edge_ptr) +// } + +// /// `get_edge_slack` but in tuning phase +// fn get_edge_slack_tune(&self, edge_ptr: EdgePtr) -> Rational { +// self.read_recursive().serial_module.get_edge_slack_tune(edge_ptr) +// } + +// /* miscs */ + +// /// print all the states for the current dual module +// fn debug_print(&self) { +// self.read_recursive().serial_module.debug_print(); +// } + +// /* affinity */ + +// /// calculate affinity based on the following metric +// /// Clusters with larger primal-dual gaps will receive high affinity because working on those clusters +// /// will often reduce the gap faster. However, clusters with a large number of dual variables, vertices, +// /// and hyperedges will receive a lower affinity +// fn calculate_cluster_affinity(&mut self, cluster: PrimalClusterPtr) -> Option { +// self.write().serial_module.calculate_cluster_affinity(cluster) +// } + +// /// get the edge free weight, for each edge what is the weight that are free to use by the given participating dual variables +// fn get_edge_free_weight( +// &self, +// edge_ptr: EdgePtr, +// participating_dual_variables: &hashbrown::HashSet, +// ) -> Rational { +// self.read_recursive().serial_module.get_edge_free_weight(edge_ptr, participating_dual_variables) +// } + +// /// exist for testing purposes +// fn get_vertex_ptr(&self, vertex_index: VertexIndex) -> VertexPtr { +// let local_vertex_index = vertex_index - self.read_recursive().owning_range.start(); +// self.read_recursive().serial_module.get_vertex_ptr(local_vertex_index) +// } + +// /// exist for testing purposes +// fn get_edge_ptr(&self, edge_index: EdgeIndex) -> EdgePtr { +// let local_edge_index = edge_index - self.read_recursive().owning_range.start(); +// self.read_recursive().serial_module.get_edge_ptr(local_edge_index) +// } +// } + + +// impl DualModuleImpl for DualModuleParallelUnit +// where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +// { +// /// create a new dual module with empty syndrome +// fn new_empty(initializer: &SolverInitializer) -> Self { +// // tentative, but in the future, I need to modify this so that I can create a new PartitionUnit and fuse it with an existing bigger block +// panic!("creating parallel unit directly from initializer is forbidden, use `DualModuleParallel::new` instead"); +// } + +// /// clear all growth and existing dual nodes, prepared for the next decoding +// fn clear(&mut self) { +// self.serial_module.clear(); +// } + +// /// add defect node +// fn add_defect_node(&mut self, dual_node_ptr: &DualNodePtr) { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /// add corresponding dual node, note that the `internal_vertices` and `hair_edges` are not set +// fn add_dual_node(&mut self, dual_node_ptr: &DualNodePtr) { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /// update grow rate +// fn set_grow_rate(&mut self, dual_node_ptr: &DualNodePtr, grow_rate: Rational) { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /// An optional function that helps to break down the implementation of [`DualModuleImpl::compute_maximum_update_length`] +// /// check the maximum length to grow (shrink) specific dual node, if length is 0, give the reason of why it cannot further grow (shrink). +// /// if `simultaneous_update` is true, also check for the peer node according to [`DualNode::grow_state`]. +// fn compute_maximum_update_length_dual_node( +// &mut self, +// dual_node_ptr: &DualNodePtr, +// simultaneous_update: bool, +// ) -> MaxUpdateLength { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /// check the maximum length to grow (shrink) for all nodes, return a list of conflicting reason and a single number indicating the maximum rate to grow: +// /// this number will be 0 if any conflicting reason presents +// fn compute_maximum_update_length(&mut self) -> GroupMaxUpdateLength { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// // /// An optional function that can manipulate individual dual node, not necessarily supported by all implementations +// // fn grow_dual_node(&mut self, dual_node_ptr: &DualNodePtr, length: Rational) { +// // let defect_vertex = dual_node_ptr.get_representative_vertex(); +// // println!("grow_dual_node: defect vertex found from dual node ptr is {}", defect_vertex.read_recursive().vertex_index); +// // let mut visited: HashSet = HashSet::new(); +// // self.dfs_grow_dual_node(dual_node_ptr, length, defect_vertex, &mut visited); +// // } + +// /// grow a specific length globally, length must be positive. +// /// note that a negative growth should be implemented by reversing the speed of each dual node +// fn grow(&mut self, length: Rational) { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// fn get_edge_nodes(&self, edge_ptr: EdgePtr) -> Vec { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// fn get_edge_slack(&self, edge_ptr: EdgePtr) -> Rational { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } +// fn is_edge_tight(&self, edge_ptr: EdgePtr) -> bool { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /* New tuning-related methods */ +// /// mode mangements +// // tuning mode shared methods +// // self.write().serial_module.add_shared_methods!(); +// /// Returns a reference to the mode field. +// fn mode(&self) -> &DualModuleMode { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /// Returns a mutable reference to the mode field. +// fn mode_mut(&mut self) -> &mut DualModuleMode { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// fn advance_mode(&mut self) { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /// syncing all possible states (dual_variable and edge_weights) with global time, so global_time can be discarded later +// fn sync(&mut self) { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /// grow a specific edge on the spot +// fn grow_edge(&self, edge_ptr: EdgePtr, amount: &Rational) { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /// `is_edge_tight` but in tuning phase +// fn is_edge_tight_tune(&self, edge_ptr: EdgePtr) -> bool { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /// `get_edge_slack` but in tuning phase +// fn get_edge_slack_tune(&self, edge_ptr: EdgePtr) -> Rational { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /* miscs */ + +// /// print all the states for the current dual module +// fn debug_print(&self) { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /* affinity */ + +// /// calculate affinity based on the following metric +// /// Clusters with larger primal-dual gaps will receive high affinity because working on those clusters +// /// will often reduce the gap faster. However, clusters with a large number of dual variables, vertices, +// /// and hyperedges will receive a lower affinity +// fn calculate_cluster_affinity(&mut self, cluster: PrimalClusterPtr) -> Option { +// panic!("please use `clear` in DualModuleParallelUnitPtr"); +// } + +// /// get the edge free weight, for each edge what is the weight that are free to use by the given participating dual variables +// fn get_edge_free_weight( +// &self, +// edge_ptr: EdgePtr, +// participating_dual_variables: &hashbrown::HashSet, +// ) -> Rational { +// panic!("please use `get_edge_free_weight` in DualModuleParallelUnitPtr"); +// } + +// /// exist for testing purposes +// fn get_vertex_ptr(&self, vertex_index: VertexIndex) -> VertexPtr { +// panic!("please use `get_vertex_ptr` in DualModuleParallelUnitPtr"); +// } + +// /// exist for testing purposes +// fn get_edge_ptr(&self, edge_index: EdgeIndex) -> EdgePtr { +// panic!("please use `get_edge_ptr` in DualModuleParallelUnitPtr"); +// } +// } + +// impl DualModuleParallelUnit +// where Queue: FutureQueueMethods + Default + std::fmt::Debug, +// { +// fn new_config( +// initializer: &SolverInitializer, +// partition_info: &PartitionInfo, // contains the partition info of all partition units +// config: DualModuleParallelConfig +// ) -> Self +// { + + + +// Self { +// unit_index: , +// serial_module: , +// adjacent_parallel_units: , +// is_boundary_unit: , + +// } + + +// } +// } + +impl DualModuleParallelUnit +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + // pub fn fuse_helper(&mut self, + // other_dual_unit: &DualModuleParallelUnitPtr + // ) { + // if let Some(is_fused) = self.adjacent_parallel_units.get_mut(other_dual_unit) { + // *is_fused = true; + // } + // } + + // pub fn fuse( + // &mut self, + // self_interface: &DualModuleInterfacePtr, + // other_interface: &DualModuleInterfacePtr, + // other_dual_unit: &DualModuleParallelUnitPtr + // ) { + + // // change the index of dual nodes in the other interface + + + // // fuse dual unit + // self.fuse_helper(other_dual_unit); + // // if let Some(is_fused) = self.adjacent_parallel_units.get_mut(other_dual_unit) { + // // *is_fused = true; + // // } + // println!("fuse asdf"); + // // now we fuse the interface (copying the interface of other to myself) + // self_interface.fuse(other_interface); + // } + + // /// dfs to add defect node + // fn dfs_grow_dual_node(&mut self, dual_node_ptr: &DualNodePtr, length: Rational, defect_vertex: VertexIndex, visited: &mut HashSet) { + + // if self.owning_range.contains(defect_vertex) { + // // println!("the unit containing this dual node is {} with owning range {} to {}", self.unit_index, self.owning_range.range[0], self.owning_range.range[1]); + // self.serial_module.grow_dual_node(dual_node_ptr, length); + // return; + // } + + // visited.insert(self.unit_index); + + // // println!("neighbor len: {}", self.adjacent_parallel_units.len()); + // for neighbor in self.adjacent_parallel_units.iter() { + // if !visited.contains(&neighbor.upgrade_force().read_recursive().unit_index) { + // neighbor.upgrade_force().write().dfs_grow_dual_node(dual_node_ptr, length.clone(), defect_vertex, visited); + // } + // } + // } +} + + +impl DualModuleParallelUnitPtr +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + // dfs grow all neighbors + pub fn dfs_grow(&self, length: Rational, visited: BTreeSet>) { + let mut dual_module_unit = self.write(); + + } + + + // I do need to iteratively grow all the neighbors, instead I only grow this unit + // this helps me to reduce the time complexity of copying all the nodes from one interface to the other during fusion + pub fn bfs_grow(&self, length: Rational) { + let mut dual_module_unit = self.write(); + if dual_module_unit.enable_parallel_execution { + // println!("enable parallel execution"); + // implementation using rayon without locks + // early terminate if no active dual nodes in this partition unit + // if !self.has_active_node { + // return; + // } + // println!("bfs grow"); + + // dual_module_unit.serial_module.grow(length.clone()); + // drop(dual_module_unit); + // let dual_module_unit = self.read_recursive(); + + // // could potentially use rayon to optimize it + // // implement a breadth first search to grow all connected (fused) neighbors + // let mut queue = VecDeque::new(); + // let mut visited = BTreeSet::new(); + // visited.insert(self.clone()); + // queue.push_back(self.clone()); + // drop(dual_module_unit); + + // while let Some(node) = { + // queue.pop_front() + // } { + // let neighbors = &node.read_recursive().adjacent_parallel_units; + + // neighbors.par_iter().for_each(|neighbor| { + // if !visited.contains(&neighbor) { + // neighbor.write().serial_module.grow(length.clone()); + // visited.insert(neighbor.clone()); + // queue.push_back(neighbor.clone()); + // } + // }); + // } + + // implementation using rayon with locks + // early terminate if no active dual nodes in this partition unit + // if !self.has_active_node { + // return; + // } + // println!("bfs grow"); + + dual_module_unit.serial_module.grow(length.clone()); + drop(dual_module_unit); + let dual_module_unit = self.read_recursive(); + + // could potentially use rayon to optimize it + // implement a breadth first search to grow all connected (fused) neighbors + let queue = Arc::new(Mutex::new(VecDeque::new())); + let visited = Arc::new(Mutex::new(BTreeSet::new())); + + let mut visited_lock = visited.lock().unwrap(); + visited_lock.insert(self.clone()); + drop(visited_lock); + + let mut queue_lock = queue.lock().unwrap(); + queue_lock.push_back(self.clone()); + drop(queue_lock); + drop(dual_module_unit); + + while let Some(node) = { + let mut queue_lock = queue.lock().unwrap(); + queue_lock.pop_front() + } { + let neighbors = &node.read_recursive().adjacent_parallel_units; + + neighbors.par_iter().for_each(|neighbor| { + let mut visited_lock = visited.lock().unwrap(); + let mut queue_lock = queue.lock().unwrap(); + + if !visited_lock.contains(&neighbor) { + neighbor.write().serial_module.grow(length.clone()); + visited_lock.insert(neighbor.clone()); + queue_lock.push_back(neighbor.clone()); + } + }); + } + } else { + // implementation using sequential for loop, we need to compare the resolve time of this and the version using rayon + dual_module_unit.serial_module.grow(length.clone()); + drop(dual_module_unit); + let dual_module_unit = self.read_recursive(); + // could potentially use rayon to optimize it + // implement a breadth first search to grow all connected (fused) neighbors + let mut frontier: VecDeque<_> = VecDeque::new(); + let mut visited = BTreeSet::new(); + // println!("index: {:?}", self.unit_index); + // visited.insert(Arc::as_ptr(self.ptr())); + visited.insert(self.clone()); + // println!("self pointer: {:?}", Arc::as_ptr(self.ptr())); + + for neighbor in dual_module_unit.adjacent_parallel_units.iter() { + // println!("first neighbor pointer: {:?}", Arc::as_ptr(neighbor.ptr())); + frontier.push_front(neighbor.clone()); + } + + drop(dual_module_unit); + while !frontier.is_empty() { + // println!("frontier len: {:?}", frontier.len()); + let temp = frontier.pop_front().unwrap(); + // println!("frontier len: {:?}", frontier.len()); + // let temp_ptr = temp_weak.upgrade_force(); + temp.write().serial_module.grow(length.clone()); + // visited.insert(Arc::as_ptr(temp.ptr())); + visited.insert(temp.clone()); + // println!("temp pointer: {:?}", Arc::as_ptr(temp.ptr())); + // println!("temp index: {:?}", temp.unit_index); + // println!("len: {:?}", temp.adjacent_parallel_units.len()); + + for neighbor in temp.read_recursive().adjacent_parallel_units.iter() { + // println!("hihi"); + // println!("neighbor pointer: {:?}", Arc::as_ptr(neighbor.ptr())); + // if !visited.contains(&Arc::as_ptr(neighbor.ptr())) { + // frontier.push_back(neighbor.clone()); + // } + if !visited.contains(neighbor) { + frontier.push_back(neighbor.clone()); + } + // println!("frontier len: {:?}", frontier.len()); + } + drop(temp); + // println!("after for loop"); + } + + } + } + + + fn bfs_compute_maximum_update_length(&self, group_max_update_length: &mut GroupMaxUpdateLength) { + let mut dual_module_unit = self.write(); + if dual_module_unit.enable_parallel_execution { + let serial_module_group_max_update_length = dual_module_unit.serial_module.compute_maximum_update_length(); + // println!("serial_module group max_update length: {:?}", serial_module_group_max_update_length); + drop(dual_module_unit); + let dual_module_unit = self.read_recursive(); + group_max_update_length.extend(serial_module_group_max_update_length); + + // implement a breadth first search to grow all connected (fused) neighbors + let queue = Arc::new(Mutex::new(VecDeque::new())); + let visited = Arc::new(Mutex::new(BTreeSet::new())); + + let mut visited_lock = visited.lock().unwrap(); + visited_lock.insert(self.clone()); + drop(visited_lock); + + let mut queue_lock = queue.lock().unwrap(); + queue_lock.push_back(self.clone()); + drop(queue_lock); + drop(dual_module_unit); + + let local_group_max_update_length = Arc::new(Mutex::new(GroupMaxUpdateLength::new())); + while let Some(node) = { + let mut queue_lock = queue.lock().unwrap(); + queue_lock.pop_front() + } { + let neighbors = &node.read_recursive().adjacent_parallel_units; + + + neighbors.par_iter().for_each(|neighbor| { + let mut visited_lock = visited.lock().unwrap(); + let mut queue_lock = queue.lock().unwrap(); + + + if !visited_lock.contains(&neighbor) { + let serial_module_group_max_update_length = neighbor.write().serial_module.compute_maximum_update_length(); + // group_max_update_length.extend(serial_module_group_max_update_length); + local_group_max_update_length.lock().unwrap().extend(serial_module_group_max_update_length); + visited_lock.insert(neighbor.clone()); + queue_lock.push_back(neighbor.clone()); + } + }); + } + let final_local_group_max_update_length = local_group_max_update_length.lock().unwrap(); + group_max_update_length.extend(final_local_group_max_update_length.clone()); + } else { + // implementation with sequential iteration of neighbors + // early terminate if no active dual nodes anywhere in the descendant + + // println!("bfs_compute_max_update_length"); + + + let serial_module_group_max_update_length = dual_module_unit.serial_module.compute_maximum_update_length(); + // println!("serial_module group max_update length: {:?}", serial_module_group_max_update_length); + drop(dual_module_unit); + let dual_module_unit = self.read_recursive(); + + group_max_update_length.extend(serial_module_group_max_update_length); + + // we need to find the maximum update length of all connected (fused) units + // so we run a bfs, we could potentially use rayon to optimize it + let mut frontier: VecDeque<_> = VecDeque::new(); + let mut visited = BTreeSet::new(); + visited.insert(self.clone()); + // println!("self pointer: {:?}", Arc::as_ptr(self.ptr())); + + for neighbor in dual_module_unit.adjacent_parallel_units.iter() { + // println!("first neighbor pointer: {:?}", Arc::as_ptr(neighbor.ptr())); + frontier.push_front(neighbor.clone()); + } + + while !frontier.is_empty() { + // println!("frontier len: {:?}", frontier.len()); + let temp = frontier.pop_front().unwrap(); + // println!("frontier len: {:?}", frontier.len()); + let serial_module_group_max_update_length = temp.write().serial_module.compute_maximum_update_length(); + // println!("serial_module_group_max_update_length: {:?}", serial_module_group_max_update_length); + group_max_update_length.extend(serial_module_group_max_update_length); + visited.insert(temp.clone()); + // println!("temp pointer: {:?}", Arc::as_ptr(temp.ptr())); + + for neighbor in temp.read_recursive().adjacent_parallel_units.iter() { + // println!("hihi"); + // println!("neighbor pointer: {:?}", Arc::as_ptr(neighbor.ptr())); + if !visited.contains(neighbor) { + frontier.push_back(neighbor.clone()); + } + // println!("frontier len: {:?}", frontier.len()); + + } + drop(temp); + // println!("after for loop"); + } + } + } + +} + +// now we implement the visualization functions +impl MWPSVisualizer for DualModuleParallel +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + fn snapshot(&self, abbrev: bool) -> serde_json::Value { + // do the sanity check first before taking snapshot + // self.sanity_check().unwrap(); + let mut value = json!({}); + for unit_ptr in self.units.iter() { + let unit = unit_ptr.read_recursive(); + let value_2 = unit.snapshot(abbrev); + // println!("value in unit {}: {}", unit.unit_index, value_2); + // snapshot_fix_missing_fields(&mut value_2, abbrev); + // let value = value.as_object_mut().expect("snapshot must be an object"); + // let value_2 = value_2.as_object_mut().expect("snapshot must be an object"); + // snapshot_copy_remaining_fields(value, value_2); + snapshot_combine_values(&mut value, value_2, abbrev); + // snapshot_append_values(&mut value, value_2, abbrev); + // println!("\n\n"); + // println!("after combine: {}", value); + } + value + } +} + +// now we proceed to implement the visualization tool +impl MWPSVisualizer for DualModuleParallelUnit +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, +{ + fn snapshot(&self, abbrev: bool) -> serde_json::Value { + // incomplete, tentative + // println!("snapshot unit index {}", self.unit_index); + self.serial_module.snapshot(abbrev) + } +} + + +#[cfg(test)] +pub mod tests { + use std::usize::MAX; + + use rayon::iter::split; + use slp::Solver; + + use super::super::example_codes::*; + use super::super::primal_module::*; + use super::super::primal_module_serial::*; + use crate::decoding_hypergraph::*; + use super::*; + use crate::num_traits::FromPrimitive; + + use crate::plugin_single_hair::PluginSingleHair; + use crate::plugin_union_find::PluginUnionFind; + use crate::plugin::PluginVec; + use crate::model_hypergraph::ModelHyperGraph; + + #[test] + fn dual_module_parallel_tentative_test_1() + { + // cargo test dual_module_parallel_tentative_test_1 -- --nocapture + let visualize_filename = "dual_module_parallel_tentative_test_1.json".to_string(); + let weight = 600; // do not change, the data is hard-coded + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + // let weight = 600; // do not change, the data is hard-coded + // let pxy = 0.0602828812732227; + // let code = CodeCapacityTailoredCode::new(7, pxy, 0.1, weight); // do not change probabilities: the data is hard-coded + let mut visualizer = Visualizer::new( + Some(visualize_data_folder() + visualize_filename.as_str()), + code.get_positions(), + true, + ) + .unwrap(); + print_visualize_link(visualize_filename); + visualizer.snapshot("code".to_string(), &code).unwrap(); + + // create dual module + let model_graph = code.get_model_graph(); + let initializer = &model_graph.initializer; + let mut partition_config = PartitionConfig::new(initializer.vertex_num); + partition_config.partitions = vec![ + VertexRange::new(0, 18), // unit 0 + VertexRange::new(24, 42), // unit 1 + ]; + partition_config.fusions = vec![ + (0, 1), // unit 2, by fusing 0 and 1 + ]; + let a = partition_config.dag_partition_units.add_node(()); + let b = partition_config.dag_partition_units.add_node(()); + partition_config.dag_partition_units.add_edge(a, b, false); + + let partition_info = partition_config.info(); + + // create dual module + let decoding_graph = DecodingHyperGraph::new_defects(model_graph.clone(), vec![3, 29, 30]); + let mut dual_module: DualModuleParallel>, FutureObstacleQueue> = + DualModuleParallel::new_config(&initializer, &partition_info, DualModuleParallelConfig::default()); + dual_module.static_fuse_all(); + + // try to work on a simple syndrome + let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph.syndrome_pattern, &mut dual_module); + + // println!("interface_ptr json: {}", interface_ptr.snapshot(false)); + // println!("dual_module json: {}", dual_module.snapshot(false)); + + visualizer + .snapshot_combined("syndrome".to_string(), vec![&interface_ptr, &dual_module]) + .unwrap(); + + println!("done first visualization"); + + // // grow them each by half + let begin_time = std::time::Instant::now(); + let dual_node_3_ptr = interface_ptr.read_recursive().nodes[0].clone(); + let dual_node_12_ptr = interface_ptr.read_recursive().nodes[1].clone(); + let dual_node_30_ptr = interface_ptr.read_recursive().nodes[2].clone(); + dual_module.set_grow_rate(&dual_node_3_ptr, Rational::from_usize(1).unwrap()); + dual_module.set_grow_rate(&dual_node_12_ptr, Rational::from_usize(1).unwrap()); + dual_module.set_grow_rate(&dual_node_30_ptr, Rational::from_usize(1).unwrap()); + + dual_module.grow(Rational::from_usize(weight / 2).unwrap()); + // dual_module.debug_update_all(&interface_ptr.read_recursive().nodes); + + println!("start second visualization"); + + visualizer + .snapshot_combined("grow".to_string(), vec![&interface_ptr, &dual_module]) + .unwrap(); + + // cluster becomes solved + dual_module.grow(Rational::from_usize(weight / 2).unwrap()); + visualizer + .snapshot_combined("solved".to_string(), vec![&interface_ptr, &dual_module]) + .unwrap(); + let end_time = std::time::Instant::now(); + let resolve_time = end_time - begin_time; + + // the result subgraph + let subgraph = vec![dual_module.get_edge_ptr(15).downgrade(), dual_module.get_edge_ptr(20).downgrade()]; + visualizer + .snapshot_combined("subgraph".to_string(), vec![&interface_ptr, &dual_module, &subgraph]) + .unwrap(); + println!("resolve time {:?}", resolve_time); + + } + + + #[allow(clippy::too_many_arguments)] + pub fn dual_module_parallel_basic_standard_syndrome_optional_viz( + _code: impl ExampleCode, + defect_vertices: Vec, + final_dual: Weight, + plugins: PluginVec, + growing_strategy: GrowingStrategy, + mut dual_module: impl DualModuleImpl + MWPSVisualizer, + model_graph: Arc, + mut visualizer: Option, + ) -> ( + DualModuleInterfacePtr, + PrimalModuleSerial, + impl DualModuleImpl + MWPSVisualizer, + ) { + // create primal module + let mut primal_module = PrimalModuleSerial::new_empty(&model_graph.initializer); + primal_module.growing_strategy = growing_strategy; + primal_module.plugins = Arc::new(plugins); + // primal_module.config = serde_json::from_value(json!({"timeout":1})).unwrap(); + // try to work on a simple syndrome + let decoding_graph = DecodingHyperGraph::new_defects(model_graph, defect_vertices.clone()); + let interface_ptr = DualModuleInterfacePtr::new(); + + let begin_time = std::time::Instant::now(); + primal_module.solve_visualizer( + &interface_ptr, + decoding_graph.syndrome_pattern.clone(), + &mut dual_module, + visualizer.as_mut(), + ); + + let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, 0); + let end_time = std::time::Instant::now(); + let resolve_time = begin_time - end_time; + println!("resolve time: {:?}", resolve_time); + if let Some(visualizer) = visualizer.as_mut() { + visualizer + .snapshot_combined( + "subgraph".to_string(), + vec![&interface_ptr, &dual_module, &subgraph, &weight_range], + ) + .unwrap(); + } + // assert!( + // decoding_graph + // .model_graph + // .matches_subgraph_syndrome(&subgraph, &defect_vertices), + // "the result subgraph is invalid" + // ); + // assert_eq!( + // Rational::from_usize(final_dual).unwrap(), + // weight_range.upper, + // "unmatched sum dual variables" + // ); + // assert_eq!( + // Rational::from_usize(final_dual).unwrap(), + // weight_range.lower, + // "unexpected final dual variable sum" + // ); + (interface_ptr, primal_module, dual_module) + } + + pub fn dual_module_parallel_basic_standard_syndrome( + code: impl ExampleCode, + visualize_filename: String, + defect_vertices: Vec, + final_dual: Weight, + plugins: PluginVec, + growing_strategy: GrowingStrategy, + initializer: &Arc, + partition_info: PartitionInfo, + model_graph: &Arc, + ) -> ( + DualModuleInterfacePtr, + PrimalModuleSerial, + impl DualModuleImpl + MWPSVisualizer, + ) { + println!("{defect_vertices:?}"); + let visualizer = { + let visualizer = Visualizer::new( + Some(visualize_data_folder() + visualize_filename.as_str()), + code.get_positions(), + true, + ) + .unwrap(); + print_visualize_link(visualize_filename.clone()); + visualizer + }; + + // create dual module + let mut dual_module_parallel_config = DualModuleParallelConfig::default(); + dual_module_parallel_config.enable_parallel_execution = true; + let mut dual_module: DualModuleParallel>, FutureObstacleQueue> = + DualModuleParallel::new_config(&initializer, &partition_info, dual_module_parallel_config); + dual_module.static_fuse_all(); + // let mut dual_module: DualModulePQ> = DualModulePQ::new_empty(&model_graph.initializer); + + dual_module_parallel_basic_standard_syndrome_optional_viz( + code, + defect_vertices, + final_dual, + plugins, + growing_strategy, + dual_module, + model_graph.clone(), + Some(visualizer), + ) + } + + /// test a simple case, split into 2, no defect vertex in boundary-unit, clusters do not grow into other units + #[test] + fn dual_module_parallel_basic_test_2() { + // cargo test dual_module_parallel_basic_test_2 -- --nocapture + let visualize_filename = "dual_module_parallel_basic_test_2.json".to_string(); + let weight = 1; // do not change, the data is hard-coded + // let pxy = 0.0602828812732227; + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + let defect_vertices = vec![2, 35]; + + // create model graph + let model_graph = code.get_model_graph(); + let initializer = &model_graph.initializer; + let mut partition_config = PartitionConfig::new(initializer.vertex_num); + partition_config.partitions = vec![ + VertexRange::new(0, 18), // unit 0 + VertexRange::new(24, 42), // unit 1 + ]; + partition_config.fusions = vec![ + (0, 1), // unit 2, by fusing 0 and 1 + ]; + let a = partition_config.dag_partition_units.add_node(()); + let b = partition_config.dag_partition_units.add_node(()); + partition_config.dag_partition_units.add_edge(a, b, false); + partition_config.defect_vertices = BTreeSet::from_iter(defect_vertices.clone()); + + let partition_info = partition_config.info(); + + dual_module_parallel_basic_standard_syndrome( + code, + visualize_filename, + defect_vertices, + 4, + vec![], + GrowingStrategy::ModeBased, + initializer, + partition_info, + &model_graph, + ); + } + + /// test a simple case, split into 2, a defect vertex in boundary-unit, clusters do grow into other units + #[test] + fn dual_module_parallel_basic_test_3() { + // cargo test dual_module_parallel_basic_test_3 -- --nocapture + let visualize_filename = "dual_module_parallel_basic_test_3.json".to_string(); + let weight = 1; // do not change, the data is hard-coded + // let pxy = 0.0602828812732227; + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + let defect_vertices = vec![19, 35]; + + // create model graph + let model_graph = code.get_model_graph(); + let initializer = &model_graph.initializer; + let mut partition_config = PartitionConfig::new(initializer.vertex_num); + partition_config.partitions = vec![ + VertexRange::new(0, 18), // unit 0 + VertexRange::new(24, 42), // unit 1 + ]; + partition_config.fusions = vec![ + (0, 1), // unit 2, by fusing 0 and 1 + ]; + let a = partition_config.dag_partition_units.add_node(()); + let b = partition_config.dag_partition_units.add_node(()); + partition_config.dag_partition_units.add_edge(a, b, false); + partition_config.defect_vertices = BTreeSet::from_iter(defect_vertices.clone()); + + let partition_info = partition_config.info(); + + + dual_module_parallel_basic_standard_syndrome( + code, + visualize_filename, + defect_vertices, + 3, + vec![], + GrowingStrategy::ModeBased, + initializer, + partition_info, + &model_graph, + ); + } + + /// test a simple case, split into 2, a defect vertex in boundary-unit, clusters grow into other units + #[test] + fn dual_module_parallel_basic_test_4() { + // cargo test dual_module_parallel_basic_test_4 -- --nocapture + let visualize_filename = "dual_module_parallel_basic_test_4.json".to_string(); + let weight = 1; // do not change, the data is hard-coded + // let pxy = 0.0602828812732227; + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + let defect_vertices = vec![16, 19, 29, 32, 39]; + + // create model graph + let model_graph = code.get_model_graph(); + let initializer = &model_graph.initializer; + let mut partition_config = PartitionConfig::new(initializer.vertex_num); + partition_config.partitions = vec![ + VertexRange::new(0, 18), // unit 0 + VertexRange::new(24, 42), // unit 1 + ]; + partition_config.fusions = vec![ + (0, 1), // unit 2, by fusing 0 and 1 + ]; + let a = partition_config.dag_partition_units.add_node(()); + let b = partition_config.dag_partition_units.add_node(()); + partition_config.dag_partition_units.add_edge(a, b, false); + partition_config.defect_vertices = BTreeSet::from_iter(defect_vertices.clone()); + + let partition_info = partition_config.info(); + + dual_module_parallel_basic_standard_syndrome( + code, + visualize_filename, + defect_vertices, + 5, + vec![], + GrowingStrategy::ModeBased, + initializer, + partition_info, + &model_graph, + ); + } + + /// test a simple case, split into 4, a defect vertex in boundary-unit, clusters grow into other units + #[test] + fn dual_module_parallel_basic_test_5() { + // cargo test dual_module_parallel_basic_test_5 -- --nocapture + let visualize_filename = "dual_module_parallel_basic_test_5.json".to_string(); + let weight = 1; // do not change, the data is hard-coded + // let pxy = 0.0602828812732227; + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + let defect_vertices = vec![16, 19, 28]; + + // create model graph + let model_graph = code.get_model_graph(); + let initializer = &model_graph.initializer; + let mut partition_config = PartitionConfig::new(initializer.vertex_num); + partition_config.partitions = vec![ + VertexRange::new(0, 6), // unit 0 + VertexRange::new(12, 18), // unit 1 + VertexRange::new(24, 30), // unit 2 + VertexRange::new(36, 42), // unit 3 + ]; + partition_config.fusions = vec![ + (0, 1), // unit 4, by fusing 0 and 1 + (1, 2), // unit 5, + (2, 3), // unit 6 + ]; + let a = partition_config.dag_partition_units.add_node(()); + let b = partition_config.dag_partition_units.add_node(()); + let c = partition_config.dag_partition_units.add_node(()); + let d = partition_config.dag_partition_units.add_node(()); + partition_config.dag_partition_units.add_edge(a, b, false); + partition_config.dag_partition_units.add_edge(b, c, false); + partition_config.dag_partition_units.add_edge(c, d, false); + + partition_config.defect_vertices = BTreeSet::from_iter(defect_vertices.clone()); + + let partition_info = partition_config.info(); + + dual_module_parallel_basic_standard_syndrome( + code, + visualize_filename, + defect_vertices, + 4, + vec![], + GrowingStrategy::ModeBased, + initializer, + partition_info, + &model_graph, + ); + } + + + /// test for time partition + #[allow(clippy::unnecessary_cast)] + pub fn graph_time_partition(initializer: &SolverInitializer, positions: &Vec, defect_vertices: &Vec, split_num: usize) -> PartitionConfig { + assert!(positions.len() > 0, "positive number of positions"); + let mut partition_config = PartitionConfig::new(initializer.vertex_num); + let mut last_t = positions[0].t; + let mut t_list: Vec = vec![]; + t_list.push(last_t); + for position in positions { + assert!(position.t >= last_t, "t not monotonically increasing, vertex reordering must be performed before calling this"); + if position.t != last_t { + t_list.push(position.t); + } + last_t = position.t; + } + + // pick the t value in the middle to split it + println!("t_list first: {:?}, t_list last: {:?}", t_list[0], t_list.last().unwrap()); + let mut t_split_vec: Vec = vec![0.0; split_num - 1]; + for i in 0..(split_num - 1) { + let index: usize = t_list.len()/split_num * (i + 1); + t_split_vec[i] = t_list[index]; + } + println!("t_split_vec: {:?}", t_split_vec); + + // find the vertices indices + let mut split_start_index_vec = vec![MAX; split_num - 1]; + let mut split_end_index_vec = vec![MAX; split_num - 1]; + let mut start_index = 0; + let mut end_index = 0; + for (vertex_index, position) in positions.iter().enumerate() { + if start_index < split_num - 1 { + if split_start_index_vec[start_index] == MAX && position.t == t_split_vec[start_index] { + split_start_index_vec[start_index] = vertex_index; + if start_index != 0 { + end_index += 1; + } + start_index += 1; + } + } + + if end_index < split_num - 1 { + if position.t == t_split_vec[end_index] { + split_end_index_vec[end_index] = vertex_index + 1; + // end_index += 1; + } + } + } + + println!("split_start_index_vec: {:?}", split_start_index_vec); + println!("split_end_index_vec: {:?}", split_end_index_vec); + assert!(split_start_index_vec.iter().all(|&x| x != MAX), "Some elements in split_start_index_vec are equal to MAX"); + + // partitions are found + let mut graph_nodes = vec![]; + let mut partitions_vec = vec![]; + for i in 0..split_num { + if i == 0 { + partitions_vec.push(VertexRange::new(0, split_start_index_vec[0])); + } else if i == split_num - 1 { + partitions_vec.push(VertexRange::new(split_end_index_vec[i - 1], positions.len())); + } else { + partitions_vec.push(VertexRange::new(split_end_index_vec[i - 1], split_start_index_vec[i])); + } + + if i < split_num - 1 { + partition_config.fusions.push((i, i+1)); + } + + let a = partition_config.dag_partition_units.add_node(()); + graph_nodes.push(a.clone()); + } + partition_config.partitions = partitions_vec; + + for i in 0..split_num { + if i < split_num - 1 { + partition_config.dag_partition_units.add_edge(graph_nodes[i], graph_nodes[i+1], false); + } + } + partition_config.defect_vertices = BTreeSet::from_iter(defect_vertices.clone()); + + partition_config + } + + pub fn dual_module_parallel_evaluation_qec_playground_helper( + code: impl ExampleCode, + visualize_filename: String, + defect_vertices: Vec, + final_dual: Weight, + plugins: PluginVec, + growing_strategy: GrowingStrategy, + split_num: usize, + ) -> ( + DualModuleInterfacePtr, + PrimalModuleSerial, + impl DualModuleImpl + MWPSVisualizer, + ) { + println!("{defect_vertices:?}"); + let visualizer = { + let visualizer = Visualizer::new( + Some(visualize_data_folder() + visualize_filename.as_str()), + code.get_positions(), + true, + ) + .unwrap(); + print_visualize_link(visualize_filename.clone()); + visualizer + }; + + // create dual module + let model_graph = code.get_model_graph(); + let initializer = &model_graph.initializer; + let partition_config = graph_time_partition(&initializer, &code.get_positions(), &defect_vertices, split_num); + let partition_info = partition_config.info(); + + + // create dual module + // let decoding_graph = DecodingHyperGraph::new_defects(model_graph.clone(), vec![3, 29, 30]); + let mut dual_module_parallel_config = DualModuleParallelConfig::default(); + dual_module_parallel_config.enable_parallel_execution = true; + let mut dual_module: DualModuleParallel>, FutureObstacleQueue> = + DualModuleParallel::new_config(&initializer, &partition_info, dual_module_parallel_config); + dual_module.static_fuse_all(); + + dual_module_parallel_basic_standard_syndrome_optional_viz( + code, + defect_vertices, + final_dual, + plugins, + growing_strategy, + dual_module, + model_graph, + Some(visualizer), + ) + } + + #[test] + fn dual_module_parallel_circuit_level_noise_qec_playground_1() { + // cargo test dual_module_parallel_circuit_level_noise_qec_playground_1 -- --nocapture + let config = json!({ + "code_type": qecp::code_builder::CodeType::RotatedPlanarCode, + "nm": 18, + }); + + let code = QECPlaygroundCode::new(3, 0.1, config); + let defect_vertices = vec![3, 10, 18, 19, 31]; + + let visualize_filename = "dual_module_parallel_circuit_level_noise_qec_playground_1.json".to_string(); + dual_module_parallel_evaluation_qec_playground_helper( + code, + visualize_filename, + defect_vertices, + 1661019, + vec![], + GrowingStrategy::ModeBased, + 2, + ); + } + + /// test solver on circuit level noise with random errors, split into 2 + #[test] + fn dual_module_parallel_circuit_level_noise_qec_playground_2() { + // cargo test dual_module_parallel_circuit_level_noise_qec_playground_2 -- --nocapture + let config = json!({ + "code_type": qecp::code_builder::CodeType::RotatedPlanarCode + }); + + let mut code = QECPlaygroundCode::new(7, 0.005, config); + let defect_vertices = code.generate_random_errors(132).0.defect_vertices; + + let visualize_filename = "dual_module_parallel_circuit_level_noise_qec_playground_2.json".to_string(); + dual_module_parallel_evaluation_qec_playground_helper( + code, + visualize_filename, + defect_vertices.clone(), + 2424788, + vec![], + GrowingStrategy::ModeBased, + 2, + ); + } + + /// test solver on circuit level noise with random errors, split into 4 + #[test] + fn dual_module_parallel_circuit_level_noise_qec_playground_3() { + // cargo test dual_module_parallel_circuit_level_noise_qec_playground_3 -- --nocapture + let config = json!({ + "code_type": qecp::code_builder::CodeType::RotatedPlanarCode, + "nm": 18, + }); + + let mut code = QECPlaygroundCode::new(7, 0.005, config); + let defect_vertices = code.generate_random_errors(132).0.defect_vertices; + + let visualize_filename = "dual_module_parallel_circuit_level_noise_qec_playground_3.json".to_string(); + dual_module_parallel_evaluation_qec_playground_helper( + code, + visualize_filename, + defect_vertices.clone(), + 2424788, + vec![], + GrowingStrategy::ModeBased, + 8, + ); + } +} \ No newline at end of file diff --git a/src/dual_module_pq.rs b/src/dual_module_pq.rs index 8bb64a19..40aff106 100644 --- a/src/dual_module_pq.rs +++ b/src/dual_module_pq.rs @@ -5,11 +5,16 @@ //! Only debug tests are failing, which aligns with the dual_module_serial behavior //! -use crate::dual_module::*; +use color_print::cprintln; use crate::num_traits::{ToPrimitive, Zero}; +use crate::ordered_float::OrderedFloat; use crate::pointers::*; +use crate::primal_module::Affinity; +use crate::primal_module_serial::PrimalClusterPtr; use crate::util::*; use crate::visualize::*; +use crate::{add_shared_methods, dual_module::*}; +use std::sync::Arc; use std::{ cmp::{Ordering, Reverse}, @@ -17,12 +22,18 @@ use std::{ }; use derivative::Derivative; -use itertools::Itertools; +use hashbrown::hash_map::Entry; +use hashbrown::HashMap; +use heapz::RankPairingHeap; +use heapz::{DecreaseKey, Heap}; +use itertools::partition; use num_traits::{FromPrimitive, Signed}; use parking_lot::{lock_api::RwLockWriteGuard, RawRwLock}; +use pheap::PairingHeap; +use priority_queue::PriorityQueue; /* Helper structs for events/obstacles during growing */ -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct FutureEvent { /// when the event will happen pub time: T, @@ -50,23 +61,53 @@ impl PartialOrd for FutureEvent { } } -#[derive(PartialEq, Eq, Debug)] +#[derive(Debug, Clone)] pub enum Obstacle { - Conflict { edge_index: EdgeIndex }, + Conflict { edge_ptr: EdgePtr }, ShrinkToZero { dual_node_ptr: DualNodePtr }, } +// implement hash for Obstacle +impl std::hash::Hash for Obstacle { + fn hash(&self, state: &mut H) { + match self { + Obstacle::Conflict { edge_ptr } => { + state.write_u8(0); + edge_ptr.hash(state); + } + Obstacle::ShrinkToZero { dual_node_ptr } => { + // (1, dual_node_ptr).hash(state); // todo: perhaps swap to using OrderedDualNodePtr + state.write_u8(1); + dual_node_ptr.hash(state); + } + } + } +} + +impl PartialEq for Obstacle { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Obstacle::Conflict { edge_ptr: e_1 }, Obstacle::Conflict { edge_ptr: e_2 }) => e_1.eq(e_2), + (Obstacle::ShrinkToZero { dual_node_ptr: d_1 }, Obstacle::ShrinkToZero { dual_node_ptr: d_2 }) => d_1.eq(d_2), + _ => false, + } + } +} + +impl Eq for Obstacle {} + impl Obstacle { /// return if the current obstacle is valid, only needed for pq that allows for invalid (duplicates that are different) events - fn is_valid + Default + std::fmt::Debug>( + fn is_valid + Default + std::fmt::Debug + Clone>( &self, - dual_module_pq: &DualModulePQ, + _dual_module_pq: &DualModulePQ, event_time: &Rational, // time associated with the obstacle ) -> bool { #[allow(clippy::unnecessary_cast)] match self { - Obstacle::Conflict { edge_index } => { - let edge = dual_module_pq.edges[*edge_index as usize].read_recursive(); + Obstacle::Conflict { edge_ptr } => { + let edge = edge_ptr.read_recursive(); + // cprintln!("Obstacle edge_ptr, edge_index: {:?}, edge.grow_rate: {:?}", edge.edge_index, edge.grow_rate); // not changing, cannot have conflict if !edge.grow_rate.is_positive() { return false; @@ -99,12 +140,134 @@ impl Obstacle { pub type FutureObstacle = FutureEvent; pub type MinBinaryHeap = BinaryHeap>; -pub type FutureObstacleQueue = MinBinaryHeap>; +pub type _FutureObstacleQueue = MinBinaryHeap>; + +pub type MinPriorityQueue = PriorityQueue>; +pub type FutureObstacleQueue = MinPriorityQueue; -pub trait FutureQueueMethods { - /// defines the behavior of `will_happen`, if the queue can contain invalid/duplicate events - const MAY_BE_INVALID: bool = true; +#[derive(Debug, Clone)] +pub struct PairingPQ { + pub container: HashMap, + pub heap: PairingHeap, +} +// implement default for PairingPQ +impl Default for PairingPQ { + fn default() -> Self { + Self { + container: HashMap::default(), + heap: PairingHeap::new(), + } + } +} + +impl + std::ops::SubAssign> + FutureQueueMethods for PairingPQ +{ + fn will_happen(&mut self, time: T, event: Obstacle) { + cprintln!(" will happen for PairingPQ"); + match self.container.entry(event.clone()) { + Entry::Vacant(entry) => { + entry.insert(time.clone()); + self.heap.insert(event, time); + } + Entry::Occupied(mut entry) => { + let old_time = entry.get().clone(); + *entry.get_mut() = time.clone(); + self.heap.decrease_prio(&event, time.clone() - old_time); + } + } + } + fn peek_event(&self) -> Option<(&T, &Obstacle)> { + self.heap.find_min().map(|future| (future.1, future.0)) + } + fn pop_event(&mut self) -> Option<(T, Obstacle)> { + let res = self.heap.delete_min().map(|future| (future.1, future.0)); + match &res { + Some((_, event)) => { + self.container.remove(event); + } + None => {} + } + res + } + fn clear(&mut self) { + self.container.clear(); + while !self.heap.is_empty() { + self.heap.delete_min(); + } + } + fn len(&self) -> usize { + self.heap.len() + } +} + +#[derive(Debug, Clone)] +pub struct RankPairingPQ { + pub container: HashMap, + pub heap: RankPairingHeap, +} + +impl Default for RankPairingPQ { + fn default() -> Self { + Self { + container: HashMap::default(), + heap: RankPairingHeap::multi_pass_min2(), + } + } +} + +impl FutureQueueMethods for RankPairingPQ { + fn will_happen(&mut self, time: T, event: Obstacle) { + cprintln!(" will happen for RankPairingPQ"); + if self.container.contains_key(&event) { + self.heap.update(&event, time.clone()); + self.container.insert(event, time); + } else { + self.heap.push(event.clone(), time.clone()); + self.container.insert(event, time); + } + } + fn peek_event(&self) -> Option<(&T, &Obstacle)> { + self.heap.top().map(|key| (self.container.get(key).unwrap(), key)) + } + fn pop_event(&mut self) -> Option<(T, Obstacle)> { + match self.heap.pop() { + None => None, + Some(key) => Some((self.container.remove(&key).unwrap(), key)), + } + } + fn clear(&mut self) { + self.container.clear(); + while !self.heap.is_empty() { + self.heap.pop(); + } + } + fn len(&self) -> usize { + self.heap.size() + } +} + +impl FutureQueueMethods for FutureObstacleQueue { + fn will_happen(&mut self, time: T, event: Obstacle) { + // cprintln!(" will happen for FutureObstacleQueue"); + self.push(event, Reverse(time)); + } + fn peek_event(&self) -> Option<(&T, &Obstacle)> { + self.peek().map(|future| (&future.1 .0, future.0)) + } + fn pop_event(&mut self) -> Option<(T, Obstacle)> { + self.pop().map(|future| (future.1 .0, future.0)) + } + fn clear(&mut self) { + self.clear(); + } + fn len(&self) -> usize { + self.len() + } +} + +pub trait FutureQueueMethods { /// Append an event at time T /// Note: this may have multiple distinct yet valid behaviors, e,g, weather there are duplicates allowed in the data strcture, default to allow fn will_happen(&mut self, time: T, event: E); @@ -117,10 +280,21 @@ pub trait FutureQueueMethods { /// clear for a queue fn clear(&mut self); + + /// length of the queue + fn len(&self) -> usize; + + /// is empty + fn is_empty(&self) -> bool { + self.len() == 0 + } } -impl FutureQueueMethods for MinBinaryHeap> { +impl FutureQueueMethods + for MinBinaryHeap> +{ fn will_happen(&mut self, time: T, event: E) { + cprintln!(" will happen for MinBinaryHeap"); self.push(Reverse(FutureEvent { time, event })) } fn peek_event(&self) -> Option<(&T, &E)> { @@ -132,6 +306,9 @@ impl FutureQueueMethods for MinBinaryHeap usize { + self.len() + } } /* Vertices and Edges */ @@ -143,8 +320,15 @@ pub struct Vertex { /// if a vertex is defect, then [`Vertex::propagated_dual_node`] always corresponds to that root pub is_defect: bool, /// all neighbor edges, in surface code this should be constant number of edges - #[derivative(Debug = "ignore")] + // #[derivative(Debug = "ignore")] pub edges: Vec, + /// whether this vertex is a mirrored vertex. Note that all the vertices on the boundary (including those in boundary-unit) are mirrored vertices + pub is_mirror: bool, + /// whether fusion is completed. This relies on the assumption that all units that have this vertex have been fused together + pub fusion_done: bool, + /// if this vertex is in boundary unit, find its corresponding mirror vertices in the other units. If this vertex is in non-boundary unit but a mirrored vertex, + /// find its other mirrored vertices in other units (both boundary and non-boundary units) + pub mirrored_vertices: Vec, } impl Vertex { @@ -171,26 +355,73 @@ impl std::fmt::Debug for VertexWeak { } } +impl Ord for VertexPtr { + fn cmp(&self, other: &Self) -> Ordering { + // compare the pointer address + let ptr1 = Arc::as_ptr(self.ptr()); + let ptr2 = Arc::as_ptr(other.ptr()); + // https://doc.rust-lang.org/reference/types/pointer.html + // "When comparing raw pointers they are compared by their address, rather than by what they point to." + ptr1.cmp(&ptr2) + } +} + +impl PartialOrd for VertexPtr { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl VertexPtr { + pub fn get_edge_neighbors(&self) -> Vec { + let vertex = self.read_recursive(); + if vertex.fusion_done && vertex.is_mirror { + let mut edges: Vec = vec![]; + edges.extend(vertex.edges.clone()); + for mirrored_vertex in vertex.mirrored_vertices.iter() { + edges.extend(mirrored_vertex.upgrade_force().read_recursive().edges.clone()); + } + // println!("incident edges of vertex {:?} are: {:?}", vertex.vertex_index, edges); + edges + } else { + vertex.edges.clone() + } + } +} + #[derive(Derivative)] #[derivative(Debug)] pub struct Edge { /// global edge index - edge_index: EdgeIndex, + pub edge_index: EdgeIndex, /// total weight of this edge - weight: Rational, - #[derivative(Debug = "ignore")] - vertices: Vec, + pub weight: Rational, + // #[derivative(Debug = "ignore")] + pub vertices: Vec, /// the dual nodes that contributes to this edge - dual_nodes: Vec, + pub dual_nodes: Vec, /* fields that are different from that of dual_module_serial, or slightly differently interpreted */ /// the speed of growth, at the current time /// Note: changing this should cause the `growth_at_last_updated_time` and `last_updated_time` to update - grow_rate: Rational, + pub grow_rate: Rational, /// the last time this Edge is synced/updated with the global time - last_updated_time: Rational, + pub last_updated_time: Rational, /// growth value at the last updated time, also, growth_at_last_updated_time <= weight - growth_at_last_updated_time: Rational, + pub growth_at_last_updated_time: Rational, + + /// the partition unit this edge belongs to. For non-parallel implementation, this value is set to None. + pub unit_index: Option, + + /// whether this edge is connected to a boundary vertex, (this edges must belong to non-boundary unit) + pub connected_to_boundary_vertex: bool, + + // /// pointer to the global time of its corresponding unit, for parallelization purpose + // pub global_time: ArcRwLock, + + #[cfg(feature = "incr_lp")] + /// storing the weights of the clusters that are currently contributing to this edge + cluster_weights: hashbrown::HashMap, } impl Edge { @@ -198,6 +429,8 @@ impl Edge { self.growth_at_last_updated_time = Rational::zero(); self.last_updated_time = Rational::zero(); self.dual_nodes.clear(); + #[cfg(feature = "incr_lp")] + self.cluster_weights.clear(); } } @@ -207,11 +440,17 @@ pub type EdgeWeak = WeakRwLock; impl std::fmt::Debug for EdgePtr { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let edge = self.read_recursive(); - write!( - f, - "[edge: {}]: weight: {}, grow_rate: {}, growth_at_last_updated_time: {}, last_updated_time: {}\n\tdual_nodes: {:?}", - edge.edge_index, edge.weight, edge.grow_rate, edge.growth_at_last_updated_time, edge.last_updated_time, edge.dual_nodes - ) + write!(f, "[edge: {}, edge.grow_rate: {}]", edge.edge_index, edge.grow_rate) + // write!( + // f, + // "[edge: {}]: weight: {}, grow_rate: {}, growth_at_last_updated_time: {}, last_updated_time: {}\n\tdual_nodes: {:?}\n", + // edge.edge_index, + // edge.weight, + // edge.grow_rate, + // edge.growth_at_last_updated_time, + // edge.last_updated_time, + // edge.dual_nodes.iter().filter(|node| !node.weak_ptr.upgrade_force().read_recursive().grow_rate.is_zero()).collect::>() + // ) } } @@ -219,15 +458,74 @@ impl std::fmt::Debug for EdgeWeak { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let edge_ptr = self.upgrade_force(); let edge = edge_ptr.read_recursive(); - write!( - f, - "[edge: {}]: weight: {}, grow_rate: {}, growth_at_last_updated_time: {}, last_updated_time: {}\n\tdual_nodes: {:?}", - edge.edge_index, edge.weight, edge.grow_rate, edge.growth_at_last_updated_time, edge.last_updated_time, edge.dual_nodes - ) + write!(f, "[edge: {}", edge.edge_index) + // write!( + // f, + // "[edge: {}]: weight: {}, grow_rate: {}, growth_at_last_updated_time: {}, last_updated_time: {}\n\tdual_nodes: {:?}\n", + // edge.edge_index, edge.weight, edge.grow_rate, edge.growth_at_last_updated_time, edge.last_updated_time, edge.dual_nodes.iter().filter(|node| !node.weak_ptr.upgrade_force().read_recursive().grow_rate.is_zero()).collect::>() + // ) + } +} + +impl Ord for EdgePtr { + fn cmp(&self, other: &Self) -> Ordering { + // let edge_1 = self.read_recursive(); + // let edge_2 = other.read_recursive(); + // edge_1.edge_index.cmp(&edge_2.edge_index) + // compare the pointer address + let ptr1 = Arc::as_ptr(self.ptr()); + let ptr2 = Arc::as_ptr(other.ptr()); + // https://doc.rust-lang.org/reference/types/pointer.html + // "When comparing raw pointers they are compared by their address, rather than by what they point to." + ptr1.cmp(&ptr2) + } +} + +impl PartialOrd for EdgePtr { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for EdgeWeak { + fn cmp(&self, other: &Self) -> Ordering { + // let edge_1 = self.upgrade_force().read_recursive(); + // let edge_2 = other.upgrade_force().read_recursive(); + // edge_1.edge_index.cmp(&edge_2.edge_index) + // self.upgrade_force().read_recursive().edge_index.cmp(&other.upgrade_force().read_recursive().edge_index) + // compare the pointer address + let ptr1 = Arc::as_ptr(self.upgrade_force().ptr()); + let ptr2 = Arc::as_ptr(other.upgrade_force().ptr()); + // https://doc.rust-lang.org/reference/types/pointer.html + // "When comparing raw pointers they are compared by their address, rather than by what they point to." + ptr1.cmp(&ptr2) + } +} + +impl PartialOrd for EdgeWeak { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl EdgePtr { + pub fn get_vertex_neighbors(&self) -> Vec { + let edge = self.read_recursive(); + let mut incident_vertices: Vec = vec![]; + for vertex_weak in edge.vertices.iter() { + let vertex_ptr = vertex_weak.upgrade_force(); + let vertex = vertex_ptr.read_recursive(); + incident_vertices.push(vertex_weak.clone()); + if vertex.is_mirror && vertex.fusion_done { + incident_vertices.extend(vertex.mirrored_vertices.clone()); + } + } + return incident_vertices; } } /* the actual dual module */ +#[derive(Clone)] pub struct DualModulePQ where Queue: FutureQueueMethods + Default + std::fmt::Debug, @@ -242,24 +540,42 @@ where /// the global time of this dual module /// Note: Wrap-around edge case is not currently considered global_time: ArcRwLock, + + /// the current mode of the dual module + /// note: currently does not have too much functionality + mode: DualModuleMode, + /// the number of all vertices (including those partitioned into other serial module) + pub vertex_num: VertexNum, + /// the number of all edges (including those partitioned into other seiral module) + pub edge_num: usize, + /// all mirrored vertices of this unit, mainly for parallel implementation + pub all_mirrored_vertices: Vec, + + /// all defect vertices (including those mirrored vertices) in this unit + pub all_defect_vertices: Vec, } impl DualModulePQ where - Queue: FutureQueueMethods + Default + std::fmt::Debug, + Queue: FutureQueueMethods + Default + std::fmt::Debug + Clone, { /// helper function to bring an edge update to speed with current time if needed - fn update_edge_if_necessary(&self, edge: &mut RwLockWriteGuard) { + fn update_edge_if_necessary(&self, edge_ptr: &EdgePtr) { let global_time = self.global_time.read_recursive(); + // let global_time_ptr = &edge.global_time; + // let global_time = global_time_ptr.read_recursive(); + let mut edge = edge_ptr.write(); + // let global_time_ptr = edge.global_time.clone(); + // let global_time = global_time_ptr.read_recursive(); if edge.last_updated_time == global_time.clone() { // the edge is not behind return; } - debug_assert!( - global_time.clone() >= edge.last_updated_time, - "global time is behind, maybe a wrap-around has happened" - ); + // debug_assert!( + // global_time.clone() >= edge.last_updated_time, + // "global time is behind, maybe a wrap-around has happened" + // ); let time_diff = global_time.clone() - &edge.last_updated_time; let newly_grown_amount = &time_diff * &edge.grow_rate; @@ -269,20 +585,24 @@ where edge.growth_at_last_updated_time <= edge.weight, "growth larger than weight: check if events are 1) inserted and 2) handled correctly" ); + drop(edge); } /// helper function to bring a dual node update to speed with current time if needed - fn update_dual_node_if_necessary(&mut self, node: &mut RwLockWriteGuard) { + fn update_dual_node_if_necessary(&mut self, node_ptr: &DualNodePtr) { let global_time = self.global_time.read_recursive(); + let mut node = node_ptr.write(); + // let global_time_ptr = node.global_time.clone().unwrap(); + // let global_time = global_time_ptr.read_recursive(); if node.last_updated_time == global_time.clone() { // the edge is not behind return; } - debug_assert!( - global_time.clone() >= node.last_updated_time, - "global time is behind, maybe a wrap-around has happened" - ); + // debug_assert!( + // global_time.clone() >= node.last_updated_time, + // "global time is behind, maybe a wrap-around has happened" + // ); let dual_variable = node.get_dual_variable(); node.set_dual_variable(dual_variable); @@ -297,14 +617,14 @@ where #[allow(dead_code)] fn debug_update_all(&mut self, dual_node_ptrs: &[DualNodePtr]) { // updating all edges - for edge in self.edges.iter() { - let mut edge = edge.write(); - self.update_edge_if_necessary(&mut edge); + for edge_ptr in self.edges.iter() { + // let mut edge = edge.write(); + self.update_edge_if_necessary(&edge_ptr); } // updating all dual nodes for dual_node_ptr in dual_node_ptrs.iter() { - let mut dual_node = dual_node_ptr.write(); - self.update_dual_node_if_necessary(&mut dual_node); + // let mut dual_node = dual_node_ptr.write(); + self.update_dual_node_if_necessary(&dual_node_ptr); } } } @@ -314,7 +634,7 @@ pub type DualModulePQWeak = WeakRwLock>; impl DualModuleImpl for DualModulePQ where - Queue: FutureQueueMethods + Default + std::fmt::Debug, + Queue: FutureQueueMethods + Default + std::fmt::Debug + Clone, { /// initialize the dual module, which is supposed to be reused for multiple decoding tasks with the same structure #[allow(clippy::unnecessary_cast)] @@ -327,9 +647,14 @@ where vertex_index, is_defect: false, edges: vec![], + is_mirror: false, // set to false for non-parallel implementation + fusion_done: false, // set to false for non-parallel implementation + mirrored_vertices: vec![], // set to empty for non-parallel implementation }) }) .collect(); + // set global time + let global_time = ArcRwLock::new_value(Rational::zero()); // set edges let mut edges = Vec::::new(); for hyperedge in initializer.weighted_edges.iter() { @@ -345,6 +670,10 @@ where last_updated_time: Rational::zero(), growth_at_last_updated_time: Rational::zero(), grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), }); for &vertex_index in hyperedge.vertices.iter() { vertices[vertex_index as usize].write().edges.push(edge_ptr.downgrade()); @@ -355,7 +684,12 @@ where vertices, edges, obstacle_queue: Queue::default(), - global_time: ArcRwLock::new_value(Rational::zero()), + global_time: global_time.clone(), + mode: DualModuleMode::default(), + vertex_num: initializer.vertex_num, + edge_num: initializer.weighted_edges.len(), + all_mirrored_vertices: vec![], + all_defect_vertices: vec![], // used only for parallel implementation } } @@ -366,6 +700,7 @@ where self.obstacle_queue.clear(); self.global_time.write().set_zero(); + self.mode_mut().reset(); } #[allow(clippy::unnecessary_cast)] @@ -377,24 +712,27 @@ where dual_node.invalid_subgraph.vertices.len() == 1, "defect node (without edges) should only work on a single vertex, for simplicity" ); - let vertex_index = dual_node.invalid_subgraph.vertices.iter().next().unwrap(); - let mut vertex = self.vertices[*vertex_index as usize].write(); - assert!(!vertex.is_defect, "defect should not be added twice"); - vertex.is_defect = true; + // let vertex_ptr = dual_node.invalid_subgraph.vertices.iter().next().unwrap(); + // let mut vertex = vertex_ptr.write(); + // let mut vertex = self.vertices[*vertex_index as usize].write(); + // assert!(!vertex.is_defect, "defect should not be added twice"); + // vertex.is_defect = true; drop(dual_node); - drop(vertex); + // drop(vertex); self.add_dual_node(dual_node_ptr); } #[allow(clippy::unnecessary_cast)] /// Mostly invoked by `add_defect_node`, triggering a pq update, and edges updates fn add_dual_node(&mut self, dual_node_ptr: &DualNodePtr) { - dual_node_ptr.write().init_time(self.global_time.clone()); + dual_node_ptr.write().init_time(self.global_time.clone()); // should be okay because a dual node will only be added in its own unit let global_time = self.global_time.read_recursive(); let dual_node_weak = dual_node_ptr.downgrade(); let dual_node = dual_node_ptr.read_recursive(); + // cprintln!("`fn add_dual_node()`, dual_node_ptr: {:?}", dual_node_ptr); if dual_node.grow_rate.is_negative() { + // cprintln!("dual_node.grow_rate is negative"); self.obstacle_queue.will_happen( // it is okay to use global_time now, as this must be up-to-speed dual_node.get_dual_variable().clone() / (-dual_node.grow_rate.clone()) + global_time.clone(), @@ -403,117 +741,205 @@ where }, ); } - - for &edge_index in dual_node.invalid_subgraph.hair.iter() { - let mut edge = self.edges[edge_index as usize].write(); + // drop(global_time); + let dual_node_grow_rate = dual_node.grow_rate; + // println!("dual_node_grow_rate: {:?}", dual_node_grow_rate); + for edge_ptr in dual_node.invalid_subgraph.hair.iter() { + // let mut edge = self.edges[edge_index as usize].write(); + // let mut edge = edge_ptr.write(); // should make sure the edge is up-to-speed before making its variables change - self.update_edge_if_necessary(&mut edge); + self.update_edge_if_necessary(&edge_ptr); + + let mut edge = edge_ptr.write(); + // println!("edge.grow_rate before: {:?}", edge.grow_rate); + edge.grow_rate += dual_node_grow_rate; + // println!("edge.grow_rate after: {:?}", edge.grow_rate); + edge.dual_nodes + .push(OrderedDualNodeWeak::new(dual_node.index, dual_node_weak.clone())); + drop(edge); + let edge = edge_ptr.read_recursive(); - edge.grow_rate += &dual_node.grow_rate; - edge.dual_nodes.push(dual_node_weak.clone()); + // let global_time_ptr = edge.global_time.clone(); + // let global_time = global_time_ptr.read_recursive(); + // cprintln!("`fn add_dual_node()`, edge_index: {:?}, edge.grow_rate {:?}", edge.edge_index, edge.grow_rate); if edge.grow_rate.is_positive() { + // cprintln!("edge.grow_Rate is positive"); self.obstacle_queue.will_happen( // it is okay to use global_time now, as this must be up-to-speed (edge.weight.clone() - edge.growth_at_last_updated_time.clone()) / edge.grow_rate.clone() + global_time.clone(), - Obstacle::Conflict { edge_index }, + Obstacle::Conflict { edge_ptr: edge_ptr.clone() }, ); + + // println!("self.obstacle_Queue: {:?}", self.obstacle_queue); + + // if let Some((time, event)) = self.obstacle_queue.peek_event() { + // match event { + // Obstacle::Conflict { edge_ptr } => println!("peek event edge_ptr: {:?}, edge.grow_rate: {:?}", edge_ptr.read_recursive().edge_index, edge_ptr.read_recursive().grow_rate), + // Obstacle::ShrinkToZero { dual_node_ptr } => (), + // } + // } } } + // println!("self.obstacle_Queue: {:?}", self.obstacle_queue); + } + + #[allow(clippy::unnecessary_cast)] + fn add_dual_node_tune(&mut self, dual_node_ptr: &DualNodePtr) { + let dual_node_weak = dual_node_ptr.downgrade(); + let dual_node = dual_node_ptr.read_recursive(); + + for edge_ptr in dual_node.invalid_subgraph.hair.iter() { + // let mut edge = self.edges[edge_index as usize].write(); + let mut edge = edge_ptr.write(); + + edge.grow_rate += &dual_node.grow_rate; + edge.dual_nodes + .push(OrderedDualNodeWeak::new(dual_node.index, dual_node_weak.clone())); + } } #[allow(clippy::unnecessary_cast)] fn set_grow_rate(&mut self, dual_node_ptr: &DualNodePtr, grow_rate: Rational) { + // let mut dual_node = dual_node_ptr.write(); + // println!("set_grow_rate invoked on {:?}, to be {:?}", dual_node.index, grow_rate); + self.update_dual_node_if_necessary(&dual_node_ptr); let mut dual_node = dual_node_ptr.write(); - self.update_dual_node_if_necessary(&mut dual_node); + // let global_time_ptr = dual_node.global_time.clone().unwrap(); + // let global_time = global_time_ptr.read_recursive(); let global_time = self.global_time.read_recursive(); let grow_rate_diff = &grow_rate - &dual_node.grow_rate; - dual_node.grow_rate = grow_rate; + dual_node.grow_rate = grow_rate.clone(); if dual_node.grow_rate.is_negative() { self.obstacle_queue.will_happen( // it is okay to use global_time now, as this must be up-to-speed - dual_node.get_dual_variable().clone() / (-dual_node.grow_rate.clone()) + global_time.clone(), + dual_node.get_dual_variable().clone() / (-grow_rate) + global_time.clone(), Obstacle::ShrinkToZero { dual_node_ptr: dual_node_ptr.clone(), }, ); } drop(dual_node); + // drop(global_time); let dual_node = dual_node_ptr.read_recursive(); - for &edge_index in dual_node.invalid_subgraph.hair.iter() { - let mut edge = self.edges[edge_index as usize].write(); - self.update_edge_if_necessary(&mut edge); + for edge_ptr in dual_node.invalid_subgraph.hair.iter() { + // let mut edge = self.edges[edge_index as usize].write(); + // let mut edge = edge_ptr.write(); + self.update_edge_if_necessary(&edge_ptr); + + let mut edge = edge_ptr.write(); edge.grow_rate += &grow_rate_diff; + // let global_time_ptr = edge.global_time.clone(); + // let global_time = global_time_ptr.read_recursive(); if edge.grow_rate.is_positive() { self.obstacle_queue.will_happen( // it is okay to use global_time now, as this must be up-to-speed (edge.weight.clone() - edge.growth_at_last_updated_time.clone()) / edge.grow_rate.clone() + global_time.clone(), - Obstacle::Conflict { edge_index }, + Obstacle::Conflict { edge_ptr: edge_ptr.clone() }, ); } } } + #[allow(clippy::unnecessary_cast)] + fn set_grow_rate_tune(&mut self, dual_node_ptr: &DualNodePtr, grow_rate: Rational) { + let mut dual_node = dual_node_ptr.write(); + println!("set_grow_rate_tune invoked on {:?}, to be {:?}", dual_node.index, grow_rate); + + let grow_rate_diff = &grow_rate - &dual_node.grow_rate; + dual_node.grow_rate = grow_rate; + + for edge_ptr in dual_node.invalid_subgraph.hair.iter() { + // let mut edge = self.edges[edge_index as usize].write(); + let mut edge = edge_ptr.write(); + edge.grow_rate += &grow_rate_diff; + } + } + fn compute_maximum_update_length(&mut self) -> GroupMaxUpdateLength { + // self.debug_print(); + + // cprintln!("self.obstacle_queue: {:?}", self.obstacle_queue); let global_time = self.global_time.read_recursive(); - // finding a valid event to process, only when invalids exist - if Queue::MAY_BE_INVALID { - // getting rid of all the invalid events - while let Some((time, event)) = self.obstacle_queue.peek_event() { - // found a valid event - if event.is_valid(self, time) { - // valid grow - if time != &global_time.clone() { - return GroupMaxUpdateLength::ValidGrow(time - global_time.clone()); - } - // goto else - break; + // getting rid of all the invalid events + while let Some((time, event)) = self.obstacle_queue.peek_event() { + // cprintln!(" event found: {:?}", event); + // found a valid event + if event.is_valid(self, time) { + // cprintln!("valid event: {:?}", event); + // valid grow + if time != &global_time.clone() { + // cprintln!("group max update length within fn: {:?}", GroupMaxUpdateLength::ValidGrow(time - global_time.clone())); + return GroupMaxUpdateLength::ValidGrow(time - global_time.clone()); } - self.obstacle_queue.pop_event(); + // goto else + break; } + self.obstacle_queue.pop_event(); } // else , it is a valid conflict to resolve if let Some((_, event)) = self.obstacle_queue.pop_event() { // this is used, since queues are not sets, and can contain duplicate events - // Note: chekc that this is the assumption, though not much more overhead anyway - let mut group_max_update_length_set = BTreeSet::default(); - group_max_update_length_set.insert(match event { - Obstacle::Conflict { edge_index } => MaxUpdateLength::Conflicting(edge_index), - Obstacle::ShrinkToZero { dual_node_ptr } => MaxUpdateLength::ShrinkProhibited(dual_node_ptr), + // Note: check that this is the assumption, though not much more overhead anyway + // let mut group_max_update_length_set = BTreeSet::default(); + + // Note: With de-dup queue implementation, we could use vectors here + let mut group_max_update_length = GroupMaxUpdateLength::new(); + group_max_update_length.add(match event { + Obstacle::Conflict { edge_ptr } => MaxUpdateLength::Conflicting(edge_ptr), + Obstacle::ShrinkToZero { dual_node_ptr } => { + let index = dual_node_ptr.read_recursive().index; + MaxUpdateLength::ShrinkProhibited(OrderedDualNodePtr::new(index, dual_node_ptr)) + } }); // append all conflicts that happen at the same time as now while let Some((time, _)) = self.obstacle_queue.peek_event() { if &global_time.clone() == time { let (time, event) = self.obstacle_queue.pop_event().unwrap(); + // cprintln!(" event found: {:?}", event); if !event.is_valid(self, &time) { continue; } - // add - group_max_update_length_set.insert(match event { - Obstacle::Conflict { edge_index } => MaxUpdateLength::Conflicting(edge_index), - Obstacle::ShrinkToZero { dual_node_ptr } => MaxUpdateLength::ShrinkProhibited(dual_node_ptr), + group_max_update_length.add(match event { + Obstacle::Conflict { edge_ptr } => MaxUpdateLength::Conflicting(edge_ptr), + Obstacle::ShrinkToZero { dual_node_ptr } => { + let index = dual_node_ptr.read_recursive().index; + MaxUpdateLength::ShrinkProhibited(OrderedDualNodePtr::new(index, dual_node_ptr)) + } }); } else { break; } } - return GroupMaxUpdateLength::Conflicts(group_max_update_length_set.into_iter().collect_vec()); + // println!("len: {:?}", group_max_update_length.len()); + // if let GroupMaxUpdateLength::Conflicts(conflicts) = &group_max_update_length { + // for conflict in conflicts.iter() { + // if let MaxUpdateLength::Conflicting(edge_ptr) = conflict { + // println!("edge_ptr.nodes: {:?}", edge_ptr.read_recursive().dua) + // } else { + // println!("not a conlifcting edge: {:?}", conflict); + // } + // } + // } + // cprintln!("group max update length within fn: {:?}", group_max_update_length); + return group_max_update_length; } // nothing useful could be done, return unbounded GroupMaxUpdateLength::new() } + /// for pq implementation, simply updating the global time is enough, could be part of the `compute_maximum_update_length` function fn grow(&mut self, length: Rational) { debug_assert!( @@ -521,55 +947,391 @@ where "growth should be positive; if desired, please set grow rate to negative for shrinking" ); let mut global_time_write = self.global_time.write(); + // println!("global time before grow: {:?}", global_time_write); *global_time_write = global_time_write.clone() + length; + // println!("global time after grow: {:?}", global_time_write); } /* identical with the dual_module_serial */ #[allow(clippy::unnecessary_cast)] - fn get_edge_nodes(&self, edge_index: EdgeIndex) -> Vec { - self.edges[edge_index as usize] - .read_recursive() - .dual_nodes - .iter() - .map(|x| x.upgrade_force()) - .collect() + fn get_edge_nodes(&self, edge_ptr: EdgePtr) -> Vec { + edge_ptr.read_recursive() + .dual_nodes + .iter() + .map(|x| x.upgrade_force().ptr) + .collect() } #[allow(clippy::unnecessary_cast)] /// how much away from saturated is the edge - fn get_edge_slack(&self, edge_index: EdgeIndex) -> Rational { - let edge = self.edges[edge_index as usize].read_recursive(); + fn get_edge_slack(&self, edge_ptr: EdgePtr) -> Rational { + // let edge = self.edges[edge_index as usize].read_recursive(); + let edge = edge_ptr.read_recursive(); edge.weight.clone() - (self.global_time.read_recursive().clone() - edge.last_updated_time.clone()) * edge.grow_rate.clone() - edge.growth_at_last_updated_time.clone() } /// is the edge saturated - fn is_edge_tight(&self, edge_index: EdgeIndex) -> bool { - self.get_edge_slack(edge_index).is_zero() + fn is_edge_tight(&self, edge_ptr: EdgePtr) -> bool { + self.get_edge_slack(edge_ptr).is_zero() + } + + /* tuning mode related new methods */ + + // tuning mode shared methods + add_shared_methods!(); + + /// is the edge tight, but for tuning mode + fn is_edge_tight_tune(&self, edge_ptr: EdgePtr) -> bool { + // let edge = self.edges[edge_index].read_recursive(); + let edge = edge_ptr.read_recursive(); + edge.weight == edge.growth_at_last_updated_time + } + + fn get_edge_slack_tune(&self, edge_ptr: EdgePtr) -> Rational { + // let edge = self.edges[edge_index].read_recursive(); + let edge = edge_ptr.read_recursive(); + edge.weight.clone() - edge.growth_at_last_updated_time.clone() + } + + /// change mode, clear queue as queue is no longer needed. also sync to get rid off the need for global time + fn advance_mode(&mut self) { + self.mode_mut().advance(); + self.obstacle_queue.clear(); + self.sync(); + } + + /// grow specific amount for a specific edge + fn grow_edge(&self, edge_ptr: EdgePtr, amount: &Rational) { + // let mut edge = self.edges[edge_index].write(); + let mut edge = edge_ptr.write(); + edge.growth_at_last_updated_time += amount; + } + + /// sync all states and global time so the concept of time and pq can retire + fn sync(&mut self) { + // note: we can either set the global time to be zero, or just not change it anymore + + let mut nodes_touched = BTreeSet::new(); + + for edges in self.edges.iter_mut() { + let mut edge = edges.write(); + + // update if necessary + let global_time = self.global_time.read_recursive(); + // let global_time_ptr = edge.global_time.clone(); + // let global_time = global_time_ptr.read_recursive(); + if edge.last_updated_time != global_time.clone() { + // the edge is behind + debug_assert!( + global_time.clone() >= edge.last_updated_time, + "global time is behind, maybe a wrap-around has happened" + ); + + let time_diff = global_time.clone() - &edge.last_updated_time; + let newly_grown_amount = &time_diff * &edge.grow_rate; + edge.growth_at_last_updated_time += newly_grown_amount; + edge.last_updated_time = global_time.clone(); + debug_assert!( + edge.growth_at_last_updated_time <= edge.weight, + "growth larger than weight: check if events are 1) inserted and 2) handled correctly" + ); + } + + for dual_node_ptr in edge.dual_nodes.iter() { + if nodes_touched.contains(&dual_node_ptr.index) { + continue; + } + let _dual_node_ptr = dual_node_ptr.upgrade_force(); + let node = _dual_node_ptr.ptr.read_recursive(); + nodes_touched.insert(node.index); + + // update if necessary + let global_time = self.global_time.read_recursive(); + // let global_time_ptr = node.global_time.clone().unwrap(); + // let global_time = global_time_ptr.read_recursive(); + if node.last_updated_time != global_time.clone() { + // the node is behind + debug_assert!( + global_time.clone() >= node.last_updated_time, + "global time is behind, maybe a wrap-around has happened" + ); + + drop(node); + let mut node: RwLockWriteGuard = _dual_node_ptr.ptr.write(); + + let dual_variable = node.get_dual_variable(); + node.set_dual_variable(dual_variable); + node.last_updated_time = global_time.clone(); + debug_assert!( + !node.get_dual_variable().is_negative(), + "negative dual variable: check if events are 1) inserted and 2) handled correctly" + ); + } + } + } + } + + /// misc debug print statement + fn debug_print(&self) { + // println!("\n[current states]"); + // println!("global time: {:?}", self.global_time.read_recursive()); + // println!( + // "edges: {:?}", + // self.edges + // .iter() + // .filter(|e| !e.read_recursive().grow_rate.is_zero()) + // .collect::>() + // ); + if self.obstacle_queue.len() > 0 { + println!("pq: {:?}", self.obstacle_queue.len()); + } + + // println!("\n[current states]"); + // println!("global time: {:?}", self.global_time.read_recursive()); + // let mut all_nodes = BTreeSet::default(); + // for edge in self.edges.iter() { + // let edge = edge.read_recursive(); + // for node in edge.dual_nodes.iter() { + // let node = node.upgrade_force(); + // if node.read_recursive().grow_rate.is_zero() { + // continue; + // } + // all_nodes.insert(node); + // } + // } + // println!("nodes: {:?}", all_nodes); + } + + /* affinity */ + fn calculate_cluster_affinity(&mut self, cluster: PrimalClusterPtr) -> Option { + let mut start = 0.0; + let cluster = cluster.read_recursive(); + start -= cluster.edges.len() as f64 + cluster.nodes.len() as f64; + + let mut weight = Rational::zero(); + for edge_ptr in cluster.edges.iter() { + let edge = edge_ptr.read_recursive(); + weight += &edge.weight - &edge.growth_at_last_updated_time; + } + for node in cluster.nodes.iter() { + let dual_node = node.read_recursive().dual_node_ptr.clone(); + weight -= &dual_node.read_recursive().dual_variable_at_last_updated_time; + } + if weight.is_zero() { + return None; + } + start += weight.to_f64().unwrap(); + Some(OrderedFloat::from(start)) + } + + fn get_edge_free_weight( + &self, + edge_ptr: EdgePtr, + participating_dual_variables: &hashbrown::HashSet, + ) -> Rational { + let edge = edge_ptr.read_recursive(); + let mut free_weight = edge.weight.clone(); + for dual_node in edge.dual_nodes.iter() { + if participating_dual_variables.contains(&dual_node.index) { + continue; + } + let dual_node = dual_node.upgrade_force(); + free_weight -= &dual_node.ptr.read_recursive().dual_variable_at_last_updated_time; + } + + free_weight + } + + fn get_vertex_ptr(&self, vertex_index: VertexIndex) -> VertexPtr { + self.vertices[vertex_index].clone() + } + + fn get_edge_ptr(&self, edge_index: EdgeIndex) -> EdgePtr { + self.edges[edge_index].clone() + } + + #[cfg(feature = "incr_lp")] + fn get_edge_free_weight_cluster(&self, edge_index: EdgeIndex, cluster_index: NodeIndex) -> Rational { + let edge = self.edges[edge_index as usize].read_recursive(); + edge.weight.clone() + - edge + .cluster_weights + .iter() + .filter_map(|(c_idx, y)| if cluster_index.ne(c_idx) { Some(y) } else { None }) + .sum::() + } + + #[cfg(feature = "incr_lp")] + fn update_edge_cluster_weights_union( + &self, + dual_node_ptr: &DualNodePtr, + drained_cluster_index: NodeIndex, + absorbing_cluster_index: NodeIndex, + ) { + let dual_node = dual_node_ptr.read_recursive(); + for edge_index in dual_node.invalid_subgraph.hair.iter() { + let mut edge = self.edges[*edge_index as usize].write(); + if let Some(removed) = edge.cluster_weights.remove(&drained_cluster_index) { + *edge + .cluster_weights + .entry(absorbing_cluster_index) + .or_insert(Rational::zero()) += removed; + } + } + } + + #[cfg(feature = "incr_lp")] + fn update_edge_cluster_weights(&self, edge_index: usize, cluster_index: usize, weight: Rational) { + match self.edges[edge_index].write().cluster_weights.entry(cluster_index) { + hashbrown::hash_map::Entry::Occupied(mut o) => { + *o.get_mut() += weight; + } + hashbrown::hash_map::Entry::Vacant(v) => { + v.insert(weight); + } + } + } +} + + +impl DualModulePQ +where Queue: FutureQueueMethods + Default + std::fmt::Debug + Clone, +{ + /// to be called in dual_module_parallel.rs + pub fn new_partitioned(partitioned_initializer: &PartitionedSolverInitializer) -> Self { + // println!("///////////////////////////////////////////////////////////////////////////////"); + // println!("for new_partitioned: {partitioned_initializer:?}"); + // println!("///////////////////////////////////////////////////////////////////////////////"); + /// debug printing + + let mut all_defect_vertices = vec![]; + // create vertices + let mut vertices: Vec = partitioned_initializer.owning_range.iter().map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: if partitioned_initializer.defect_vertices.contains(&vertex_index) {all_defect_vertices.push(vertex_index); true} else {false}, + edges: Vec::new(), + is_mirror: if partitioned_initializer.is_boundary_unit {true} else {false}, // all the vertices on the boundary are mirror vertices + fusion_done: if partitioned_initializer.is_boundary_unit {false} else {true}, // initialized to false + mirrored_vertices: vec![], // initialized to empty, to be filled in `new_config()` in parallel implementation + }) + }).collect(); + + // now we want to add the boundary vertices into the vertices for this partition (if this partition is non-boundary unit) + let mut total_boundary_vertices = HashMap::::new(); // all boundary vertices mapping to the specific local partition index + let mut all_mirrored_vertices = vec![]; + if !partitioned_initializer.is_boundary_unit { + // only the index_range matters here, the units of the adjacent partitions do not matter here + for adjacent_index_range in partitioned_initializer.boundary_vertices.iter(){ + for vertex_index in adjacent_index_range.range[0]..adjacent_index_range.range[1] { + if !partitioned_initializer.owning_range.contains(vertex_index) { + total_boundary_vertices.insert(vertex_index, vertices.len() as VertexIndex); + let vertex_ptr0 = VertexPtr::new_value(Vertex { + vertex_index: vertex_index, + is_defect: if partitioned_initializer.defect_vertices.contains(&vertex_index) {all_defect_vertices.push(vertex_index); true} else {false}, + edges: Vec::new(), + is_mirror: true, + fusion_done: false, // initialized to false + mirrored_vertices: vec![], // set to empty, to be filled in `new_config()` in parallel implementation + }); + vertices.push(vertex_ptr0.clone()); + all_mirrored_vertices.push(vertex_ptr0); + } + } + } + } + + // initialize global time + let global_time = ArcRwLock::new_value(Rational::zero()); + + // set edges + let mut edges = Vec::::new(); + for (hyper_edge, edge_index) in partitioned_initializer.weighted_edges.iter() { + // above, we have created the vertices that follow its own numbering rule for the index + // so we need to calculate the vertex indices of the hyper_edge to make it match the local index + // then, we can create EdgePtr + let mut local_hyper_edge_vertices = Vec::>::new(); + for vertex_index in hyper_edge.vertices.iter() { + // println!("vertex_index: {:?}", vertex_index); + let local_index = if partitioned_initializer.owning_range.contains(*vertex_index) { + vertex_index - partitioned_initializer.owning_range.start() + } else { + total_boundary_vertices[vertex_index] + }; + local_hyper_edge_vertices.push(vertices[local_index].downgrade()); + } + // now we create the edgeptr + let edge_ptr = EdgePtr::new_value(Edge { + edge_index: *edge_index, + weight: Rational::from_usize(hyper_edge.weight).unwrap(), + dual_nodes: vec![], + vertices: local_hyper_edge_vertices, + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: Some(partitioned_initializer.unit_index), + connected_to_boundary_vertex: hyper_edge.connected_to_boundary_vertex, + }); + + // we also need to update the vertices of this hyper_edge + for vertex_index in hyper_edge.vertices.iter() { + let local_index = if partitioned_initializer.owning_range.contains(*vertex_index) { + vertex_index - partitioned_initializer.owning_range.start() + } else { + total_boundary_vertices[vertex_index] + }; + vertices[local_index].write().edges.push(edge_ptr.downgrade()); + } + // for &vertex_index in hyper_edge.vertices.iter() { + // vertices[vertex_index as usize].write().edges.push(edge_ptr.downgrade()); + // } + edges.push(edge_ptr); + + } + + + + Self { + vertices, + edges, + obstacle_queue: Queue::default(), + global_time: global_time.clone(), + mode: DualModuleMode::default(), + vertex_num: partitioned_initializer.vertex_num, + edge_num: partitioned_initializer.edge_num, + all_mirrored_vertices, + all_defect_vertices, + } } + } impl MWPSVisualizer for DualModulePQ where - Queue: FutureQueueMethods + Default + std::fmt::Debug, + Queue: FutureQueueMethods + Default + std::fmt::Debug + Clone, { fn snapshot(&self, abbrev: bool) -> serde_json::Value { - let mut vertices: Vec = vec![]; + let mut vertices: Vec = (0..self.vertex_num).map(|_| serde_json::Value::Null).collect(); for vertex_ptr in self.vertices.iter() { let vertex = vertex_ptr.read_recursive(); - vertices.push(json!({ + // println!("vertex index: {:?}", vertex.vertex_index); + vertices[vertex.vertex_index as usize] = json!({ if abbrev { "s" } else { "is_defect" }: i32::from(vertex.is_defect), - })); + }); + } - let mut edges: Vec = vec![]; + + let mut edges: Vec = (0..self.edge_num).map(|_| serde_json::Value::Null).collect(); for edge_ptr in self.edges.iter() { let edge = edge_ptr.read_recursive(); let current_growth = &edge.growth_at_last_updated_time + (&self.global_time.read_recursive().clone() - &edge.last_updated_time) * &edge.grow_rate; let unexplored = &edge.weight - ¤t_growth; - edges.push(json!({ + // println!("edge_index: {:?}", edge.edge_index); + edges[edge.edge_index as usize] = json!({ if abbrev { "w" } else { "weight" }: edge.weight.to_f64(), if abbrev { "v" } else { "vertices" }: edge.vertices.iter().map(|x| x.upgrade_force().read_recursive().vertex_index).collect::>(), if abbrev { "g" } else { "growth" }: current_growth.to_f64(), @@ -577,7 +1339,7 @@ where "gd": current_growth.denom().to_i64(), "un": unexplored.numer().to_i64(), "ud": unexplored.denom().to_i64(), - })); + }); } json!({ "vertices": vertices, @@ -592,52 +1354,53 @@ mod tests { use crate::decoding_hypergraph::*; use crate::example_codes::*; - #[test] - fn dual_module_pq_learn_priority_queue_1() { - // cargo test dual_module_pq_learn_priority_queue_1 -- --nocapture - let mut future_obstacle_queue = FutureObstacleQueue::::new(); - assert_eq!(0, future_obstacle_queue.len()); - macro_rules! ref_event { - ($index:expr) => { - Some((&$index, &Obstacle::Conflict { edge_index: $index })) - }; - } - macro_rules! value_event { - ($index:expr) => { - Some(($index, Obstacle::Conflict { edge_index: $index })) - }; - } - // test basic order - future_obstacle_queue.will_happen(2, Obstacle::Conflict { edge_index: 2 }); - future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 1 }); - future_obstacle_queue.will_happen(3, Obstacle::Conflict { edge_index: 3 }); - assert_eq!(future_obstacle_queue.peek_event(), ref_event!(1)); - assert_eq!(future_obstacle_queue.peek_event(), ref_event!(1)); - assert_eq!(future_obstacle_queue.pop_event(), value_event!(1)); - assert_eq!(future_obstacle_queue.peek_event(), ref_event!(2)); - assert_eq!(future_obstacle_queue.pop_event(), value_event!(2)); - assert_eq!(future_obstacle_queue.pop_event(), value_event!(3)); - assert_eq!(future_obstacle_queue.peek_event(), None); - // test duplicate elements, the queue must be able to hold all the duplicate events - future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 1 }); - future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 1 }); - future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 1 }); - assert_eq!(future_obstacle_queue.pop_event(), value_event!(1)); - assert_eq!(future_obstacle_queue.pop_event(), value_event!(1)); - assert_eq!(future_obstacle_queue.pop_event(), value_event!(1)); - assert_eq!(future_obstacle_queue.peek_event(), None); - // test order of events at the same time - future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 2 }); - future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 1 }); - future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 3 }); - let mut events = vec![]; - while let Some((time, event)) = future_obstacle_queue.pop_event() { - assert_eq!(time, 1); - events.push(event); - } - assert_eq!(events.len(), 3); - println!("events: {events:?}"); - } + // this test can't be run because we need to create vertexptr and edgeptr first, we could possibly create them to run this test later + // #[test] + // fn dual_module_pq_learn_priority_queue_1() { + // // cargo test dual_module_pq_learn_priority_queue_1 -- --nocapture + // let mut future_obstacle_queue = _FutureObstacleQueue::::new(); + // assert_eq!(0, future_obstacle_queue.len()); + // macro_rules! ref_event { + // ($index:expr) => { + // Some((&$index, &Obstacle::Conflict { edge_ptr: $index })) + // }; + // } + // macro_rules! value_event { + // ($index:expr) => { + // Some(($index, Obstacle::Conflict { edge_ptr: $index })) + // }; + // } + // // test basic order + // future_obstacle_queue.will_happen(2, Obstacle::Conflict { edge_ptr: 2 }); + // future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_ptr: 1 }); + // future_obstacle_queue.will_happen(3, Obstacle::Conflict { edge_ptr: 3 }); + // assert_eq!(future_obstacle_queue.peek_event(), ref_event!(1)); + // assert_eq!(future_obstacle_queue.peek_event(), ref_event!(1)); + // assert_eq!(future_obstacle_queue.pop_event(), value_event!(1)); + // assert_eq!(future_obstacle_queue.peek_event(), ref_event!(2)); + // assert_eq!(future_obstacle_queue.pop_event(), value_event!(2)); + // assert_eq!(future_obstacle_queue.pop_event(), value_event!(3)); + // assert_eq!(future_obstacle_queue.peek_event(), None); + // // test duplicate elements, the queue must be able to hold all the duplicate events + // future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 1 }); + // future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 1 }); + // future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 1 }); + // assert_eq!(future_obstacle_queue.pop_event(), value_event!(1)); + // assert_eq!(future_obstacle_queue.pop_event(), value_event!(1)); + // assert_eq!(future_obstacle_queue.pop_event(), value_event!(1)); + // assert_eq!(future_obstacle_queue.peek_event(), None); + // // test order of events at the same time + // future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 2 }); + // future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 1 }); + // future_obstacle_queue.will_happen(1, Obstacle::Conflict { edge_index: 3 }); + // let mut events = vec![]; + // while let Some((time, event)) = future_obstacle_queue.pop_event() { + // assert_eq!(time, 1); + // events.push(event); + // } + // assert_eq!(events.len(), 3); + // println!("events: {events:?}"); + // } #[test] fn dual_module_pq_basics_1() { @@ -657,7 +1420,7 @@ mod tests { let mut dual_module: DualModulePQ> = DualModulePQ::new_empty(&model_graph.initializer); // try to work on a simple syndrome let decoding_graph = DecodingHyperGraph::new_defects(model_graph, vec![3, 12]); - let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph, &mut dual_module); + let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph.syndrome_pattern, &mut dual_module); visualizer .snapshot_combined("syndrome".to_string(), vec![&interface_ptr, &dual_module]) @@ -682,7 +1445,8 @@ mod tests { .unwrap(); // the result subgraph - let subgraph = vec![15, 20]; + + let subgraph = vec![dual_module.edges[15].downgrade(), dual_module.edges[20].downgrade()]; visualizer .snapshot_combined("subgraph".to_string(), vec![&interface_ptr, &dual_module, &subgraph]) .unwrap(); @@ -706,7 +1470,7 @@ mod tests { let mut dual_module: DualModulePQ> = DualModulePQ::new_empty(&model_graph.initializer); // try to work on a simple syndrome let decoding_graph = DecodingHyperGraph::new_defects(model_graph, vec![23, 24, 29, 30]); - let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph, &mut dual_module); + let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph.syndrome_pattern, &mut dual_module); visualizer .snapshot_combined("syndrome".to_string(), vec![&interface_ptr, &dual_module]) .unwrap(); @@ -725,7 +1489,7 @@ mod tests { .unwrap(); // the result subgraph - let subgraph = vec![24]; + let subgraph = vec![dual_module.edges[24].downgrade()]; visualizer .snapshot_combined("subgraph".to_string(), vec![&interface_ptr, &dual_module, &subgraph]) .unwrap(); @@ -750,7 +1514,7 @@ mod tests { let mut dual_module: DualModulePQ> = DualModulePQ::new_empty(&model_graph.initializer); // try to work on a simple syndrome let decoding_graph = DecodingHyperGraph::new_defects(model_graph, vec![17, 23, 29, 30]); - let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph, &mut dual_module); + let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph.syndrome_pattern, &mut dual_module); visualizer .snapshot_combined("syndrome".to_string(), vec![&interface_ptr, &dual_module]) .unwrap(); @@ -779,7 +1543,8 @@ mod tests { dual_module.set_grow_rate(&dual_node_30_ptr, Rational::from_i64(0).unwrap()); // create cluster - interface_ptr.create_node_vec(&[24], &mut dual_module); + let edge_weak = dual_module.get_edge_ptr(24).downgrade(); + interface_ptr.create_node_vec(&[edge_weak], &mut dual_module); let dual_node_cluster_ptr = interface_ptr.read_recursive().nodes[4].clone(); dual_module.set_grow_rate(&dual_node_17_ptr, Rational::from_i64(1).unwrap()); dual_module.set_grow_rate(&dual_node_cluster_ptr, Rational::from_i64(1).unwrap()); @@ -795,7 +1560,11 @@ mod tests { dual_module.set_grow_rate(&dual_node_cluster_ptr, Rational::from_i64(0).unwrap()); // create bigger cluster - interface_ptr.create_node_vec(&[18, 23, 24, 31], &mut dual_module); + let edge_weak_1 = dual_module.get_edge_ptr(18).downgrade(); + let edge_weak_2 = dual_module.get_edge_ptr(23).downgrade(); + let edge_weak_3 = dual_module.get_edge_ptr(24).downgrade(); + let edge_weak_4 = dual_module.get_edge_ptr(31).downgrade(); + interface_ptr.create_node_vec(&[edge_weak_1, edge_weak_2, edge_weak_3, edge_weak_4], &mut dual_module); let dual_node_bigger_cluster_ptr = interface_ptr.read_recursive().nodes[5].clone(); dual_module.set_grow_rate(&dual_node_bigger_cluster_ptr, Rational::from_i64(1).unwrap()); @@ -807,7 +1576,7 @@ mod tests { .unwrap(); // the result subgraph - let subgraph = vec![82, 24]; + let subgraph = vec![dual_module.edges[82].downgrade(), dual_module.edges[24].downgrade()]; visualizer .snapshot_combined("subgraph".to_string(), vec![&interface_ptr, &dual_module, &subgraph]) .unwrap(); diff --git a/src/dual_module_serial.rs b/src/dual_module_serial.rs index 9346ee41..537e5bec 100644 --- a/src/dual_module_serial.rs +++ b/src/dual_module_serial.rs @@ -1,631 +1,765 @@ -//! Serial Dual Module -//! -//! A serial implementation of the dual module -//! - -use crate::derivative::Derivative; -use crate::dual_module::*; -use crate::num_traits::sign::Signed; -use crate::num_traits::{ToPrimitive, Zero}; -use crate::pointers::*; -use crate::util::*; -use crate::visualize::*; -use num_traits::FromPrimitive; -use std::collections::BTreeSet; - -pub struct DualModuleSerial { - /// all vertices including virtual ones - pub vertices: Vec, - /// keep edges, which can also be accessed in [`Self::vertices`] - pub edges: Vec, - /// maintain an active list to optimize for average cases: most defect vertices have already been matched, and we only need to work on a few remained; - /// note that this list may contain duplicate nodes - pub active_edges: BTreeSet, - /// active nodes - pub active_nodes: BTreeSet, -} - -pub type DualModuleSerialPtr = ArcRwLock; -pub type DualModuleSerialWeak = WeakRwLock; - -#[derive(Derivative)] -#[derivative(Debug)] -pub struct Vertex { - /// the index of this vertex in the decoding graph, not necessary the index in [`DualModuleSerial::vertices`] if it's partitioned - pub vertex_index: VertexIndex, - /// if a vertex is defect, then [`Vertex::propagated_dual_node`] always corresponds to that root - pub is_defect: bool, - /// all neighbor edges, in surface code this should be constant number of edges - #[derivative(Debug = "ignore")] - pub edges: Vec, -} - -pub type VertexPtr = ArcRwLock; -pub type VertexWeak = WeakRwLock; - -impl std::fmt::Debug for VertexPtr { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let vertex = self.read_recursive(); - write!(f, "{}", vertex.vertex_index) - } -} - -impl std::fmt::Debug for VertexWeak { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let vertex_ptr = self.upgrade_force(); - let vertex = vertex_ptr.read_recursive(); - write!(f, "{}", vertex.vertex_index) - } -} - -#[derive(Derivative)] -#[derivative(Debug)] -pub struct Edge { - /// global edge index - edge_index: EdgeIndex, - /// total weight of this edge - weight: Rational, - #[derivative(Debug = "ignore")] - vertices: Vec, - /// growth value, growth <= weight - growth: Rational, - /// the dual nodes that contributes to this edge - dual_nodes: Vec, - /// the speed of growth - grow_rate: Rational, -} - -pub type EdgePtr = ArcRwLock; -pub type EdgeWeak = WeakRwLock; - -impl std::fmt::Debug for EdgePtr { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let edge = self.read_recursive(); - write!( - f, - "[edge: {}]: weight: {}, grow_rate: {}, growth: {}\n\tdual_nodes: {:?}", - edge.edge_index, edge.weight, edge.grow_rate, edge.growth, edge.dual_nodes - ) - } -} - -impl std::fmt::Debug for EdgeWeak { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let edge_ptr = self.upgrade_force(); - let edge = edge_ptr.read_recursive(); - write!( - f, - "[edge: {}]: weight: {}, grow_rate: {}, growth: {}\n\tdual_nodes: {:?}", - edge.edge_index, edge.weight, edge.grow_rate, edge.growth, edge.dual_nodes - ) - } -} - -impl DualModuleImpl for DualModuleSerial { - /// initialize the dual module, which is supposed to be reused for multiple decoding tasks with the same structure - #[allow(clippy::unnecessary_cast)] - fn new_empty(initializer: &SolverInitializer) -> Self { - initializer.sanity_check().unwrap(); - // create vertices - let vertices: Vec = (0..initializer.vertex_num) - .map(|vertex_index| { - VertexPtr::new_value(Vertex { - vertex_index, - is_defect: false, - edges: vec![], - }) - }) - .collect(); - // set edges - let mut edges = Vec::::new(); - for hyperedge in initializer.weighted_edges.iter() { - let edge_ptr = EdgePtr::new_value(Edge { - edge_index: edges.len() as EdgeIndex, - growth: Rational::zero(), - weight: Rational::from_usize(hyperedge.weight).unwrap(), - dual_nodes: vec![], - vertices: hyperedge - .vertices - .iter() - .map(|i| vertices[*i as usize].downgrade()) - .collect::>(), - grow_rate: Rational::zero(), - }); - for &vertex_index in hyperedge.vertices.iter() { - vertices[vertex_index as usize].write().edges.push(edge_ptr.downgrade()); - } - edges.push(edge_ptr); - } - Self { - vertices, - edges, - active_edges: BTreeSet::new(), - active_nodes: BTreeSet::new(), - } - } - - /// clear all growth and existing dual nodes - fn clear(&mut self) { - self.active_edges.clear(); - self.active_nodes.clear(); - for vertex_ptr in self.vertices.iter() { - vertex_ptr.write().clear(); - } - for edge_ptr in self.edges.iter() { - edge_ptr.write().clear(); - } - } - - fn add_defect_node(&mut self, dual_node_ptr: &DualNodePtr) { - let dual_node = dual_node_ptr.read_recursive(); - debug_assert!(dual_node.invalid_subgraph.edges.is_empty()); - debug_assert!( - dual_node.invalid_subgraph.vertices.len() == 1, - "defect node (without edges) should only work on a single vertex, for simplicity" - ); - let vertex_index = dual_node.invalid_subgraph.vertices.iter().next().unwrap(); - let mut vertex = self.vertices[*vertex_index].write(); - assert!(!vertex.is_defect, "defect should not be added twice"); - vertex.is_defect = true; - drop(dual_node); - drop(vertex); - self.add_dual_node(dual_node_ptr); - } - - #[allow(clippy::unnecessary_cast)] - fn add_dual_node(&mut self, dual_node_ptr: &DualNodePtr) { - // make sure the active edges are set - let dual_node_weak = dual_node_ptr.downgrade(); - let dual_node = dual_node_ptr.read_recursive(); - for &edge_index in dual_node.invalid_subgraph.hair.iter() { - let mut edge = self.edges[edge_index as usize].write(); - edge.grow_rate += &dual_node.grow_rate; - edge.dual_nodes.push(dual_node_weak.clone()); - if edge.grow_rate.is_zero() { - self.active_edges.remove(&edge_index); - } else { - self.active_edges.insert(edge_index); - } - } - self.active_nodes.insert(dual_node_ptr.clone()); - } - - #[allow(clippy::unnecessary_cast)] - fn set_grow_rate(&mut self, dual_node_ptr: &DualNodePtr, grow_rate: Rational) { - let mut dual_node = dual_node_ptr.write(); - let grow_rate_diff = grow_rate.clone() - &dual_node.grow_rate; - dual_node.grow_rate = grow_rate; - drop(dual_node); - let dual_node = dual_node_ptr.read_recursive(); - for &edge_index in dual_node.invalid_subgraph.hair.iter() { - let mut edge = self.edges[edge_index as usize].write(); - edge.grow_rate += &grow_rate_diff; - if edge.grow_rate.is_zero() { - self.active_edges.remove(&edge_index); - } else { - self.active_edges.insert(edge_index); - } - } - if dual_node.grow_rate.is_zero() { - self.active_nodes.remove(dual_node_ptr); - } else { - self.active_nodes.insert(dual_node_ptr.clone()); - } - } - - #[allow(clippy::collapsible_else_if, clippy::unnecessary_cast)] - fn compute_maximum_update_length_dual_node( - &mut self, - dual_node_ptr: &DualNodePtr, - simultaneous_update: bool, - ) -> MaxUpdateLength { - let node = dual_node_ptr.read_recursive(); - let mut max_update_length = MaxUpdateLength::new(); - for &edge_index in node.invalid_subgraph.hair.iter() { - let edge = self.edges[edge_index as usize].read_recursive(); - let mut grow_rate = Rational::zero(); - if simultaneous_update { - // consider all dual nodes - for node_weak in edge.dual_nodes.iter() { - grow_rate += node_weak.upgrade_force().read_recursive().grow_rate.clone(); - } - } else { - grow_rate = node.grow_rate.clone(); - } - if grow_rate.is_positive() { - let edge_remain = edge.weight.clone() - edge.growth.clone(); - if edge_remain.is_zero() { - max_update_length.merge(MaxUpdateLength::Conflicting(edge_index)); - } else { - max_update_length.merge(MaxUpdateLength::ValidGrow(edge_remain / grow_rate)); - } - } else if grow_rate.is_negative() { - if edge.growth.is_zero() { - if node.grow_rate.is_negative() { - max_update_length.merge(MaxUpdateLength::ShrinkProhibited(dual_node_ptr.clone())); - } else { - // find a negatively growing edge - let mut found = false; - for node_weak in edge.dual_nodes.iter() { - let node_ptr = node_weak.upgrade_force(); - if node_ptr.read_recursive().grow_rate.is_negative() { - max_update_length.merge(MaxUpdateLength::ShrinkProhibited(node_ptr)); - found = true; - break; - } - } - assert!(found, "unreachable"); - } - } else { - max_update_length.merge(MaxUpdateLength::ValidGrow(-edge.growth.clone() / grow_rate)); - } - } - } - max_update_length - } - - #[allow(clippy::unnecessary_cast)] - fn compute_maximum_update_length(&mut self) -> GroupMaxUpdateLength { - let mut group_max_update_length = GroupMaxUpdateLength::new(); - for &edge_index in self.active_edges.iter() { - let edge = self.edges[edge_index as usize].read_recursive(); - let mut grow_rate = Rational::zero(); - for node_weak in edge.dual_nodes.iter() { - let node_ptr = node_weak.upgrade_force(); - let node = node_ptr.read_recursive(); - grow_rate += node.grow_rate.clone(); - } - if grow_rate.is_positive() { - let edge_remain = edge.weight.clone() - edge.growth.clone(); - if edge_remain.is_zero() { - group_max_update_length.add(MaxUpdateLength::Conflicting(edge_index)); - } else { - group_max_update_length.add(MaxUpdateLength::ValidGrow(edge_remain / grow_rate)); - } - } else if grow_rate.is_negative() { - if edge.growth.is_zero() { - // it will be reported when iterating active dual nodes - } else { - group_max_update_length.add(MaxUpdateLength::ValidGrow(-edge.growth.clone() / grow_rate)); - } - } - } - for node_ptr in self.active_nodes.iter() { - let node = node_ptr.read_recursive(); - if node.grow_rate.is_negative() { - if node.get_dual_variable().is_positive() { - group_max_update_length - .add(MaxUpdateLength::ValidGrow(-node.get_dual_variable() / node.grow_rate.clone())); - } else { - group_max_update_length.add(MaxUpdateLength::ShrinkProhibited(node_ptr.clone())); - } - } - } - group_max_update_length - } - - #[allow(clippy::unnecessary_cast)] - fn grow_dual_node(&mut self, dual_node_ptr: &DualNodePtr, length: Rational) { - if length.is_zero() { - eprintln!("[warning] calling `grow_dual_node` with zero length, nothing to do"); - return; - } - let node = dual_node_ptr.read_recursive(); - let grow_amount = length * node.grow_rate.clone(); - for &edge_index in node.invalid_subgraph.hair.iter() { - let mut edge = self.edges[edge_index as usize].write(); - edge.growth += grow_amount.clone(); - assert!( - !edge.growth.is_negative(), - "edge {} over-shrunk: the new growth is {:?}", - edge_index, - edge.growth - ); - assert!( - edge.growth <= edge.weight, - "edge {} over-grown: the new growth is {:?}, weight is {:?}", - edge_index, - edge.growth, - edge.weight - ); - } - drop(node); - // update dual variable - let mut dual_node_ptr_write = dual_node_ptr.write(); - let dual_variable = dual_node_ptr_write.get_dual_variable(); - dual_node_ptr_write.set_dual_variable(dual_variable + grow_amount); - } - - #[allow(clippy::unnecessary_cast)] - fn grow(&mut self, length: Rational) { - debug_assert!( - length.is_positive(), - "growth should be positive; if desired, please set grow rate to negative for shrinking" - ); - // update the active edges - for &edge_index in self.active_edges.iter() { - let mut edge = self.edges[edge_index as usize].write(); - let mut grow_rate = Rational::zero(); - for node_weak in edge.dual_nodes.iter() { - grow_rate += node_weak.upgrade_force().read_recursive().grow_rate.clone(); - } - edge.growth += length.clone() * grow_rate; - assert!( - !edge.growth.is_negative(), - "edge {} over-shrunk: the new growth is {:?}", - edge_index, - edge.growth - ); - assert!( - edge.growth <= edge.weight, - "edge {} over-grown: the new growth is {:?}, weight is {:?}", - edge_index, - edge.growth, - edge.weight - ); - } - // update dual variables - for node_ptr in self.active_nodes.iter() { - let mut node = node_ptr.write(); - let grow_rate = node.grow_rate.clone(); - let dual_variable = node.get_dual_variable(); - node.set_dual_variable(dual_variable + length.clone() * grow_rate); - } - } - - #[allow(clippy::unnecessary_cast)] - fn get_edge_nodes(&self, edge_index: EdgeIndex) -> Vec { - self.edges[edge_index as usize] - .read_recursive() - .dual_nodes - .iter() - .map(|x| x.upgrade_force()) - .collect() - } - - fn get_edge_slack(&self, edge_index: EdgeIndex) -> Rational { - let edge = self.edges[edge_index].read_recursive(); - edge.weight.clone() - edge.growth.clone() - } - - #[allow(clippy::unnecessary_cast)] - fn is_edge_tight(&self, edge_index: EdgeIndex) -> bool { - let edge = self.edges[edge_index as usize].read_recursive(); - edge.growth == edge.weight - } -} - -/* -Implementing fast clear operations -*/ - -impl Edge { - fn clear(&mut self) { - self.growth = Rational::zero(); - self.dual_nodes.clear(); - } -} - -impl Vertex { - fn clear(&mut self) { - self.is_defect = false; - } -} - -/* -Implementing visualization functions -*/ - -impl MWPSVisualizer for DualModuleSerial { - fn snapshot(&self, abbrev: bool) -> serde_json::Value { - let mut vertices: Vec = vec![]; - for vertex_ptr in self.vertices.iter() { - let vertex = vertex_ptr.read_recursive(); - vertices.push(json!({ - if abbrev { "s" } else { "is_defect" }: i32::from(vertex.is_defect), - })); - } - let mut edges: Vec = vec![]; - for edge_ptr in self.edges.iter() { - let edge = edge_ptr.read_recursive(); - let unexplored = edge.weight.clone() - edge.growth.clone(); - edges.push(json!({ - if abbrev { "w" } else { "weight" }: edge.weight.to_f64(), - if abbrev { "v" } else { "vertices" }: edge.vertices.iter().map(|x| x.upgrade_force().read_recursive().vertex_index).collect::>(), - if abbrev { "g" } else { "growth" }: edge.growth.to_f64(), - "gn": edge.growth.numer().to_i64(), - "gd": edge.growth.denom().to_i64(), - "un": unexplored.numer().to_i64(), - "ud": unexplored.denom().to_i64(), - })); - } - json!({ - "vertices": vertices, - "edges": edges, - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::decoding_hypergraph::*; - use crate::example_codes::*; - - #[test] - fn dual_module_serial_basics_1() { - // cargo test dual_module_serial_basics_1 -- --nocapture - let visualize_filename = "dual_module_serial_basics_1.json".to_string(); - let weight = 1000; - let code = CodeCapacityColorCode::new(7, 0.1, weight); - let mut visualizer = Visualizer::new( - Some(visualize_data_folder() + visualize_filename.as_str()), - code.get_positions(), - true, - ) - .unwrap(); - print_visualize_link(visualize_filename); - // create dual module - let model_graph = code.get_model_graph(); - let mut dual_module = DualModuleSerial::new_empty(&model_graph.initializer); - // try to work on a simple syndrome - let decoding_graph = DecodingHyperGraph::new_defects(model_graph, vec![3, 12]); - let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph, &mut dual_module); - visualizer - .snapshot_combined("syndrome".to_string(), vec![&interface_ptr, &dual_module]) - .unwrap(); - // grow them each by half - let dual_node_3_ptr = interface_ptr.read_recursive().nodes[0].clone(); - let dual_node_12_ptr = interface_ptr.read_recursive().nodes[1].clone(); - dual_module.grow_dual_node(&dual_node_3_ptr, Rational::from_usize(weight / 2).unwrap()); - dual_module.grow_dual_node(&dual_node_12_ptr, Rational::from_usize(weight / 2).unwrap()); - visualizer - .snapshot_combined("grow".to_string(), vec![&interface_ptr, &dual_module]) - .unwrap(); - // cluster becomes solved - dual_module.grow_dual_node(&dual_node_3_ptr, Rational::from_usize(weight / 2).unwrap()); - dual_module.grow_dual_node(&dual_node_12_ptr, Rational::from_usize(weight / 2).unwrap()); - visualizer - .snapshot_combined("solved".to_string(), vec![&interface_ptr, &dual_module]) - .unwrap(); - // the result subgraph - let subgraph = vec![15, 20]; - visualizer - .snapshot_combined("subgraph".to_string(), vec![&interface_ptr, &dual_module, &subgraph]) - .unwrap(); - } - - #[test] - fn dual_module_serial_basics_2() { - // cargo test dual_module_serial_basics_2 -- --nocapture - let visualize_filename = "dual_module_serial_basics_2.json".to_string(); - let weight = 1000; - let code = CodeCapacityTailoredCode::new(7, 0., 0.1, weight); - let mut visualizer = Visualizer::new( - Some(visualize_data_folder() + visualize_filename.as_str()), - code.get_positions(), - true, - ) - .unwrap(); - print_visualize_link(visualize_filename); - // create dual module - let model_graph = code.get_model_graph(); - let mut dual_module = DualModuleSerial::new_empty(&model_graph.initializer); - // try to work on a simple syndrome - let decoding_graph = DecodingHyperGraph::new_defects(model_graph, vec![23, 24, 29, 30]); - let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph, &mut dual_module); - visualizer - .snapshot_combined("syndrome".to_string(), vec![&interface_ptr, &dual_module]) - .unwrap(); - // grow them each by half - let dual_node_23_ptr = interface_ptr.read_recursive().nodes[0].clone(); - let dual_node_24_ptr = interface_ptr.read_recursive().nodes[1].clone(); - let dual_node_29_ptr = interface_ptr.read_recursive().nodes[2].clone(); - let dual_node_30_ptr = interface_ptr.read_recursive().nodes[3].clone(); - dual_module.grow_dual_node(&dual_node_23_ptr, Rational::from_usize(weight / 4).unwrap()); - dual_module.grow_dual_node(&dual_node_24_ptr, Rational::from_usize(weight / 4).unwrap()); - dual_module.grow_dual_node(&dual_node_29_ptr, Rational::from_usize(weight / 4).unwrap()); - dual_module.grow_dual_node(&dual_node_30_ptr, Rational::from_usize(weight / 4).unwrap()); - visualizer - .snapshot_combined("solved".to_string(), vec![&interface_ptr, &dual_module]) - .unwrap(); - // the result subgraph - let subgraph = vec![24]; - visualizer - .snapshot_combined("subgraph".to_string(), vec![&interface_ptr, &dual_module, &subgraph]) - .unwrap(); - } - - #[test] - fn dual_module_serial_basics_3() { - // cargo test dual_module_serial_basics_3 -- --nocapture - let visualize_filename = "dual_module_serial_basics_3.json".to_string(); - let weight = 600; // do not change, the data is hard-coded - let pxy = 0.0602828812732227; - let code = CodeCapacityTailoredCode::new(7, pxy, 0.1, weight); // do not change probabilities: the data is hard-coded - let mut visualizer = Visualizer::new( - Some(visualize_data_folder() + visualize_filename.as_str()), - code.get_positions(), - true, - ) - .unwrap(); - print_visualize_link(visualize_filename); - // create dual module - let model_graph = code.get_model_graph(); - let mut dual_module = DualModuleSerial::new_empty(&model_graph.initializer); - // try to work on a simple syndrome - let decoding_graph = DecodingHyperGraph::new_defects(model_graph, vec![17, 23, 29, 30]); - let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph, &mut dual_module); - visualizer - .snapshot_combined("syndrome".to_string(), vec![&interface_ptr, &dual_module]) - .unwrap(); - // grow them each by half - let dual_node_17_ptr = interface_ptr.read_recursive().nodes[0].clone(); - let dual_node_23_ptr = interface_ptr.read_recursive().nodes[1].clone(); - let dual_node_29_ptr = interface_ptr.read_recursive().nodes[2].clone(); - let dual_node_30_ptr = interface_ptr.read_recursive().nodes[3].clone(); - dual_module.grow_dual_node(&dual_node_17_ptr, Rational::from_i64(160).unwrap()); - dual_module.grow_dual_node(&dual_node_23_ptr, Rational::from_i64(160).unwrap()); - dual_module.grow_dual_node(&dual_node_29_ptr, Rational::from_i64(160).unwrap()); - dual_module.grow_dual_node(&dual_node_30_ptr, Rational::from_i64(160).unwrap()); - visualizer - .snapshot_combined("grow".to_string(), vec![&interface_ptr, &dual_module]) - .unwrap(); - // create cluster - interface_ptr.create_node_vec(&[24], &mut dual_module); - let dual_node_cluster_ptr = interface_ptr.read_recursive().nodes[4].clone(); - dual_module.grow_dual_node(&dual_node_17_ptr, Rational::from_i64(160).unwrap()); - dual_module.grow_dual_node(&dual_node_cluster_ptr, Rational::from_i64(160).unwrap()); - visualizer - .snapshot_combined("grow".to_string(), vec![&interface_ptr, &dual_module]) - .unwrap(); - // create bigger cluster - interface_ptr.create_node_vec(&[18, 23, 24, 31], &mut dual_module); - let dual_node_bigger_cluster_ptr = interface_ptr.read_recursive().nodes[5].clone(); - dual_module.grow_dual_node(&dual_node_bigger_cluster_ptr, Rational::from_i64(120).unwrap()); - visualizer - .snapshot_combined("solved".to_string(), vec![&interface_ptr, &dual_module]) - .unwrap(); - // the result subgraph - let subgraph = vec![82, 24]; - visualizer - .snapshot_combined("subgraph".to_string(), vec![&interface_ptr, &dual_module, &subgraph]) - .unwrap(); - } - - #[test] - fn dual_module_serial_find_valid_subgraph_1() { - // cargo test dual_module_serial_find_valid_subgraph_1 -- --nocapture - let visualize_filename = "dual_module_serial_find_valid_subgraph_1.json".to_string(); - let weight = 1000; - let code = CodeCapacityColorCode::new(7, 0.1, weight); - let mut visualizer = Visualizer::new( - Some(visualize_data_folder() + visualize_filename.as_str()), - code.get_positions(), - true, - ) - .unwrap(); - print_visualize_link(visualize_filename); - // create dual module - let model_graph = code.get_model_graph(); - let mut dual_module = DualModuleSerial::new_empty(&model_graph.initializer); - // try to work on a simple syndrome - let decoding_graph = DecodingHyperGraph::new_defects(model_graph, vec![3, 12]); - let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph.clone(), &mut dual_module); - visualizer - .snapshot_combined("syndrome".to_string(), vec![&interface_ptr, &dual_module]) - .unwrap(); - // invalid clusters - assert!(!decoding_graph.is_valid_cluster_auto_vertices(&vec![20].into_iter().collect())); - assert!(!decoding_graph.is_valid_cluster_auto_vertices(&vec![9, 20].into_iter().collect())); - assert!(!decoding_graph.is_valid_cluster_auto_vertices(&vec![15].into_iter().collect())); - assert!(decoding_graph.is_valid_cluster_auto_vertices(&vec![15, 20].into_iter().collect())); - // the result subgraph - let subgraph = decoding_graph - .find_valid_subgraph_auto_vertices(&vec![9, 15, 20, 21].into_iter().collect()) - .unwrap(); - visualizer - .snapshot_combined("subgraph".to_string(), vec![&interface_ptr, &dual_module, &subgraph]) - .unwrap(); - } -} +// //! Serial Dual Module +// //! +// //! A serial implementation of the dual module +// //! + +// use crate::derivative::Derivative; +// use crate::num_traits::sign::Signed; +// use crate::num_traits::{ToPrimitive, Zero}; +// use crate::ordered_float::OrderedFloat; +// use crate::pointers::*; +// use crate::primal_module::Affinity; +// use crate::primal_module_serial::PrimalClusterPtr; +// use crate::util::*; +// use crate::visualize::*; +// use crate::{add_shared_methods, dual_module::*}; +// use num_traits::FromPrimitive; +// use std::collections::BTreeSet; + +// pub struct DualModuleSerial { +// /// all vertices including virtual ones +// pub vertices: Vec, +// /// keep edges, which can also be accessed in [`Self::vertices`] +// pub edges: Vec, +// /// maintain an active list to optimize for average cases: most defect vertices have already been matched, and we only need to work on a few remained; +// /// note that this list may contain duplicate nodes +// pub active_edges: BTreeSet, +// /// active nodes +// pub active_nodes: BTreeSet, + +// /// the current mode of the dual module +// /// note: currently does not have too much functionality +// mode: DualModuleMode, +// } + +// pub type DualModuleSerialPtr = ArcRwLock; +// pub type DualModuleSerialWeak = WeakRwLock; + +// #[derive(Derivative)] +// #[derivative(Debug)] +// pub struct Vertex { +// /// the index of this vertex in the decoding graph, not necessary the index in [`DualModuleSerial::vertices`] if it's partitioned +// pub vertex_index: VertexIndex, +// /// if a vertex is defect, then [`Vertex::propagated_dual_node`] always corresponds to that root +// pub is_defect: bool, +// /// all neighbor edges, in surface code this should be constant number of edges +// #[derivative(Debug = "ignore")] +// pub edges: Vec, +// } + +// pub type VertexPtr = ArcRwLock; +// pub type VertexWeak = WeakRwLock; + +// impl std::fmt::Debug for VertexPtr { +// fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { +// let vertex = self.read_recursive(); +// write!(f, "{}", vertex.vertex_index) +// } +// } + +// impl std::fmt::Debug for VertexWeak { +// fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { +// let vertex_ptr = self.upgrade_force(); +// let vertex = vertex_ptr.read_recursive(); +// write!(f, "{}", vertex.vertex_index) +// } +// } + +// #[derive(Derivative)] +// #[derivative(Debug)] +// pub struct Edge { +// /// global edge index +// pub edge_index: EdgeIndex, +// /// total weight of this edge +// weight: Rational, +// #[derivative(Debug = "ignore")] +// vertices: Vec, +// /// growth value, growth <= weight +// growth: Rational, +// /// the dual nodes that contributes to this edge +// dual_nodes: Vec, +// /// the speed of growth +// grow_rate: Rational, + +// #[cfg(feature = "incr_lp")] +// /// storing the weights of the clusters that are currently contributing to this edge +// cluster_weights: hashbrown::HashMap, +// } + +// pub type EdgePtr = ArcRwLock; +// pub type EdgeWeak = WeakRwLock; + +// impl std::fmt::Debug for EdgePtr { +// fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { +// let edge = self.read_recursive(); +// write!( +// f, +// "[edge: {}]: weight: {}, grow_rate: {}, growth: {}\n\tdual_nodes: {:?}", +// edge.edge_index, +// edge.weight, +// edge.grow_rate, +// edge.growth, +// edge.dual_nodes +// .iter() +// .filter(|node| !node.upgrade_force().read_recursive().grow_rate.is_zero()) +// .collect::>() +// ) +// } +// } + +// impl std::fmt::Debug for EdgeWeak { +// fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { +// let edge_ptr = self.upgrade_force(); +// let edge = edge_ptr.read_recursive(); +// write!( +// f, +// "[edge: {}]: weight: {}, grow_rate: {}, growth: {}\n\tdual_nodes: {:?}", +// edge.edge_index, +// edge.weight, +// edge.grow_rate, +// edge.growth, +// edge.dual_nodes +// .iter() +// .filter(|node| !node.upgrade_force().read_recursive().grow_rate.is_zero()) +// .collect::>() +// ) +// } +// } + +// impl DualModuleImpl for DualModuleSerial { +// /// initialize the dual module, which is supposed to be reused for multiple decoding tasks with the same structure +// #[allow(clippy::unnecessary_cast)] +// fn new_empty(initializer: &SolverInitializer) -> Self { +// initializer.sanity_check().unwrap(); +// // create vertices +// let vertices: Vec = (0..initializer.vertex_num) +// .map(|vertex_index| { +// VertexPtr::new_value(Vertex { +// vertex_index, +// is_defect: false, +// edges: vec![], +// }) +// }) +// .collect(); +// // set edges +// let mut edges = Vec::::new(); +// for hyperedge in initializer.weighted_edges.iter() { +// let edge_ptr = EdgePtr::new_value(Edge { +// edge_index: edges.len() as EdgeIndex, +// growth: Rational::zero(), +// weight: Rational::from_usize(hyperedge.weight).unwrap(), +// dual_nodes: vec![], +// vertices: hyperedge +// .vertices +// .iter() +// .map(|i| vertices[*i as usize].downgrade()) +// .collect::>(), +// grow_rate: Rational::zero(), +// #[cfg(feature = "incr_lp")] +// cluster_weights: hashbrown::HashMap::new(), +// }); +// for &vertex_index in hyperedge.vertices.iter() { +// vertices[vertex_index as usize].write().edges.push(edge_ptr.downgrade()); +// } +// edges.push(edge_ptr); +// } +// Self { +// vertices, +// edges, +// active_edges: BTreeSet::new(), +// active_nodes: BTreeSet::new(), +// mode: DualModuleMode::default(), +// } +// } + +// /// clear all growth and existing dual nodes +// fn clear(&mut self) { +// self.active_edges.clear(); +// self.active_nodes.clear(); +// for vertex_ptr in self.vertices.iter() { +// vertex_ptr.write().clear(); +// } +// for edge_ptr in self.edges.iter() { +// edge_ptr.write().clear(); +// } +// } + +// fn add_defect_node(&mut self, dual_node_ptr: &DualNodePtr) { +// let dual_node = dual_node_ptr.read_recursive(); +// debug_assert!(dual_node.invalid_subgraph.edges.is_empty()); +// debug_assert!( +// dual_node.invalid_subgraph.vertices.len() == 1, +// "defect node (without edges) should only work on a single vertex, for simplicity" +// ); +// let vertex_index = dual_node.invalid_subgraph.vertices.iter().next().unwrap(); +// let mut vertex = self.vertices[*vertex_index].write(); +// assert!(!vertex.is_defect, "defect should not be added twice"); +// vertex.is_defect = true; +// drop(dual_node); +// drop(vertex); +// self.add_dual_node(dual_node_ptr); +// } + +// #[allow(clippy::unnecessary_cast)] +// fn add_dual_node(&mut self, dual_node_ptr: &DualNodePtr) { +// // make sure the active edges are set +// let dual_node_weak = dual_node_ptr.downgrade(); +// let dual_node = dual_node_ptr.read_recursive(); +// for &edge_index in dual_node.invalid_subgraph.hair.iter() { +// let mut edge = self.edges[edge_index as usize].write(); +// edge.grow_rate += &dual_node.grow_rate; +// edge.dual_nodes.push(dual_node_weak.clone()); +// if edge.grow_rate.is_zero() { +// self.active_edges.remove(&edge_index); +// } else { +// self.active_edges.insert(edge_index); +// } +// } +// self.active_nodes +// .insert(OrderedDualNodePtr::new(dual_node.index, dual_node_ptr.clone())); +// } + +// #[allow(clippy::unnecessary_cast)] +// fn set_grow_rate(&mut self, dual_node_ptr: &DualNodePtr, grow_rate: Rational) { +// let mut dual_node = dual_node_ptr.write(); +// let grow_rate_diff = grow_rate.clone() - &dual_node.grow_rate; +// dual_node.grow_rate = grow_rate; +// drop(dual_node); +// let dual_node = dual_node_ptr.read_recursive(); +// for &edge_index in dual_node.invalid_subgraph.hair.iter() { +// let mut edge = self.edges[edge_index as usize].write(); +// edge.grow_rate += &grow_rate_diff; +// if edge.grow_rate.is_zero() { +// self.active_edges.remove(&edge_index); +// } else { +// self.active_edges.insert(edge_index); +// } +// } +// if dual_node.grow_rate.is_zero() { +// self.active_nodes +// .remove(&OrderedDualNodePtr::new(dual_node.index, dual_node_ptr.clone())); +// } else { +// self.active_nodes +// .insert(OrderedDualNodePtr::new(dual_node.index, dual_node_ptr.clone())); +// } +// } + +// #[allow(clippy::collapsible_else_if, clippy::unnecessary_cast)] +// fn compute_maximum_update_length_dual_node( +// &mut self, +// dual_node_ptr: &DualNodePtr, +// simultaneous_update: bool, +// ) -> MaxUpdateLength { +// let node = dual_node_ptr.read_recursive(); +// let mut max_update_length = MaxUpdateLength::new(); +// for &edge_index in node.invalid_subgraph.hair.iter() { +// let edge = self.edges[edge_index as usize].read_recursive(); +// let mut grow_rate = Rational::zero(); +// if simultaneous_update { +// // consider all dual nodes +// for node_weak in edge.dual_nodes.iter() { +// grow_rate += node_weak.upgrade_force().read_recursive().grow_rate.clone(); +// } +// } else { +// grow_rate = node.grow_rate.clone(); +// } +// if grow_rate.is_positive() { +// let edge_remain = edge.weight.clone() - edge.growth.clone(); +// if edge_remain.is_zero() { +// max_update_length.merge(MaxUpdateLength::Conflicting(edge_index)); +// } else { +// max_update_length.merge(MaxUpdateLength::ValidGrow(edge_remain / grow_rate)); +// } +// } else if grow_rate.is_negative() { +// if edge.growth.is_zero() { +// if node.grow_rate.is_negative() { +// max_update_length.merge(MaxUpdateLength::ShrinkProhibited(OrderedDualNodePtr::new( +// node.index, +// dual_node_ptr.clone(), +// ))); +// } else { +// // find a negatively growing edge +// let mut found = false; +// for node_weak in edge.dual_nodes.iter() { +// let node_ptr = node_weak.upgrade_force(); +// if node_ptr.read_recursive().grow_rate.is_negative() { +// let index = node_ptr.read_recursive().index; +// max_update_length +// .merge(MaxUpdateLength::ShrinkProhibited(OrderedDualNodePtr::new(index, node_ptr))); +// found = true; +// break; +// } +// } +// assert!(found, "unreachable"); +// } +// } else { +// max_update_length.merge(MaxUpdateLength::ValidGrow(-edge.growth.clone() / grow_rate)); +// } +// } +// } +// max_update_length +// } + +// #[allow(clippy::unnecessary_cast)] +// fn compute_maximum_update_length(&mut self) -> GroupMaxUpdateLength { +// let mut group_max_update_length = GroupMaxUpdateLength::new(); +// for &edge_index in self.active_edges.iter() { +// let edge = self.edges[edge_index as usize].read_recursive(); +// let mut grow_rate = Rational::zero(); +// for node_weak in edge.dual_nodes.iter() { +// let node_ptr = node_weak.upgrade_force(); +// let node = node_ptr.read_recursive(); +// grow_rate += node.grow_rate.clone(); +// } +// if grow_rate.is_positive() { +// let edge_remain = edge.weight.clone() - edge.growth.clone(); +// if edge_remain.is_zero() { +// group_max_update_length.add(MaxUpdateLength::Conflicting(edge_index)); +// } else { +// group_max_update_length.add(MaxUpdateLength::ValidGrow(edge_remain / grow_rate)); +// } +// } else if grow_rate.is_negative() { +// if edge.growth.is_zero() { +// // it will be reported when iterating active dual nodes +// } else { +// group_max_update_length.add(MaxUpdateLength::ValidGrow(-edge.growth.clone() / grow_rate)); +// } +// } +// } +// for node_ptr in self.active_nodes.iter() { +// let node = node_ptr.ptr.read_recursive(); +// if node.grow_rate.is_negative() { +// if node.get_dual_variable().is_positive() { +// group_max_update_length +// .add(MaxUpdateLength::ValidGrow(-node.get_dual_variable() / node.grow_rate.clone())); +// } else { +// group_max_update_length.add(MaxUpdateLength::ShrinkProhibited(node_ptr.clone())); +// } +// } +// } +// group_max_update_length +// } + +// #[allow(clippy::unnecessary_cast)] +// fn grow_dual_node(&mut self, dual_node_ptr: &DualNodePtr, length: Rational) { +// if length.is_zero() { +// eprintln!("[warning] calling `grow_dual_node` with zero length, nothing to do"); +// return; +// } +// let node = dual_node_ptr.read_recursive(); +// let grow_amount = length * node.grow_rate.clone(); +// for &edge_index in node.invalid_subgraph.hair.iter() { +// let mut edge = self.edges[edge_index as usize].write(); +// edge.growth += grow_amount.clone(); +// // assert!( +// // !edge.growth.is_negative(), +// // "edge {} over-shrunk: the new growth is {:?}", +// // edge_index, +// // edge.growth +// // ); +// // assert!( +// // edge.growth <= edge.weight, +// // "edge {} over-grown: the new growth is {:?}, weight is {:?}", +// // edge_index, +// // edge.growth, +// // edge.weight +// // ); +// } +// drop(node); +// // update dual variable +// let mut dual_node_ptr_write = dual_node_ptr.write(); +// let dual_variable = dual_node_ptr_write.get_dual_variable(); +// dual_node_ptr_write.set_dual_variable(dual_variable + grow_amount); +// } + +// #[allow(clippy::unnecessary_cast)] +// fn grow(&mut self, length: Rational) { +// debug_assert!( +// length.is_positive(), +// "growth should be positive; if desired, please set grow rate to negative for shrinking" +// ); +// // update the active edges +// for &edge_index in self.active_edges.iter() { +// let mut edge = self.edges[edge_index as usize].write(); +// let mut grow_rate = Rational::zero(); +// for node_weak in edge.dual_nodes.iter() { +// grow_rate += node_weak.upgrade_force().read_recursive().grow_rate.clone(); +// } +// edge.growth += length.clone() * grow_rate; +// // assert!( +// // !edge.growth.is_negative(), +// // "edge {} over-shrunk: the new growth is {:?}", +// // edge_index, +// // edge.growth +// // ); +// // assert!( +// // edge.growth <= edge.weight, +// // "edge {} over-grown: the new growth is {:?}, weight is {:?}", +// // edge_index, +// // edge.growth, +// // edge.weight +// // ); +// } +// // update dual variables +// for node_ptr in self.active_nodes.iter() { +// let mut node = node_ptr.ptr.write(); +// let grow_rate = node.grow_rate.clone(); +// let dual_variable = node.get_dual_variable(); +// node.set_dual_variable(dual_variable + length.clone() * grow_rate); +// } +// } + +// #[allow(clippy::unnecessary_cast)] +// fn get_edge_nodes(&self, edge_index: EdgeIndex) -> Vec { +// self.edges[edge_index as usize] +// .read_recursive() +// .dual_nodes +// .iter() +// .map(|x| x.upgrade_force()) +// .collect() +// } + +// fn get_edge_slack(&self, edge_index: EdgeIndex) -> Rational { +// let edge = self.edges[edge_index].read_recursive(); +// edge.weight.clone() - edge.growth.clone() +// } + +// #[allow(clippy::unnecessary_cast)] +// fn is_edge_tight(&self, edge_index: EdgeIndex) -> bool { +// let edge = self.edges[edge_index as usize].read_recursive(); +// edge.growth == edge.weight +// } + +// add_shared_methods!(); + +// /// miscs +// fn debug_print(&self) { +// println!("\n[current states]"); +// println!("edges: {:?}", self.edges); +// } + +// fn grow_edge(&self, edge_index: EdgeIndex, amount: &Rational) { +// let mut edge = self.edges[edge_index].write(); +// edge.growth += amount; +// } + +// /* affinity */ +// fn calculate_cluster_affinity(&mut self, cluster: PrimalClusterPtr) -> Option { +// let mut start = 0.0; +// let cluster = cluster.read_recursive(); +// start -= cluster.edges.len() as f64 + cluster.nodes.len() as f64; + +// let mut weight = Rational::zero(); +// for &edge_index in cluster.edges.iter() { +// let edge_ptr = self.edges[edge_index].read_recursive(); +// weight += &edge_ptr.weight - &edge_ptr.growth; +// } +// for node in cluster.nodes.iter() { +// let dual_node = node.read_recursive().dual_node_ptr.clone(); +// weight -= &dual_node.read_recursive().dual_variable_at_last_updated_time; +// } +// if weight.is_zero() { +// return None; +// } +// start += weight.to_f64().unwrap(); + +// Some(OrderedFloat::from(start)) +// } + +// fn get_edge_free_weight( +// &self, +// edge_index: EdgeIndex, +// participating_dual_variables: &hashbrown::HashSet, +// ) -> Rational { +// let edge = self.edges[edge_index as usize].read_recursive(); +// let mut free_weight = edge.weight.clone(); +// for dual_node in edge.dual_nodes.iter() { +// let dual_node = dual_node.upgrade_force(); +// if participating_dual_variables.contains(&dual_node.read_recursive().index) { +// continue; +// } +// free_weight -= &dual_node.read_recursive().dual_variable_at_last_updated_time; +// } + +// free_weight +// } + +// #[cfg(feature = "incr_lp")] +// fn get_edge_free_weight_cluster(&self, edge_index: EdgeIndex, cluster_index: NodeIndex) -> Rational { +// let edge = self.edges[edge_index as usize].read_recursive(); +// edge.weight.clone() +// - edge +// .cluster_weights +// .iter() +// .filter_map(|(c_idx, y)| if cluster_index.ne(c_idx) { Some(y) } else { None }) +// .sum::() +// } + +// #[cfg(feature = "incr_lp")] +// fn update_edge_cluster_weights_union( +// &self, +// dual_node_ptr: &DualNodePtr, +// drained_cluster_index: NodeIndex, +// absorbing_cluster_index: NodeIndex, +// ) { +// let dual_node = dual_node_ptr.read_recursive(); +// for edge_index in dual_node.invalid_subgraph.hair.iter() { +// let mut edge = self.edges[*edge_index as usize].write(); +// if let Some(removed) = edge.cluster_weights.remove(&drained_cluster_index) { +// *edge +// .cluster_weights +// .entry(absorbing_cluster_index) +// .or_insert(Rational::zero()) += removed; +// } +// } +// } + +// #[cfg(feature = "incr_lp")] +// fn update_edge_cluster_weights(&self, edge_index: usize, cluster_index: usize, weight: Rational) { +// match self.edges[edge_index].write().cluster_weights.entry(cluster_index) { +// hashbrown::hash_map::Entry::Occupied(mut o) => { +// *o.get_mut() += weight; +// } +// hashbrown::hash_map::Entry::Vacant(v) => { +// v.insert(weight); +// } +// } +// } +// } + +// /* +// Implementing fast clear operations +// */ + +// impl Edge { +// fn clear(&mut self) { +// self.growth = Rational::zero(); +// self.dual_nodes.clear(); +// #[cfg(feature = "incr_lp")] +// self.cluster_weights.clear(); +// } +// } + +// impl Vertex { +// fn clear(&mut self) { +// self.is_defect = false; +// } +// } + +// /* +// Implementing visualization functions +// */ + +// impl MWPSVisualizer for DualModuleSerial { +// fn snapshot(&self, abbrev: bool) -> serde_json::Value { +// let mut vertices: Vec = vec![]; +// for vertex_ptr in self.vertices.iter() { +// let vertex = vertex_ptr.read_recursive(); +// vertices.push(json!({ +// if abbrev { "s" } else { "is_defect" }: i32::from(vertex.is_defect), +// })); +// } +// let mut edges: Vec = vec![]; +// for edge_ptr in self.edges.iter() { +// let edge = edge_ptr.read_recursive(); +// let unexplored = edge.weight.clone() - edge.growth.clone(); +// edges.push(json!({ +// if abbrev { "w" } else { "weight" }: edge.weight.to_f64(), +// if abbrev { "v" } else { "vertices" }: edge.vertices.iter().map(|x| x.upgrade_force().read_recursive().vertex_index).collect::>(), +// if abbrev { "g" } else { "growth" }: edge.growth.to_f64(), +// "gn": edge.growth.numer().to_i64(), +// "gd": edge.growth.denom().to_i64(), +// "un": unexplored.numer().to_i64(), +// "ud": unexplored.denom().to_i64(), +// })); +// } +// json!({ +// "vertices": vertices, +// "edges": edges, +// }) +// } +// } + +// #[cfg(test)] +// mod tests { +// use super::*; +// use crate::decoding_hypergraph::*; +// use crate::example_codes::*; + +// #[test] +// fn dual_module_serial_basics_1() { +// // cargo test dual_module_serial_basics_1 -- --nocapture +// let visualize_filename = "dual_module_serial_basics_1.json".to_string(); +// let weight = 1000; +// let code = CodeCapacityColorCode::new(7, 0.1, weight); +// let mut visualizer = Visualizer::new( +// Some(visualize_data_folder() + visualize_filename.as_str()), +// code.get_positions(), +// true, +// ) +// .unwrap(); +// print_visualize_link(visualize_filename); +// // create dual module +// let model_graph = code.get_model_graph(); +// let mut dual_module = DualModuleSerial::new_empty(&model_graph.initializer); +// // try to work on a simple syndrome +// let decoding_graph = DecodingHyperGraph::new_defects(model_graph, vec![3, 12]); +// let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph, &mut dual_module); +// visualizer +// .snapshot_combined("syndrome".to_string(), vec![&interface_ptr, &dual_module]) +// .unwrap(); +// // grow them each by half +// let dual_node_3_ptr = interface_ptr.read_recursive().nodes[0].clone(); +// let dual_node_12_ptr = interface_ptr.read_recursive().nodes[1].clone(); +// dual_module.grow_dual_node(&dual_node_3_ptr, Rational::from_usize(weight / 2).unwrap()); +// dual_module.grow_dual_node(&dual_node_12_ptr, Rational::from_usize(weight / 2).unwrap()); +// visualizer +// .snapshot_combined("grow".to_string(), vec![&interface_ptr, &dual_module]) +// .unwrap(); +// // cluster becomes solved +// dual_module.grow_dual_node(&dual_node_3_ptr, Rational::from_usize(weight / 2).unwrap()); +// dual_module.grow_dual_node(&dual_node_12_ptr, Rational::from_usize(weight / 2).unwrap()); +// visualizer +// .snapshot_combined("solved".to_string(), vec![&interface_ptr, &dual_module]) +// .unwrap(); +// // the result subgraph +// let subgraph = vec![15, 20]; +// visualizer +// .snapshot_combined("subgraph".to_string(), vec![&interface_ptr, &dual_module, &subgraph]) +// .unwrap(); +// } + +// #[test] +// fn dual_module_serial_basics_2() { +// // cargo test dual_module_serial_basics_2 -- --nocapture +// let visualize_filename = "dual_module_serial_basics_2.json".to_string(); +// let weight = 1000; +// let code = CodeCapacityTailoredCode::new(7, 0., 0.1, weight); +// let mut visualizer = Visualizer::new( +// Some(visualize_data_folder() + visualize_filename.as_str()), +// code.get_positions(), +// true, +// ) +// .unwrap(); +// print_visualize_link(visualize_filename); +// // create dual module +// let model_graph = code.get_model_graph(); +// let mut dual_module = DualModuleSerial::new_empty(&model_graph.initializer); +// // try to work on a simple syndrome +// let decoding_graph = DecodingHyperGraph::new_defects(model_graph, vec![23, 24, 29, 30]); +// let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph, &mut dual_module); +// visualizer +// .snapshot_combined("syndrome".to_string(), vec![&interface_ptr, &dual_module]) +// .unwrap(); +// // grow them each by half +// let dual_node_23_ptr = interface_ptr.read_recursive().nodes[0].clone(); +// let dual_node_24_ptr = interface_ptr.read_recursive().nodes[1].clone(); +// let dual_node_29_ptr = interface_ptr.read_recursive().nodes[2].clone(); +// let dual_node_30_ptr = interface_ptr.read_recursive().nodes[3].clone(); +// dual_module.grow_dual_node(&dual_node_23_ptr, Rational::from_usize(weight / 4).unwrap()); +// dual_module.grow_dual_node(&dual_node_24_ptr, Rational::from_usize(weight / 4).unwrap()); +// dual_module.grow_dual_node(&dual_node_29_ptr, Rational::from_usize(weight / 4).unwrap()); +// dual_module.grow_dual_node(&dual_node_30_ptr, Rational::from_usize(weight / 4).unwrap()); +// visualizer +// .snapshot_combined("solved".to_string(), vec![&interface_ptr, &dual_module]) +// .unwrap(); +// // the result subgraph +// let subgraph = vec![24]; +// visualizer +// .snapshot_combined("subgraph".to_string(), vec![&interface_ptr, &dual_module, &subgraph]) +// .unwrap(); +// } + +// #[test] +// fn dual_module_serial_basics_3() { +// // cargo test dual_module_serial_basics_3 -- --nocapture +// let visualize_filename = "dual_module_serial_basics_3.json".to_string(); +// let weight = 600; // do not change, the data is hard-coded +// let pxy = 0.0602828812732227; +// let code = CodeCapacityTailoredCode::new(7, pxy, 0.1, weight); // do not change probabilities: the data is hard-coded +// let mut visualizer = Visualizer::new( +// Some(visualize_data_folder() + visualize_filename.as_str()), +// code.get_positions(), +// true, +// ) +// .unwrap(); +// print_visualize_link(visualize_filename); +// // create dual module +// let model_graph = code.get_model_graph(); +// let mut dual_module = DualModuleSerial::new_empty(&model_graph.initializer); +// // try to work on a simple syndrome +// let decoding_graph = DecodingHyperGraph::new_defects(model_graph, vec![17, 23, 29, 30]); +// let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph, &mut dual_module); +// visualizer +// .snapshot_combined("syndrome".to_string(), vec![&interface_ptr, &dual_module]) +// .unwrap(); +// // grow them each by half +// let dual_node_17_ptr = interface_ptr.read_recursive().nodes[0].clone(); +// let dual_node_23_ptr = interface_ptr.read_recursive().nodes[1].clone(); +// let dual_node_29_ptr = interface_ptr.read_recursive().nodes[2].clone(); +// let dual_node_30_ptr = interface_ptr.read_recursive().nodes[3].clone(); +// dual_module.grow_dual_node(&dual_node_17_ptr, Rational::from_i64(160).unwrap()); +// dual_module.grow_dual_node(&dual_node_23_ptr, Rational::from_i64(160).unwrap()); +// dual_module.grow_dual_node(&dual_node_29_ptr, Rational::from_i64(160).unwrap()); +// dual_module.grow_dual_node(&dual_node_30_ptr, Rational::from_i64(160).unwrap()); +// visualizer +// .snapshot_combined("grow".to_string(), vec![&interface_ptr, &dual_module]) +// .unwrap(); +// // create cluster +// interface_ptr.create_node_vec(&[24], &mut dual_module); +// let dual_node_cluster_ptr = interface_ptr.read_recursive().nodes[4].clone(); +// dual_module.grow_dual_node(&dual_node_17_ptr, Rational::from_i64(160).unwrap()); +// dual_module.grow_dual_node(&dual_node_cluster_ptr, Rational::from_i64(160).unwrap()); +// visualizer +// .snapshot_combined("grow".to_string(), vec![&interface_ptr, &dual_module]) +// .unwrap(); +// // create bigger cluster +// interface_ptr.create_node_vec(&[18, 23, 24, 31], &mut dual_module); +// let dual_node_bigger_cluster_ptr = interface_ptr.read_recursive().nodes[5].clone(); +// dual_module.grow_dual_node(&dual_node_bigger_cluster_ptr, Rational::from_i64(120).unwrap()); +// visualizer +// .snapshot_combined("solved".to_string(), vec![&interface_ptr, &dual_module]) +// .unwrap(); +// // the result subgraph +// let subgraph = vec![82, 24]; +// visualizer +// .snapshot_combined("subgraph".to_string(), vec![&interface_ptr, &dual_module, &subgraph]) +// .unwrap(); +// } + +// #[test] +// fn dual_module_serial_find_valid_subgraph_1() { +// // cargo test dual_module_serial_find_valid_subgraph_1 -- --nocapture +// let visualize_filename = "dual_module_serial_find_valid_subgraph_1.json".to_string(); +// let weight = 1000; +// let code = CodeCapacityColorCode::new(7, 0.1, weight); +// let mut visualizer = Visualizer::new( +// Some(visualize_data_folder() + visualize_filename.as_str()), +// code.get_positions(), +// true, +// ) +// .unwrap(); +// print_visualize_link(visualize_filename); +// // create dual module +// let model_graph = code.get_model_graph(); +// let mut dual_module = DualModuleSerial::new_empty(&model_graph.initializer); +// // try to work on a simple syndrome +// let decoding_graph = DecodingHyperGraph::new_defects(model_graph, vec![3, 12]); +// let interface_ptr = DualModuleInterfacePtr::new_load(decoding_graph.clone(), &mut dual_module); +// visualizer +// .snapshot_combined("syndrome".to_string(), vec![&interface_ptr, &dual_module]) +// .unwrap(); +// // invalid clusters +// assert!(!decoding_graph.is_valid_cluster_auto_vertices(&vec![20].into_iter().collect())); +// assert!(!decoding_graph.is_valid_cluster_auto_vertices(&vec![9, 20].into_iter().collect())); +// assert!(!decoding_graph.is_valid_cluster_auto_vertices(&vec![15].into_iter().collect())); +// assert!(decoding_graph.is_valid_cluster_auto_vertices(&vec![15, 20].into_iter().collect())); +// // the result subgraph +// let subgraph = decoding_graph +// .find_valid_subgraph_auto_vertices(&vec![9, 15, 20, 21].into_iter().collect()) +// .unwrap(); +// visualizer +// .snapshot_combined("subgraph".to_string(), vec![&interface_ptr, &dual_module, &subgraph]) +// .unwrap(); +// } +// } diff --git a/src/example_codes.rs b/src/example_codes.rs index 62a7191a..afc07b41 100644 --- a/src/example_codes.rs +++ b/src/example_codes.rs @@ -362,7 +362,7 @@ pub trait ExampleCode { /// generate random errors based on the edge probabilities and a seed for pseudo number generator #[allow(clippy::unnecessary_cast)] - fn generate_random_errors(&mut self, seed: u64) -> (SyndromePattern, Subgraph) { + fn generate_random_errors(&mut self, seed: u64) -> (SyndromePattern, Vec) { let mut rng = DeterministicRng::seed_from_u64(seed); let (vertices, edges) = self.vertices_edges(); for vertex in vertices.iter_mut() { @@ -878,6 +878,7 @@ impl CodeCapacityColorCode { #[cfg(feature = "qecp_integrate")] #[cfg_attr(feature = "python_binding", cfg_eval)] #[cfg_attr(feature = "python_binding", pyclass)] +#[derive(Debug, Clone)] pub struct QECPlaygroundCode { simulator: qecp::simulator::Simulator, noise_model: std::sync::Arc, @@ -901,7 +902,7 @@ impl ExampleCode for QECPlaygroundCode { } // override simulation function #[allow(clippy::unnecessary_cast)] - fn generate_random_errors(&mut self, seed: u64) -> (SyndromePattern, Subgraph) { + fn generate_random_errors(&mut self, seed: u64) -> (SyndromePattern, Vec) { use qecp::simulator::SimulatorGenerics; let rng = qecp::reproducible_rand::Xoroshiro128StarStar::seed_from_u64(seed); self.simulator.set_rng(rng); @@ -942,8 +943,7 @@ impl ExampleCode for QECPlaygroundCode { } } -#[cfg(feature = "python_binding")] -#[cfg(feature = "qecp_integrate")] +#[cfg(all(feature = "python_binding", feature = "qecp_integrate"))] bind_trait_example_code! {QECPlaygroundCode} #[cfg(feature = "qecp_integrate")] @@ -1154,7 +1154,7 @@ impl ExampleCode for ErrorPatternReader { fn immutable_vertices_edges(&self) -> (&Vec, &Vec) { (&self.vertices, &self.edges) } - fn generate_random_errors(&mut self, _seed: u64) -> (SyndromePattern, Subgraph) { + fn generate_random_errors(&mut self, _seed: u64) -> (SyndromePattern, Vec) { assert!( self.syndrome_index < self.syndrome_patterns.len(), "reading syndrome pattern more than in the file, consider generate the file with more data points" diff --git a/src/heapz/.circleci/config.yml b/src/heapz/.circleci/config.yml new file mode 100644 index 00000000..ff262e18 --- /dev/null +++ b/src/heapz/.circleci/config.yml @@ -0,0 +1,143 @@ +version: 2.1 + +orbs: + rust: circleci/rust@1.6.0 + +executors: + rust-app: + environment: + PIPELINE_NUM: << pipeline.number >> + TZ: "America/Los_Angeles" + docker: + - image: cimg/rust:1.62.1 + + ubuntu: + environment: + PIPELINE_NUM: << pipeline.number >> + TZ: "America/Los_Angeles" + machine: + image: ubuntu-2004:2022.07.1 + docker_layer_caching: true + +workflows: + ci-cd: + jobs: + - check_formatting + - check_wasm_support + - build + - test: + requires: + - build + - benchmark: + requires: + - build + - publish: + filters: + branches: + only: main + requires: + - check_formatting + - check_wasm_support + - build + - test + - check_version: + filters: + branches: + ignore: + - main + +jobs: + benchmark: + executor: rust-app + environment: + MASTER_BRANCH_URL: git@github.com:Ruddickmg/heapz.git + steps: + - checkout + - run: + name: Checkout Master + command: git clone "$MASTER_BRANCH_URL" + - restore_cache: + key: dependency-cache-{{ checksum "Cargo.lock" }} + - restore_cache: + key: benchmark-cache-{{ checksum "heapz/Cargo.lock" }} + - run: + name: run benchmarks + command: cargo bench + - save_cache: + key: benchmark-cache-{{ checksum "Cargo.lock" }} + paths: + - "./target/criterion" + + publish: + executor: rust-app + steps: + - checkout + - run: + name: Login to crates.io + command: cargo login + - run: + name: Verify publish will work + command: cargo publish --dry-run + - run: + name: Publish + command: cargo publish + + build: + executor: rust-app + steps: + - checkout + - restore_cache: + key: dependency-cache-{{ checksum "Cargo.lock" }} + - run: + name: Stable Build + command: cargo build + - save_cache: + key: dependency-cache-{{ checksum "Cargo.lock" }} + paths: + - "~/.cargo" + - "./target" + + check_wasm_support: + executor: rust-app + steps: + - checkout + - run: + name: Add wasm target + command: rustup target add wasm32-unknown-unknown + - run: + name: Verify code can be built in wasm + command: cargo check --target wasm32-unknown-unknown + + check_version: + executor: ubuntu + environment: + MASTER_BRANCH_URL: git@github.com:Ruddickmg/heapz.git + steps: + - checkout + - run: + name: Checkout Master + command: git clone "$MASTER_BRANCH_URL" + - run: + name: Check that version has changed + command: | + export MASTER_VERSION="$(cat ./heapz/Cargo.toml | grep -oP '^version = "\K[^"]+')" + export BRANCH_VERSION="$(cat ./Cargo.toml | grep -oP '^version = "\K[^"]+')" + if [ "$MASTER_VERSION" = "$BRANCH_VERSION" ]; then exit 1; else exit 0; fi + + check_formatting: + executor: rust-app + steps: + - checkout + - run: + name: Check formatting + command: cargo fmt -- --check + + test: + executor: rust-app + steps: + - checkout + - restore_cache: + key: dependency-cache-{{ checksum "Cargo.lock" }} + - run: + name: test + command: cargo test \ No newline at end of file diff --git a/src/heapz/.gitignore b/src/heapz/.gitignore new file mode 100644 index 00000000..92462427 --- /dev/null +++ b/src/heapz/.gitignore @@ -0,0 +1,3 @@ +/target +.idea/* +/.idea diff --git a/src/heapz/Cargo.toml b/src/heapz/Cargo.toml new file mode 100644 index 00000000..03e4e6c5 --- /dev/null +++ b/src/heapz/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "heapz" +version = "1.1.4" +edition = "2021" +license-file = "LICENSE.md" +description = "Heap/Priority Queue implementations" +repository = "https://github.com/Ruddickmg/heapz" +readme = "README.md" +keywords = ["heap", "priority", "queue"] +categories = ["algorithms", "data-structures", "wasm"] + +[dev-dependencies] +rand = "0.8.5" +criterion = "0.4.0" + +[[bench]] +name = "rank_pairing_heap" +harness = false + +[[bench]] +name = "pairing_heap" +harness = false \ No newline at end of file diff --git a/src/heapz/LICENSE.md b/src/heapz/LICENSE.md new file mode 100644 index 00000000..9779850e --- /dev/null +++ b/src/heapz/LICENSE.md @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Marcus Ruddick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/src/heapz/README.md b/src/heapz/README.md new file mode 100644 index 00000000..702facb7 --- /dev/null +++ b/src/heapz/README.md @@ -0,0 +1,8 @@ +# Heapz + +A collection of heap/priority queue implementations + +### Heap types + +- [Pairing Heap](https://en.wikipedia.org/wiki/Pairing_heap) +- [Ranked Paring Heap](https://skycocoo.github.io/Rank-Pairing-Heap/) diff --git a/src/heapz/benches/pairing_heap.rs b/src/heapz/benches/pairing_heap.rs new file mode 100644 index 00000000..07e10821 --- /dev/null +++ b/src/heapz/benches/pairing_heap.rs @@ -0,0 +1,72 @@ +use criterion::{black_box, criterion_group, criterion_main, BatchSize, Bencher, Criterion}; +use heapz::{Heap, PairingHeap}; + +fn is_empty_benchmark(b: &mut Bencher) { + let mut heap = PairingHeap::min(); + heap.push(black_box(1), black_box(1)); + b.iter(|| heap.is_empty()); +} + +fn size_benchmark(b: &mut Bencher) { + let mut heap = PairingHeap::min(); + heap.push(1, 1); + b.iter(|| heap.size()); +} + +fn push_benchmark(b: &mut Bencher) { + let arr = vec![1, 3, 5, -2, 6, -7, 9, 10, 13, 4, 12, 115, 500, 132, 67, 334]; + b.iter_batched( + || PairingHeap::::min(), + |mut heap| { + arr.iter() + .for_each(|num| heap.push(black_box(*num), black_box(*num))) + }, + BatchSize::SmallInput, + ); +} + +fn top_benchmark(b: &mut Bencher) { + let mut heap = PairingHeap::min(); + heap.push(1, 1); + b.iter(|| { + let _ = heap.top(); + }); +} + +pub fn top_mut_benchmark(b: &mut Bencher) { + let mut heap = PairingHeap::min(); + heap.push(1, 1); + b.iter(|| { + let _ = heap.top_mut(); + }); +} + +pub fn pop_benchmark(b: &mut Bencher) { + b.iter_batched( + || { + let arr = vec![1, 3, 5, -2, 6, -7, 9, 10, 13, 4, 12, 115, 500, 132, 67, 334]; + let mut heap = PairingHeap::min(); + arr.iter() + .for_each(|num| heap.push(black_box(*num), black_box(*num))); + (heap, arr.len()) + }, + |(mut heap, len)| { + for _ in 0..len { + let _ = heap.pop(); + } + }, + BatchSize::SmallInput, + ); +} + +fn criterion_benchmark(c: &mut Criterion) { + c.bench_function("PairingHeap.is_empty", is_empty_benchmark); + c.bench_function("PairingHeap.size", size_benchmark); + c.bench_function("PairingHeap.push", push_benchmark); + c.bench_function("PairingHeap.top", top_benchmark); + c.bench_function("PairingHeap.top_mut", top_mut_benchmark); + c.bench_function("PairingHeap.pop", pop_benchmark); +} + +criterion_group!(pairing_heap, criterion_benchmark); +criterion_main!(pairing_heap); diff --git a/src/heapz/benches/rank_pairing_heap.rs b/src/heapz/benches/rank_pairing_heap.rs new file mode 100644 index 00000000..86562c87 --- /dev/null +++ b/src/heapz/benches/rank_pairing_heap.rs @@ -0,0 +1,111 @@ +use criterion::{black_box, criterion_group, criterion_main, BatchSize, Bencher, Criterion}; +use heapz::{DecreaseKey, Heap, RankPairingHeap}; + +fn is_empty_benchmark(b: &mut Bencher) { + let mut heap = RankPairingHeap::multi_pass_min(); + heap.push(black_box(1), black_box(1)); + b.iter(|| heap.is_empty()); +} + +fn size_benchmark(b: &mut Bencher) { + let mut heap = RankPairingHeap::multi_pass_min(); + heap.push(1, 1); + b.iter(|| heap.size()); +} + +fn push_benchmark(b: &mut Bencher) { + let arr = vec![1, 3, 5, -2, 6, -7, 9, 10, 13, 4, 12, 115, 500, 132, 67, 334]; + b.iter_batched( + || RankPairingHeap::::multi_pass_min(), + |mut heap| { + arr.iter() + .for_each(|num| heap.push(black_box(*num), black_box(*num))) + }, + BatchSize::SmallInput, + ); +} + +fn top_benchmark(b: &mut Bencher) { + let mut heap = RankPairingHeap::multi_pass_min(); + heap.push(1, 1); + b.iter(|| { + let _ = heap.top(); + }); +} + +fn top_mut_benchmark(b: &mut Bencher) { + let mut heap = RankPairingHeap::multi_pass_min(); + heap.push(1, 1); + b.iter(|| { + let _ = heap.top_mut(); + }); +} + +fn pop_benchmark(b: &mut Bencher) { + b.iter_batched( + || { + let arr = vec![1, 3, 5, -2, 6, -7, 9, 10, 13, 4, 12, 115, 500, 132, 67, 334]; + let mut heap = RankPairingHeap::multi_pass_min(); + arr.iter() + .for_each(|num| heap.push(black_box(*num), black_box(*num))); + (heap, arr.len()) + }, + |(mut heap, len)| { + for _ in 0..len { + let _ = heap.pop(); + } + }, + BatchSize::SmallInput, + ); +} + +fn update_benchmark(b: &mut Bencher) { + let mut i = 0; + b.iter_batched( + || { + let arr = vec![1, 3, 5, -2, 6, -7, 9, 10, 13, 4, 12, 115, 500, 132, 67, 334]; + let mut heap = RankPairingHeap::multi_pass_min(); + let key = arr[(i % arr.len()) as usize]; + let value = if i % 2 == 0 { -1 } else { 2 }; + arr.iter() + .for_each(|num| heap.push(black_box(*num), black_box(*num))); + i += 1; + (heap, (key, value)) + }, + |(mut heap, (key, value))| heap.update(&key, value), + BatchSize::SmallInput, + ); +} + +fn delete_benchmark(b: &mut Bencher) { + let mut i = 0; + b.iter_batched( + || { + let arr = vec![1, 3, 5, -2, 6, -7, 9, 10, 13, 4, 12, 115, 500, 132, 67, 334]; + let mut heap = RankPairingHeap::multi_pass_min(); + let key = arr[(i % arr.len()) as usize]; + arr.iter() + .for_each(|num| heap.push(black_box(*num), black_box(*num))); + i += 1; + (heap, key) + }, + |(mut heap, key)| { + let _ = heap.delete(&key); + }, + BatchSize::SmallInput, + ); +} + +fn criterion_benchmark(c: &mut Criterion) { + c.bench_function("RankPairingHeap.is_empty", is_empty_benchmark); + c.bench_function("RankPairingHeap.size", size_benchmark); + c.bench_function("RankPairingHeap.push", push_benchmark); + c.bench_function("RankPairingHeap.top", top_benchmark); + c.bench_function("RankPairingHeap.top_mut", top_mut_benchmark); + c.bench_function("RankPairingHeap.pop", pop_benchmark); + c.bench_function("RankPairingHeap.update", update_benchmark); + c.bench_function("RankPairingHeap.delete", delete_benchmark); +} + +criterion_group!(rank_pairing_heap, criterion_benchmark); +criterion_main!(rank_pairing_heap); diff --git a/src/heapz/src/lib.rs b/src/heapz/src/lib.rs new file mode 100644 index 00000000..a1b5fd8e --- /dev/null +++ b/src/heapz/src/lib.rs @@ -0,0 +1,203 @@ +#![deny(missing_docs)] +#![deny(rustdoc::missing_doc_code_examples)] + +/*! +A collection of heap/priority queue implementations. + +### Heap types that have been implemented + - [Pairing Heap](https://en.wikipedia.org/wiki/Pairing_heap) + - [Rank Paring Heap](https://skycocoo.github.io/Rank-Pairing-Heap/) +*/ + +mod utils; +use std::hash::Hash; + +mod pairing_heap; +mod rank_pairing_heap; + +pub use pairing_heap::*; +pub use rank_pairing_heap::*; + +/// [`HeapType`] Represents whether a heap/queue is min ([`HeapType::Min`]) or max ([`HeapType::Max`]) priority +#[derive(PartialEq, Copy, Clone, Debug)] +enum HeapType { + /// represents a heap type which prioritizes elements with the maximum value + Max, + /// represents a heap type which prioritizes elements with the minimum value + Min, +} + +/// [`Heap`] contains all the methods common to heaps/queues +pub trait Heap +where + K: Hash + Eq, + V: PartialOrd, +{ + /// Indicates whether a [`Heap`] is empty or not + /// + /// ```rust + /// use heapz::{PairingHeap, Heap}; + /// + /// fn check_heap>(mut heap: T) { + /// + /// assert_eq!(heap.is_empty(), true); + /// + /// heap.push("Hello".to_string(), 5); + /// + /// assert_eq!(heap.is_empty(), false); + /// } + /// + /// check_heap(PairingHeap::min()); + /// ``` + fn is_empty(&self) -> bool; + + /// Returns the amount of elements in the [`Heap`] + /// + /// ```rust + /// use heapz::{PairingHeap, Heap}; + /// + /// fn check_heap>(mut heap: T) { + /// + /// assert_eq!(heap.size(), 0); + /// + /// heap.push("Hello".to_string(), 5); + /// + /// assert_eq!(heap.size(), 1); + /// } + /// + /// check_heap(PairingHeap::min()); + /// ``` + fn size(&self) -> usize; + + /// Adds an element to the [`Heap`] + /// + /// ```rust + /// use heapz::{PairingHeap, Heap}; + /// + /// fn check_heap>(mut heap: T) { + /// + /// let value = "Hello".to_string(); + /// + /// heap.push(value.clone(), 5); + /// + /// assert_eq!(heap.top(), Some(&value)); + /// } + /// + /// check_heap(PairingHeap::min()); + /// ``` + fn push(&mut self, key: K, value: V); + + /// Returns the highest priority element of a [`Heap`] (or None) + /// + /// ``` + /// use heapz::{PairingHeap, Heap}; + /// fn check_heap>(mut heap: T) { + /// + /// let value = "Hello".to_string(); + /// + /// assert!(heap.top().is_none()); + /// + /// heap.push(value.clone(), 5); + /// + /// assert_eq!(heap.top(), Some(&value)); + /// } + /// + /// check_heap(PairingHeap::min()); + /// ``` + fn top(&self) -> Option<&K>; + + /// Returns the highest priority element of a [`Heap`] (or None) as mutable + /// + /// ```rust + /// use heapz::{PairingHeap, Heap}; + /// + /// fn check_heap>(mut heap: T) { + /// + /// let value = "Hello".to_string(); + /// + /// assert!(heap.top_mut().is_none()); + /// + /// heap.push(value.clone(), 5); + /// + /// assert_eq!(heap.top_mut(), Some(&mut value.clone())); + /// } + /// + /// check_heap(PairingHeap::min()); + /// ``` + fn top_mut(&mut self) -> Option<&mut K>; + + /// Removes and Returns the highest priority element of a [`Heap`] (or None) + /// + /// ```rust + /// use heapz::{PairingHeap, Heap}; + /// + /// fn check_heap>(mut heap: T) { + /// + /// let value = "Hello".to_string(); + /// + /// heap.push(value.clone(), 5); + /// + /// assert_eq!(heap.pop(), Some(value.clone())); + /// assert_eq!(heap.pop(), None); + /// } + /// + /// check_heap(PairingHeap::min()); + /// ``` + fn pop(&mut self) -> Option; +} + +/// [`DecreaseKey`] defines extra methods for a [`Heap`] that implement decrease-key and delete operations +pub trait DecreaseKey: Heap +where + K: Hash + Eq, + V: PartialOrd, +{ + /// Updates the priority of an element in the [`Heap`] (or None) + /// + /// ```rust + /// use heapz::{DecreaseKey, RankPairingHeap}; + /// + /// fn check_heap>(mut heap: T) { + /// let hello = "Hello".to_string(); + /// let world = "World".to_string(); + /// + /// heap.push(hello.clone(), 5); + /// heap.push(world.clone(), 2); + /// + /// assert_eq!(heap.top(), Some(&world)); + /// + /// heap.update(&hello, 1); + /// + /// assert_eq!(heap.top(), Some(&hello)); + /// } + /// + /// check_heap(RankPairingHeap::multi_pass_min2()); + /// ``` + fn update(&mut self, key: &K, value: V); + + /// Deletes an element from the [`Heap`] and returns it (or None) + /// + /// ```rust + /// use heapz::{DecreaseKey, RankPairingHeap}; + /// + /// fn check_heap>(mut heap: T) { + /// + /// let hello = "Hello".to_string(); + /// let world = "World".to_string(); + /// + /// heap.push(hello.clone(), 5); + /// heap.push(world.clone(), 2); + /// + /// assert_eq!(heap.top(), Some(&world)); + /// assert_eq!(heap.delete(&hello), Some(hello.clone())); + /// + /// heap.pop(); + /// + /// assert_eq!(heap.top(), None); + /// assert_eq!(heap.delete(&hello), None); + /// } + /// + /// check_heap(RankPairingHeap::multi_pass_min2()); + /// ``` + fn delete(&mut self, key: &K) -> Option; +} diff --git a/src/heapz/src/pairing_heap.rs b/src/heapz/src/pairing_heap.rs new file mode 100644 index 00000000..2b5d1c13 --- /dev/null +++ b/src/heapz/src/pairing_heap.rs @@ -0,0 +1,239 @@ +use crate::{Heap, HeapType}; +use std::hash::Hash; + +type BoxedNode = Box>; + +#[derive(Debug)] +struct Node { + pub value: V, + pub key: K, + left: Option>, + next: Option>, +} + +impl Node { + pub fn new(key: K, value: V) -> Self { + Node { + key, + value, + left: None, + next: None, + } + } + pub fn set_left(&mut self, node: Option>) { + self.left = node; + } + pub fn set_next(&mut self, node: Option>) { + self.next = node; + } +} + +/** +[`PairingHeap`] is an implementation of a [pairing heap](https://en.wikipedia.org/wiki/Pairing_heap). + +It can have either a min or max [`HeapType`] and is implemented using a pattern similar to [singly linked lists](https://en.wikipedia.org/wiki/Linked_list#Singly_linked_list) + */ +pub struct PairingHeap { + root: Option>, + heap_type: HeapType, + size: usize, +} + +impl PairingHeap { + /// Initializes a min priority ([`HeapType::Min`]) [`PairingHeap`] + /// + /// ```rust + /// use heapz::PairingHeap; + /// + /// let heap: PairingHeap<(usize, usize), i32> = PairingHeap::min(); + /// ``` + pub fn min() -> Self { + Self::new(HeapType::Min) + } + + /// Initializes a max priority ([`HeapType::Max`]) [`PairingHeap`] + /// + /// ```rust + /// use heapz::PairingHeap; + /// + /// let heap: PairingHeap<(usize, usize), i32> = PairingHeap::max(); + /// ``` + pub fn max() -> Self { + Self::new(HeapType::Max) + } + + fn new(heap_type: HeapType) -> Self { + PairingHeap { + root: None, + heap_type, + size: 0, + } + } + + fn compare(&self, a: &BoxedNode, b: &BoxedNode) -> bool { + match self.heap_type { + HeapType::Max => a.value >= b.value, + HeapType::Min => a.value <= b.value, + } + } + + fn add_child(mut parent: BoxedNode, mut child: BoxedNode) -> BoxedNode { + if parent.left.is_some() { + child.set_next(parent.left.take()); + } + parent.set_left(Some(child)); + parent + } + + fn merge( + &mut self, + node_a: Option>, + node_b: Option>, + ) -> Option> { + match (node_a, node_b) { + (Some(a), Some(b)) => Some(if self.compare(&a, &b) { + Self::add_child(a, b) + } else { + Self::add_child(b, a) + }), + (Some(a), None) => Some(a), + (None, Some(b)) => Some(b), + _ => None, + } + } + + fn two_pass_merge(&mut self, node: Option>) -> Option> { + let mut root = node; + let mut merged: Option> = None; + + while let Some(mut parent) = root { + if let Some(mut child) = parent.next.take() { + root = child.next.take(); + let children = self.merge(Some(parent), Some(child)); + merged = self.merge(merged, children); + } else { + merged = self.merge(merged, Some(parent)); + root = None; + } + } + merged + } +} + +impl Heap for PairingHeap { + /// Indicates whether a [`PairingHeap`] is empty or not + /// + /// ```rust + /// use heapz::{PairingHeap, Heap}; + /// + /// let mut heap = PairingHeap::min(); + /// + /// assert_eq!(heap.is_empty(), true); + /// + /// heap.push("Hello".to_string(), 5); + /// + /// assert_eq!(heap.is_empty(), false); + /// ``` + fn is_empty(&self) -> bool { + self.root.is_none() + } + + /// Returns the amount of elements in the [`PairingHeap`] + /// + /// ```rust + /// use heapz::{PairingHeap, Heap}; + /// + /// let mut heap = PairingHeap::max(); + /// + /// assert_eq!(heap.size(), 0); + /// + /// heap.push("Hello".to_string(), 5); + /// + /// assert_eq!(heap.size(), 1); + /// ``` + fn size(&self) -> usize { + self.size.clone() + } + + /// Adds an element to the [`PairingHeap`] + /// + /// ```rust + /// use heapz::{PairingHeap, Heap}; + /// + /// let mut heap = PairingHeap::min(); + /// let value = "Hello".to_string(); + /// + /// heap.push(value.clone(), 5); + /// + /// assert_eq!(heap.top(), Some(&value)); + /// ``` + fn push(&mut self, key: K, value: V) { + self.root = if self.root.is_some() { + let root = self.root.take(); + self.merge(root, Some(Box::new(Node::new(key, value)))) + } else { + Some(Box::new(Node::new(key, value))) + }; + self.size += 1; + } + + /// Returns the highest priority element of a [`PairingHeap`] (or None) + /// + /// ``` + /// use heapz::{PairingHeap, Heap}; + /// + /// let value = "Hello".to_string(); + /// let mut heap = PairingHeap::max(); + /// + /// assert!(heap.top().is_none()); + /// + /// heap.push(value.clone(), 5); + /// + /// assert_eq!(heap.top(), Some(&value)); + /// ``` + fn top(&self) -> Option<&K> { + self.root.as_ref().map(|node| &node.key) + } + + /// Returns the highest priority element of a [`PairingHeap`] (or None) as mutable + /// + /// ```rust + /// use heapz::{PairingHeap, Heap}; + /// + /// let value = "Hello".to_string(); + /// let mut heap = PairingHeap::min(); + /// + /// assert!(heap.top_mut().is_none()); + /// + /// heap.push(value.clone(), 5); + /// + /// assert_eq!(heap.top_mut(), Some(&mut value.clone())); + /// ``` + fn top_mut(&mut self) -> Option<&mut K> { + self.root.as_mut().map(|node| &mut node.key) + } + + /// Removes and Returns the highest priority element of a [`PairingHeap`] (or None) + /// + /// ```rust + /// use heapz::{PairingHeap, Heap}; + /// + /// let value1 = "Hello".to_string(); + /// let value2 = "World".to_string(); + /// let mut heap = PairingHeap::max(); + /// + /// heap.push(value1.clone(), 5); + /// heap.push(value2.clone(), 4); + /// + /// assert_eq!(heap.pop(), Some(value1.clone())); + /// assert_eq!(heap.pop(), Some(value2.clone())); + /// assert_eq!(heap.pop(), None); + /// ``` + fn pop(&mut self) -> Option { + self.root.take().map(|mut node| { + self.size -= 1; + self.root = self.two_pass_merge(node.left.take()); + node.key + }) + } +} diff --git a/src/heapz/src/rank_pairing_heap.rs b/src/heapz/src/rank_pairing_heap.rs new file mode 100644 index 00000000..2ddf1aa7 --- /dev/null +++ b/src/heapz/src/rank_pairing_heap.rs @@ -0,0 +1,912 @@ +use crate::utils::Bucket; +use crate::{DecreaseKey, Heap, HeapType}; +use std::{ + cmp::{max, Eq}, + collections::HashMap, + hash::Hash, +}; + +/**! +[`HeapRank`] represents which algorithm will be used to calculate the rank of a node/tree +*/ +#[derive(PartialEq, Clone, Debug)] +enum HeapRank { + /// [`HeapRank::One`] has larger constant factors in the time bounds than [`HeapRank::Two`] but is simpler + One, + /// [`HeapRank::Two`] has smaller constant factors in the time bounds than [`HeapRank::One`] + Two, +} + +/**! +[`HeapPasses`] represent how many passes will be made when restructuring a [`RankPairingHeap`] + +[Rank pairing heaps]() use a list of trees that can be combined if they have identical size (rank). +Combining all trees of identical size (rank) takes multiple passes but is not required for the [`RankPairingHeap`] to work. +*/ +#[derive(PartialEq, Clone, Debug)] +enum HeapPasses { + /// A single pass will cause the heap to restructure the heap lazily, only iterating over each node a single time and combining any nodes with matching size/ranks. + Single, + + /// Multiple passes restructure the heap eagerly, merging trees repeatedly until no two trees have matching size/rank. + Multi, +} + +type Position = Option; + +#[derive(Clone, Debug)] +struct Node { + key: K, + value: V, + left: Position, + next: Position, + parent: Position, + rank: usize, + root: bool, +} + +impl Node { + pub fn new(key: K, value: V) -> Self { + Node { + key, + value, + left: None, + next: None, + parent: None, + rank: 0, + root: true, + } + } +} + +/** +[`RankPairingHeap`] is an implementation of a [rank pairing heap](https://skycocoo.github.io/Rank-Pairing-Heap/) + +Due to the [difficulty](https://rcoh.me/posts/rust-linked-list-basically-impossible/) in creating [doubly linked lists](https://en.wikipedia.org/wiki/Doubly_linked_list) using safe rust, this [rank pairing heap](https://skycocoo.github.io/Rank-Pairing-Heap/) implementation uses an array to store nodes and uses their indices as pointers. + +[rank pairing heaps](https://skycocoo.github.io/Rank-Pairing-Heap/) have a few variations on how their ranks are calculated, how the heap is restructured and the order in which priority is determined. +To address these different options there are three properties that can be set in any combination for the [`RankPairingHeap`]: [`HeapType`], [`HeapRank`] and [`HeapPasses`] + */ +pub struct RankPairingHeap { + root: Position, + heap_rank: HeapRank, + heap_type: HeapType, + passes: HeapPasses, + list: Vec>, + keys: HashMap, +} + +// impelement clone +impl Clone for RankPairingHeap { + fn clone(&self) -> Self { + RankPairingHeap { + root: self.root, + heap_rank: self.heap_rank.clone(), + heap_type: self.heap_type, + passes: self.passes.clone(), + list: self.list.clone(), + keys: self.keys.clone(), + } + } +} + +// implement Debug +impl std::fmt::Debug + for RankPairingHeap +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RankPairingHeap") + .field("root", &self.root) + .field("heap_rank", &self.heap_rank) + .field("heap_type", &self.heap_type) + .field("passes", &self.passes) + .field("list", &self.list) + .field("keys", &self.keys) + .finish() + } +} + +// struct initialization +impl RankPairingHeap { + fn new(heap_type: HeapType, heap_rank: HeapRank, passes: HeapPasses) -> Self { + RankPairingHeap { + root: None, + heap_rank, + heap_type, + passes, + list: vec![], + keys: HashMap::new(), + } + } + + /// Initializes a max ([`HeapType::Max`]) heap using [`HeapRank::One`] and [`HeapPasses::Single`] + /// + /// ```rust + /// use heapz::RankPairingHeap; + /// + /// let heap: RankPairingHeap<(usize, usize), i32> = RankPairingHeap::single_pass_max(); + /// ``` + pub fn single_pass_max() -> Self { + Self::new(HeapType::Max, HeapRank::One, HeapPasses::Single) + } + + /// Initializes a max ([`HeapType::Max`]) heap using [`HeapRank::Two`] and [`HeapPasses::Single`] + /// + /// ```rust + /// use heapz::RankPairingHeap; + /// + /// let heap: RankPairingHeap<(usize, usize), i32> = RankPairingHeap::single_pass_max2(); + /// ``` + pub fn single_pass_max2() -> Self { + Self::new(HeapType::Max, HeapRank::Two, HeapPasses::Single) + } + + /// Initializes a min ([`HeapType::Min`]) heap using [`HeapRank::One`] and [`HeapPasses::Single`] + /// + /// ```rust + /// use heapz::RankPairingHeap; + /// + /// let heap: RankPairingHeap<(usize, usize), i32> = RankPairingHeap::single_pass_min(); + /// ``` + pub fn single_pass_min() -> Self { + Self::new(HeapType::Min, HeapRank::One, HeapPasses::Single) + } + + /// Initializes a min ([`HeapType::Min`]) heap using [`HeapRank::Two`] and [`HeapPasses::Single`] + /// + /// ```rust + /// use heapz::RankPairingHeap; + /// + /// let heap: RankPairingHeap<(usize, usize), i32> = RankPairingHeap::single_pass_min2(); + /// ``` + pub fn single_pass_min2() -> Self { + Self::new(HeapType::Min, HeapRank::Two, HeapPasses::Single) + } + + /// Initializes a min ([`HeapType::Max`]) heap using [`HeapRank::One`] and [`HeapPasses::Multi`] + /// + /// ```rust + /// use heapz::RankPairingHeap; + /// + /// let heap: RankPairingHeap<(usize, usize), i32> = RankPairingHeap::multi_pass_max(); + /// ``` + pub fn multi_pass_max() -> Self { + Self::new(HeapType::Max, HeapRank::One, HeapPasses::Multi) + } + + /// Initializes a min ([`HeapType::Max`]) heap using [`HeapRank::Two`] and [`HeapPasses::Multi`] + /// + /// ```rust + /// use heapz::RankPairingHeap; + /// + /// let heap: RankPairingHeap<(usize, usize), i32> = RankPairingHeap::multi_pass_max2(); + /// ``` + pub fn multi_pass_max2() -> Self { + Self::new(HeapType::Max, HeapRank::Two, HeapPasses::Multi) + } + + /// Initializes a min ([`HeapType::Min`]) heap using [`HeapRank::One`] and [`HeapPasses::Multi`] + /// + /// ```rust + /// use heapz::RankPairingHeap; + /// + /// let heap: RankPairingHeap<(usize, usize), i32> = RankPairingHeap::multi_pass_min(); + /// ``` + pub fn multi_pass_min() -> Self { + Self::new(HeapType::Min, HeapRank::One, HeapPasses::Multi) + } + + /// Initializes a min ([`HeapType::Min`]) heap using [`HeapRank::Two`] and [`HeapPasses::Multi`] + /// + /// ```rust + /// use heapz::RankPairingHeap; + /// + /// let heap: RankPairingHeap<(usize, usize), i32> = RankPairingHeap::multi_pass_max2(); + /// ``` + pub fn multi_pass_min2() -> Self { + Self::new(HeapType::Min, HeapRank::Two, HeapPasses::Multi) + } +} + +// Ranking +impl RankPairingHeap +where + K: Hash + Eq + Clone + std::fmt::Debug, + V: PartialOrd + Clone + std::fmt::Debug, +{ + fn rank1(left: i32, next: i32) -> i32 { + if left != next { + max(left, next) + } else { + left + 1 + } + } + + fn rank2(left: i32, next: i32) -> i32 { + max(left, next) + (if (&left as &i32 - &next as &i32).abs() > 1 { 0 } else { 1 }) + } + + fn rank(&self, left: i32, next: i32) -> usize { + (match self.heap_rank { + HeapRank::One => Self::rank1(left, next), + HeapRank::Two => Self::rank2(left, next), + }) as usize + } + + fn rank_nodes(&self, left: Position, next: Position) -> usize { + let left_rank = self.get_rank(left); + let right_rank = self.get_rank(next); + self.rank(left_rank, right_rank) + } + + fn get_rank(&self, position: Position) -> i32 { + if let Some(n) = self.get_node(position) { + n.rank as i32 + } else { + 0 - 1 + } + } +} + +// storage interaction +impl RankPairingHeap +where + K: Hash + Eq + Clone + std::fmt::Debug, + V: PartialOrd + Clone + std::fmt::Debug, +{ + fn get_node(&self, position: Position) -> Option<&Node> { + position.map(|index| self.list.get(index)).unwrap_or(None) + } + + fn get_node_mut(&mut self, position: Position) -> Option<&mut Node> { + if let Some(index) = position { + self.list.get_mut(index) + } else { + None + } + } + + fn remove_array_node(&mut self, position: Position) -> Option> { + self.get_node(self.last_position()).map(|node| node.key.clone()).map(|key| { + self.keys.remove(&key); + self.keys.insert(key, position); + }); + position.map(|index| self.list.swap_remove(index)) + } + + fn add_node(&mut self, node: Node) -> Position { + let position = Some(self.list.len()); + self.keys.insert(node.key.clone(), position); + self.list.push(node); + position + } + + fn get_position(&self, key: &K) -> Position { + self.keys.get(key).cloned().unwrap_or(None) + } +} + +// utility functions +impl RankPairingHeap { + fn last_position(&self) -> Position { + let size = self.size(); + if size > 0 { + Some(size - 1) + } else { + None + } + } + + fn is_left(&self, position: Position, parent: Position) -> bool { + self.get_node(parent).map(|parent| parent.left == position).unwrap_or(false) + } + + fn is_root(&self, position: Position) -> bool { + self.get_node(position).map(|node| node.root).unwrap_or(false) + } + + fn get_value(&self, position: Position) -> Option<&V> { + self.get_node(position).map(|node| &node.value) + } + + fn get_key(&self, position: Position) -> Option<&K> { + self.get_node(position).map(|node| &node.key) + } + + fn get_index) -> Position>(&self, index: Position, get_adjacent: F) -> Position { + self.get_node(index).map(get_adjacent).unwrap_or(None) + } + + fn get_left_index(&self, index: Position) -> Position { + self.get_index(index, |node| node.left) + } + + fn get_next_index(&self, index: Position) -> Position { + self.get_index(index, |node| node.next) + } + + fn get_parent_index(&self, index: Position) -> Position { + self.get_index(index, |node| node.parent) + } + + fn get_links(&self, position: Position) -> Option<(Position, Position, Position)> { + self.get_node(position).map(|node| (node.parent, node.left, node.next)) + } + + fn get_siblings(&self, position: Position) -> Option<(Position, Position)> { + self.get_links(position).map(|(parent, _, next)| (parent, next)) + } + + fn set_next(&mut self, parent: Position, next: Position) { + self.get_node_mut(parent).map(|node| { + node.next = next; + }); + } + + fn set_left(&mut self, parent: Position, left: Position) { + self.get_node_mut(parent).map(|node| { + node.left = left; + }); + } + + fn set_parent(&mut self, child: Position, parent: Position) { + self.get_node_mut(child).map(|node| { + node.parent = parent; + }); + } + + fn link_next(&mut self, parent: Position, next: Position) { + self.set_next(parent, next); + self.set_parent(next, parent); + } + + fn link_left(&mut self, parent: Position, left: Position) { + self.set_left(parent, left); + self.set_parent(left, parent); + } + + fn compare_values(&self, value_a: T, value_b: T) -> bool { + if self.heap_type == HeapType::Max { + value_a > value_b + } else { + value_a < value_b + } + } + + fn compare(&self, a: Position, b: Position) -> bool { + self.get_value(a) + .zip(self.get_value(b)) + .map_or(false, |(value_a, value_b)| self.compare_values(value_a, value_b)) + } + + fn merge_trees(&mut self, node_a: Position, node_b: Position) -> Position { + assert_ne!(node_a, node_b); + let a = self.get_node_mut(node_a).unwrap() as *mut Node; + let b = self.get_node_mut(node_b).unwrap() as *mut Node; + let mut parent: Position; + let mut child: Position; + unsafe { + let mut parent_node: *mut Node; + let mut child_node: *mut Node; + let node_a_is_parent = if self.heap_type == HeapType::Max { + (*a).value > (*b).value + } else { + (*a).value < (*b).value + }; + if node_a_is_parent { + parent = node_a; + child = node_b; + parent_node = a; + child_node = b; + } else { + parent = node_b; + child = node_a; + parent_node = b; + child_node = a; + } + let left_of_parent = (*parent_node).left; + (*parent_node).left = child; + (*parent_node).rank = (*child_node).rank + 1; + (*child_node).parent = parent; + (*child_node).next = left_of_parent; + (*child_node).root = false; + self.set_parent(left_of_parent, child); + } + parent + } + + fn link(&mut self, node_a: Position, node_b: Position) -> Position { + if node_b != node_a { + match (node_a, node_b) { + (Some(_), Some(_)) => self.merge_trees(node_a, node_b), + (Some(_), None) => node_a, + (None, Some(_)) => node_b, + _ => None, + } + } else { + node_a.or(node_b) + } + } + + fn calculate_swapped_positions(position: Position, parent: Position, next: Position, removed: Position) -> Position { + if parent == position { + if next == position { + position + } else { + removed + } + } else { + parent + } + } + + fn swap_remove_with_tree(&mut self, position: Position) -> Option> { + let last = self.last_position(); + self.get_links(last) + .map(|(parent_of_last, left_of_last, next_of_last)| { + self.remove_array_node(position).map(|removed| { + if removed.next != position { + self.link_next(removed.parent, removed.next); + if last != position { + let parent = + Self::calculate_swapped_positions(position, parent_of_last, next_of_last, removed.parent); + let next = + Self::calculate_swapped_positions(position, next_of_last, parent_of_last, removed.next); + self.get_node_mut(position).map(|node| { + node.parent = parent; + node.next = next; + node.left = left_of_last; + }); + self.set_next(parent, position); + vec![next, left_of_last] + .into_iter() + .for_each(|sibling| self.set_parent(sibling, position)); + } else { + self.link_left(position, left_of_last); + } + } + removed + }) + }) + .unwrap_or(None) + } + + fn get_next_root(&mut self, position: Position) -> Position { + let last = self.last_position(); + if let Some((linked_to_self, next)) = self.get_node(position).map(|node| (node.next == position, node.next)) { + if linked_to_self { + None + } else if next == last { + position + } else { + next + } + } else { + None + } + } + + fn swap_remove_with_branch(&mut self, position: Position) -> Option> { + let last = self.last_position(); + self.get_links(last) + .map(|(parent, left, next)| { + let is_left = self.is_left(last, parent); + self.remove_array_node(position).map(|mut removed| { + self.link_next(removed.parent, removed.next); + let parent_of_last = if removed.left == last { + removed.left = position; + last + } else { + parent + }; + self.get_node_mut(position).map(|node| { + node.left = left; + node.next = next; + node.parent = parent_of_last; + }); + self.set_parent(left, position); + self.set_parent(next, position); + self.get_node_mut(parent_of_last).map(|node| { + if is_left { + node.left = position; + } else { + node.next = position; + } + }); + removed + }) + }) + .unwrap_or(None) + } + + fn remove(&mut self, position: Position) -> Option> { + if self.is_root(self.last_position()) { + self.swap_remove_with_tree(position) + } else { + self.swap_remove_with_branch(position) + } + } + + fn single_pass(&mut self, mut node: Position) -> Position { + let mut bucket = Bucket::new(self.size()); + let mut root = None; + while node.is_some() { + let (rank, next, parent) = self + .get_node_mut(node) + .map(|n| { + let parent = n.parent; + let next = n.next; + n.parent = None; + n.next = None; + (n.rank as usize, next, parent) + }) + .unwrap(); + self.link_next(parent, next); + if let Some(matched) = bucket.remove(rank) { + let linked = self.link(node, matched); + root = self.add_root_to_list(linked, root); + } else { + bucket.insert(rank, node); + } + node = next; + } + bucket.drain().fold(root, |list, node| self.add_root_to_list(node, list)) + } + + fn multi_pass(&mut self, mut node: Position) -> Position { + let mut bucket: Bucket = Bucket::new(self.size()); + let mut root = None; + while node.is_some() { + let (mut rank, next, parent) = self + .get_node_mut(node) + .map(|n| { + let parent = n.parent; + let next = n.next; + n.parent = None; + n.next = None; + (n.rank as usize, next, parent) + }) + .unwrap(); + self.link_next(parent, next); + if let Some(matched) = bucket.remove(rank) { + let (parent, next) = self + .get_node_mut(matched) + .map(|n| { + let parent = n.parent; + let next = n.next; + if root == matched { + root = if next == matched && parent == matched { None } else { next } + } + n.next = None; + n.parent = None; + (parent, next) + }) + .unwrap(); + self.link_next(parent, next); + node = self.link(node, matched); + rank += 1; + } + if bucket.contains_key(rank) { + self.link_next(node, next); + } else { + bucket.insert(rank, node); + root = self.add_root_to_list(node, root); + node = next; + } + } + root + } + + fn combine_ranks(&mut self, node: Position) -> Position { + if self.passes == HeapPasses::Single { + self.single_pass(node) + } else { + self.multi_pass(node) + } + } + + fn add_root_to_list(&mut self, root: Position, list: Position) -> Position { + if list.is_some() && root.is_some() { + let root_node = self.get_node_mut(root).unwrap() as *mut Node; + let list_node = self.get_node_mut(list).unwrap() as *mut Node; + unsafe { + let is_new_root = if self.heap_type == HeapType::Max { + (*root_node).value > (*list_node).value + } else { + (*root_node).value < (*list_node).value + }; + let mut parent = (*list_node).parent; + let mut next = (*list_node).next; + parent = if is_new_root { parent } else { list }; + next = if is_new_root { list } else { next }; + self.set_next(parent, root); + (*root_node).root = true; + (*root_node).next = next; + (*root_node).parent = parent; + self.set_parent(next, root); + if is_new_root { + root + } else { + list + } + } + } else { + self.get_node_mut(root).map(|node| { + node.root = true; + node.next = root; + node.parent = root; + }); + root + } + } + + fn concatenate_lists(&mut self, head_list: Position, tail_list: Position) -> Position { + let tail = self + .get_node_mut(head_list) + .map(|node| { + let parent = node.parent; + node.parent = None; + parent + }) + .unwrap_or(None); + self.link_next(tail, tail_list); + head_list.or(tail_list) + } + + fn unlink_tree(&mut self, position: Position, mut parent: Position, next: Position) { + let mut rank = self + .get_node_mut(next) + .map(|node| { + node.parent = parent; + node.rank + 1 + }) + .unwrap_or(0); + + parent = self + .get_node_mut(parent) + .map(|node| { + if node.left == position { + node.left = next; + } else { + node.next = next; + } + node.rank = rank; + if node.root { + None + } else { + node.parent + } + }) + .unwrap_or(None); + + while parent.is_some() { + rank += 1; + parent = self + .get_node_mut(parent) + .map(|node| { + node.rank = rank; + if node.root { + None + } else { + node.parent + } + }) + .unwrap_or(None); + } + } +} + +impl Heap for RankPairingHeap +where + K: Hash + Eq + Clone + std::fmt::Debug, + V: PartialOrd + Clone + std::fmt::Debug, +{ + /// Indicates whether a [`RankPairingHeap`] is empty or not + /// + /// ```rust + /// use heapz::{RankPairingHeap, Heap}; + /// + /// let mut heap = RankPairingHeap::multi_pass_min(); + /// + /// assert_eq!(heap.is_empty(), true); + /// + /// heap.push("Hello".to_string(), 5); + /// + /// assert_eq!(heap.is_empty(), false); + /// ``` + fn is_empty(&self) -> bool { + self.list.is_empty() + } + + /// Returns the amount of elements in the [`RankPairingHeap`] + /// + /// ```rust + /// use heapz::{RankPairingHeap, Heap}; + /// + /// let mut heap = RankPairingHeap::multi_pass_max2(); + /// + /// assert_eq!(heap.size(), 0); + /// + /// heap.push("Hello".to_string(), 5); + /// + /// assert_eq!(heap.size(), 1); + /// ``` + fn size(&self) -> usize { + self.list.len() + } + + /// Adds an element to the [`RankPairingHeap`] + /// + /// ```rust + /// use heapz::{RankPairingHeap, Heap}; + /// + /// let mut heap = RankPairingHeap::multi_pass_min(); + /// let value = "Hello".to_string(); + /// + /// heap.push(value.clone(), 5); + /// + /// assert_eq!(heap.top(), Some(&value)); + /// ``` + fn push(&mut self, key: K, value: V) { + let node = Node::new(key, value); + let position = self.add_node(node); + self.root = self.add_root_to_list(position, self.root); + } + + /// Returns the highest priority element of a [`RankPairingHeap`] (or None) + /// + /// ``` + /// use heapz::{RankPairingHeap, Heap}; + /// + /// let value = "Hello".to_string(); + /// let mut heap = RankPairingHeap::multi_pass_min2(); + /// + /// assert!(heap.top().is_none()); + /// + /// heap.push(value.clone(), 5); + /// + /// assert_eq!(heap.top(), Some(&value)); + /// ``` + fn top(&self) -> Option<&K> { + self.get_key(self.root) + } + + /// Returns the highest priority element of a [`RankPairingHeap`] (or None) as mutable + /// + /// ```rust + /// use heapz::{RankPairingHeap, Heap}; + /// + /// let value = "Hello".to_string(); + /// let mut heap = RankPairingHeap::single_pass_min(); + /// + /// assert!(heap.top_mut().is_none()); + /// + /// heap.push(value.clone(), 5); + /// + /// assert_eq!(heap.top_mut(), Some(&mut value.clone())); + /// ``` + fn top_mut(&mut self) -> Option<&mut K> { + self.get_node_mut(self.root).map(|node| &mut node.key) + } + + /// Removes and Returns the highest priority element of a [`RankPairingHeap`] (or None) + /// + /// ```rust + /// use heapz::{RankPairingHeap, Heap}; + /// + /// let value1 = "Hello".to_string(); + /// let value2 = "World".to_string(); + /// let mut heap = RankPairingHeap::single_pass_min2(); + /// + /// heap.push(value1.clone(), 4); + /// heap.push(value2.clone(), 5); + /// + /// assert_eq!(heap.pop(), Some(value1.clone())); + /// assert_eq!(heap.pop(), Some(value2.clone())); + /// assert_eq!(heap.pop(), None); + /// ``` + fn pop(&mut self) -> Option { + let root = self.root; + if root.is_some() { + let next_root = self.get_next_root(root); + self.remove(root).map(|removed| { + let head = self.concatenate_lists(next_root, removed.left); + self.root = self.combine_ranks(head); + removed.key + }) + } else { + None + } + } +} + +impl DecreaseKey for RankPairingHeap +where + K: Hash + Eq + Clone + std::fmt::Debug, + V: PartialOrd + Clone + std::fmt::Debug, +{ + /// Updates the priority of an element in the [`RankPairingHeap`] (or None) + /// + /// ```rust + /// use heapz::{DecreaseKey, Heap, RankPairingHeap}; + /// + /// let mut heap = RankPairingHeap::single_pass_max(); + /// let hello = "Hello".to_string(); + /// let world = "World".to_string(); + /// + /// heap.push(hello.clone(), 2); + /// heap.push(world.clone(), 5); + /// + /// assert_eq!(heap.top(), Some(&world)); + /// + /// heap.update(&hello, 6); + /// + /// assert_eq!(heap.top(), Some(&hello)); + /// ``` + fn update(&mut self, key: &K, value: V) { + let position = self.get_position(key); + let heap_type = self.heap_type; + self.get_node_mut(position) + .map(|node| { + let can_update = if heap_type == HeapType::Max { + value > node.value + } else { + value < node.value + }; + if can_update { + node.value = value; + } + (node.root, can_update, node.left, node.parent, node.next) + }) + .map(|(is_root, can_update, left, parent, next)| { + if can_update { + if is_root { + if self.compare(position, self.root) { + self.root = position; + } + } else { + let rank = (self.get_rank(left) + 1) as usize; + self.get_node_mut(position).map(|node| { + node.rank = rank; + }); + self.unlink_tree(position, parent, next); + self.root = self.add_root_to_list(position, self.root); + } + } + }); + } + + /// Deletes an element from the [`RankPairingHeap`] and returns it (or None) + /// + /// ```rust + /// use heapz::{DecreaseKey, Heap, RankPairingHeap}; + /// + /// let mut heap = RankPairingHeap::single_pass_max2(); + /// let hello = "Hello".to_string(); + /// let world = "World".to_string(); + /// + /// heap.push(hello.clone(), 2); + /// heap.push(world.clone(), 6); + /// + /// assert_eq!(heap.top(), Some(&world)); + /// assert_eq!(heap.delete(&hello), Some(hello.clone())); + /// + /// heap.pop(); + /// + /// assert_eq!(heap.top(), None); + /// assert_eq!(heap.delete(&hello), None); + /// ``` + fn delete(&mut self, key: &K) -> Option { + let position = self.get_position(key); + self.get_node(position) + .map(|node| (node.root, node.parent, node.next)) + .map(|(is_root, parent, next)| { + if !is_root { + self.unlink_tree(position, parent, next); + self.add_root_to_list(position, self.root); + } + }); + self.root = position; + self.pop() + } +} diff --git a/src/heapz/src/utils/bucket.rs b/src/heapz/src/utils/bucket.rs new file mode 100644 index 00000000..cb2f9687 --- /dev/null +++ b/src/heapz/src/utils/bucket.rs @@ -0,0 +1,34 @@ +use crate::utils::math::log; + +pub struct Bucket { + store: Vec>, +} + +impl Bucket { + pub fn new(size: usize) -> Self { + let fill_size = + (((if size > 0 { log(size) } else { 0 }) + 1) as f32 * 1.4).floor() as usize; + Bucket { + store: vec![vec![]; fill_size], + } + } + + pub fn insert(&mut self, key: usize, value: V) { + self.store[key].push(value); + } + + pub fn contains_key(&self, key: usize) -> bool { + !self.store[key].is_empty() + } + + pub fn remove(&mut self, key: usize) -> Option { + self.store[key].pop() + } + + pub fn drain(self) -> impl Iterator { + self.store + .into_iter() + .filter(|bucket| !bucket.is_empty()) + .map(|mut bucket| bucket.pop().unwrap()) + } +} diff --git a/src/heapz/src/utils/math.rs b/src/heapz/src/utils/math.rs new file mode 100644 index 00000000..2655f24d --- /dev/null +++ b/src/heapz/src/utils/math.rs @@ -0,0 +1,26 @@ +const fn num_bits() -> usize { + std::mem::size_of::() * 8 +} + +pub fn log(x: usize) -> u32 { + num_bits::() as u32 - (x as i32).leading_zeros() - 1 +} + +#[cfg(test)] +mod log { + use super::log; + + #[test] + fn returns_log_of_numbers_greater_than_zero() { + assert_eq!(log(1), 0); + assert_eq!(log(2), 1); + assert_eq!(log(4), 2); + assert_eq!(log(8), 3); + assert_eq!(log(16), 4); + assert_eq!(log(32), 5); + assert_eq!(log(64), 6); + assert_eq!(log(128), 7); + assert_eq!(log(256), 8); + assert_eq!(log(512), 9); + } +} diff --git a/src/heapz/src/utils/mod.rs b/src/heapz/src/utils/mod.rs new file mode 100644 index 00000000..5147313e --- /dev/null +++ b/src/heapz/src/utils/mod.rs @@ -0,0 +1,4 @@ +pub mod bucket; +pub mod math; + +pub use bucket::*; diff --git a/src/heapz/tests/common/mod.rs b/src/heapz/tests/common/mod.rs new file mode 100644 index 00000000..538cebce --- /dev/null +++ b/src/heapz/tests/common/mod.rs @@ -0,0 +1,389 @@ +extern crate heapz; + +use heapz::{DecreaseKey, Heap}; +use rand; +use rand::Rng; + +#[derive(Hash, Copy, Clone, Eq, PartialEq, Debug)] +pub enum Element { + Target, + Node, +} + +fn generate_numbers() -> Vec { + let size = 1000; + let mut rng = rand::thread_rng(); + (0..size).map(|_| rng.gen::()).collect() +} + +pub mod pop { + use super::{generate_numbers, Element, Heap}; + use std::cmp::{max, min}; + + pub fn returns_the_first_value_from_min_heap>(mut heap: T) { + let numbers = generate_numbers(); + let mut smallest = numbers[0]; + numbers.into_iter().for_each(|n| { + smallest = min(smallest, n); + let _ = &mut heap.push(n, n); + }); + assert_eq!(heap.pop(), Some(smallest)); + } + + pub fn returns_the_first_value_from_max_heap>(mut heap: T) { + let numbers = generate_numbers(); + let mut largest = numbers[0]; + numbers.into_iter().for_each(|n| { + largest = max(largest, n); + let _ = &mut heap.push(n, n); + }); + assert_eq!(heap.pop(), Some(largest)); + } + + pub fn removes_the_first_value_from_min_heap>(mut heap: T) { + let numbers = generate_numbers(); + let mut cloned = numbers.clone(); + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(n, n); + }); + cloned.sort_by(|a, b| b.cmp(a)); + let _ = cloned.pop(); + let _ = heap.pop(); + assert_eq!(heap.top(), cloned.get(cloned.len() - 1)); + } + + pub fn removes_the_first_value_from_max_heap>(mut heap: T) { + let numbers = generate_numbers(); + let mut cloned = numbers.clone(); + let mut largest = numbers[0]; + let mut second_largest = largest; + cloned.sort_by(|a, b| a.cmp(b)); + numbers.into_iter().for_each(|n| { + second_largest = largest; + largest = max(largest, n); + let _ = &mut heap.push(n, n); + }); + let _ = cloned.pop(); + let _ = heap.pop(); + assert_eq!(heap.top(), cloned.get(cloned.len() - 1)); + } + + pub fn returns_none_if_the_heap_is_empty>(mut heap: T) { + assert_eq!(heap.pop(), None); + } + + pub fn returns_all_elements_from_smallest_to_largest_in_a_min_heap>( + mut heap: T, + ) { + let numbers = generate_numbers(); + let mut cloned = numbers.clone(); + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(n, n); + }); + cloned.sort_by(|a, b| b.cmp(a)); + while !cloned.is_empty() { + assert_eq!(heap.pop(), cloned.pop()); + } + assert_eq!(heap.pop(), None); + } + + pub fn returns_all_elements_from_largest_to_smallest_in_a_max_heap>( + mut heap: T, + ) { + let numbers = generate_numbers(); + let mut cloned = numbers.clone(); + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(n, n); + }); + cloned.sort_by(|a, b| a.cmp(b)); + while !cloned.is_empty() { + assert_eq!(heap.pop(), cloned.pop()); + } + assert_eq!(heap.pop(), None); + } +} + +pub mod push { + use super::{Element, Heap}; + + pub fn adds_a_value_to_the_heap>(mut heap: T) { + let value = 1; + let key = Element::Target; + heap.push(key, value); + assert_eq!(heap.top(), Some(&key)); + } + + pub fn adds_a_higher_item_to_the_heap_behind_a_lower_in_a_min_heap>( + mut heap: T, + ) { + let lower = 1; + let higher = 2; + heap.push(Element::Target, lower); + heap.push(Element::Node, higher); + assert_eq!(heap.top(), Some(&Element::Target)); + } + + pub fn adds_a_higher_item_to_the_heap_before_a_lower_in_a_max_heap>( + mut heap: T, + ) { + let lower = 1; + let higher = 2; + heap.push(Element::Node, lower); + heap.push(Element::Target, higher); + assert_eq!(heap.top(), Some(&Element::Target)); + } + + pub fn adds_a_lower_item_to_the_heap_before_a_higher_in_a_min_heap>( + mut heap: T, + ) { + let lower = 1; + let higher = 2; + heap.push(Element::Node, higher); + heap.push(Element::Target, lower); + assert_eq!(heap.top(), Some(&Element::Target)); + } + + pub fn adds_a_lower_item_to_the_heap_behind_a_higher_in_a_max_heap>( + mut heap: T, + ) { + let lower = 1; + let higher = 2; + heap.push(Element::Target, higher); + heap.push(Element::Node, lower); + assert_eq!(heap.top(), Some(&Element::Target)); + } +} + +#[cfg(test)] +pub mod top { + use super::{generate_numbers, Element, Heap}; + + pub fn returns_the_first_value_in_min_a_heap>(mut heap: T) { + let mut numbers = generate_numbers(); + numbers.sort(); + numbers.reverse(); + let smallest = numbers.pop().unwrap(); + heap.push(Element::Target, smallest); + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(Element::Node, n); + }); + assert_eq!(heap.top(), Some(&Element::Target)); + } + + pub fn returns_the_first_value_in_max_a_heap>(mut heap: T) { + let mut numbers = generate_numbers(); + numbers.sort(); + let largest = numbers.pop().unwrap(); + heap.push(Element::Target, largest); + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(Element::Node, n); + }); + assert_eq!(heap.top(), Some(&Element::Target)); + } + + pub fn returns_none_if_the_heap_is_empty>(heap: T) { + assert_eq!(heap.top(), None); + } +} + +pub mod size { + use super::{generate_numbers, Heap}; + + pub fn returns_the_correct_size_of_a_heap_after_adding_elements>( + mut heap: T, + ) { + let numbers = generate_numbers(); + let len = numbers.len(); + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(n, n); + }); + assert_eq!(heap.size(), len); + } + + pub fn returns_the_correct_size_of_a_heap_after_removing_an_element>( + mut heap: T, + ) { + let numbers = generate_numbers(); + let len = numbers.len(); + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(n, n); + }); + let _ = heap.pop(); + let _ = heap.pop(); + assert_eq!(heap.size(), len - 2); + } +} + +pub mod update { + use super::{generate_numbers, DecreaseKey}; + use std::cmp::min; + + pub fn will_update_a_specific_element_by_key_in_a_min_heap>( + mut heap: T, + ) { + let mut numbers = generate_numbers(); + let target = numbers.pop().unwrap(); + let mut cloned = numbers.clone(); + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(n, n); + }); + heap.push(target, target); + cloned.sort_by(|a, b| b.cmp(a)); + let smallest = cloned[cloned.len() - 1]; + let next_smallest = smallest - 1; + heap.update(&target, next_smallest); + assert_eq!(heap.pop(), Some(target)); + while !cloned.is_empty() { + assert_eq!(heap.pop(), cloned.pop()); + } + } + + pub fn will_update_a_specific_element_by_key_in_a_min_heap_after_pop< + T: DecreaseKey, + >( + mut heap: T, + ) { + let mut numbers = generate_numbers(); + let mut cloned = numbers.clone(); + cloned.sort_by(|a, b| b.cmp(a)); + let target = cloned.remove(0); + let index = numbers.iter().position(|n| n == &target).unwrap(); + numbers.remove(index); + let mut smallest = target; + numbers.into_iter().for_each(|n| { + smallest = min(smallest, n); + let _ = &mut heap.push(n, n); + }); + heap.push(target, target); + let prev_smallest = smallest + 1; + heap.update(&target, prev_smallest); + assert_eq!(heap.pop(), cloned.pop()); + assert_eq!(heap.pop(), Some(target)); + while !cloned.is_empty() { + assert_eq!(heap.pop(), cloned.pop()); + } + } + + pub fn will_update_a_specific_element_by_key_in_a_max_heap>( + mut heap: T, + ) { + let mut numbers = generate_numbers(); + let target = numbers.pop().unwrap(); + let mut cloned = numbers.clone(); + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(n, n); + }); + heap.push(target, target); + cloned.sort_by(|a, b| a.cmp(b)); + let largest = cloned[cloned.len() - 1]; + let next_largest = largest + 1; + heap.update(&target, next_largest); + assert_eq!(heap.pop(), Some(target)); + while !cloned.is_empty() { + assert_eq!(heap.pop(), cloned.pop()); + } + } + + pub fn will_update_a_specific_element_by_key_in_a_max_heap_after_pop< + T: DecreaseKey, + >( + mut heap: T, + ) { + let mut numbers = generate_numbers(); + let mut cloned = numbers.clone(); + cloned.sort_by(|a, b| a.cmp(b)); + let target = cloned.remove(0); + let index = numbers.iter().position(|n| n == &target).unwrap(); + numbers.remove(index); + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(n, n); + }); + heap.push(target, target); + let largest = cloned[cloned.len() - 1]; + let prev_largest = largest - 1; + heap.update(&target, prev_largest); + assert_eq!(heap.pop(), cloned.pop()); + assert_eq!(heap.pop(), Some(target)); + while !heap.is_empty() { + assert_eq!(heap.pop(), cloned.pop()); + } + } +} + +pub mod delete { + use super::{generate_numbers, DecreaseKey}; + + pub fn will_delete_a_specific_element_by_key_from_min_heap>( + mut heap: T, + ) { + let numbers = generate_numbers(); + let mut cloned = numbers.clone(); + cloned.sort_by(|a, b| b.cmp(a)); + let target = cloned[0] + 100; + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(n, n); + }); + heap.push(target, target); + heap.delete(&target); + while !cloned.is_empty() && !heap.is_empty() { + assert_eq!(heap.pop(), cloned.pop()) + } + } + + pub fn will_delete_a_specific_element_by_key_from_min_heap_after_pop< + T: DecreaseKey, + >( + mut heap: T, + ) { + let numbers = generate_numbers(); + let mut cloned = numbers.clone(); + cloned.sort_by(|a, b| b.cmp(a)); + let target = cloned[0] + 100; + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(n, n); + }); + heap.push(target, target); + assert_eq!(heap.pop(), cloned.pop()); + heap.delete(&target); + while !cloned.is_empty() && !heap.is_empty() { + assert_eq!(heap.pop(), cloned.pop()) + } + } + + pub fn will_delete_a_specific_element_by_key_from_max_heap>( + mut heap: T, + ) { + let numbers = generate_numbers(); + let mut cloned = numbers.clone(); + cloned.sort_by(|a, b| a.cmp(b)); + let target = cloned[0] - 100; + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(n, n); + }); + heap.push(target, target); + heap.delete(&target); + while !cloned.is_empty() && !heap.is_empty() { + assert_eq!(heap.pop(), cloned.pop()) + } + } + + pub fn will_delete_a_specific_element_by_key_from_max_heap_after_pop< + T: DecreaseKey, + >( + mut heap: T, + ) { + let numbers = generate_numbers(); + let mut cloned = numbers.clone(); + cloned.sort_by(|a, b| a.cmp(b)); + let target = cloned[0] - 100; + numbers.into_iter().for_each(|n| { + let _ = &mut heap.push(n, n); + }); + heap.push(target, target); + assert_eq!(heap.pop(), cloned.pop()); + heap.delete(&target); + while !cloned.is_empty() && !heap.is_empty() { + assert_eq!(heap.pop(), cloned.pop()) + } + } +} diff --git a/src/heapz/tests/linked.rs b/src/heapz/tests/linked.rs new file mode 100644 index 00000000..5efc7cc2 --- /dev/null +++ b/src/heapz/tests/linked.rs @@ -0,0 +1,123 @@ +extern crate heapz; + +mod common; + +mod pop { + use super::common; + use heapz::PairingHeap; + + #[test] + fn returns_the_first_value_from_the_min_heap() { + common::pop::returns_the_first_value_from_min_heap(PairingHeap::min()); + } + + #[test] + fn returns_the_first_value_from_the_max_heap() { + common::pop::returns_the_first_value_from_max_heap(PairingHeap::max()); + } + + #[test] + fn removes_the_first_value_from_min_heap() { + common::pop::removes_the_first_value_from_min_heap(PairingHeap::min()); + } + + #[test] + fn removes_the_first_value_from_max_heap() { + common::pop::removes_the_first_value_from_max_heap(PairingHeap::max()); + } + + #[test] + fn returns_none_if_the_min_heap_is_empty() { + common::pop::returns_none_if_the_heap_is_empty(PairingHeap::min()); + } + + #[test] + fn returns_none_if_the_max_heap_is_empty() { + common::pop::returns_none_if_the_heap_is_empty(PairingHeap::max()); + } + + #[test] + fn returns_all_elements_from_smallest_to_largest_in_a_min_heap() { + common::pop::returns_all_elements_from_smallest_to_largest_in_a_min_heap(PairingHeap::min()); + } + + #[test] + fn returns_all_elements_from_largest_to_smallest_in_a_max_heap() { + common::pop::returns_all_elements_from_largest_to_smallest_in_a_max_heap(PairingHeap::max()); + } +} + +mod push { + use super::common; + use heapz::PairingHeap; + + #[test] + fn adds_a_value_to_the_heap() { + common::push::adds_a_value_to_the_heap(PairingHeap::min()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower_in_a_min_heap() { + common::push::adds_a_higher_item_to_the_heap_behind_a_lower_in_a_min_heap( + PairingHeap::min(), + ); + } + + #[test] + fn adds_a_higher_item_to_the_heap_before_a_lower_in_a_max_heap() { + common::push::adds_a_higher_item_to_the_heap_before_a_lower_in_a_max_heap( + PairingHeap::max(), + ); + } + + #[test] + fn adds_a_lower_item_to_the_heap_before_a_higher_in_a_min_heap() { + common::push::adds_a_lower_item_to_the_heap_before_a_higher_in_a_min_heap( + PairingHeap::min(), + ); + } + + #[test] + fn adds_a_lower_item_to_the_heap_behind_a_higher_in_a_max_heap() { + common::push::adds_a_lower_item_to_the_heap_behind_a_higher_in_a_max_heap( + PairingHeap::max(), + ); + } +} + +mod top { + use super::common; + use heapz::PairingHeap; + + #[test] + fn returns_the_first_value_in_a_max_heap() { + common::top::returns_the_first_value_in_max_a_heap(PairingHeap::max()); + } + + #[test] + fn returns_the_first_value_in_a_min_heap() { + common::top::returns_the_first_value_in_min_a_heap(PairingHeap::min()); + } + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::top::returns_none_if_the_heap_is_empty(PairingHeap::max()); + } +} + +mod size { + use super::common; + use heapz::PairingHeap; + + #[test] + fn returns_the_correct_size_of_a_heap_after_adding_elements() { + common::size::returns_the_correct_size_of_a_heap_after_adding_elements(PairingHeap::max()); + } + + #[test] + fn returns_the_correct_size_of_a_heap_after_removing_an_element() { + common::size::returns_the_correct_size_of_a_heap_after_removing_an_element( + PairingHeap::min(), + ); + } +} diff --git a/src/heapz/tests/ranked.rs b/src/heapz/tests/ranked.rs new file mode 100644 index 00000000..8dba1c52 --- /dev/null +++ b/src/heapz/tests/ranked.rs @@ -0,0 +1,1002 @@ +extern crate heapz; + +mod common; + +mod multi_pass_min { + mod delete { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn delete_an_element_by_key() { + common::delete::will_delete_a_specific_element_by_key_from_min_heap( + RankPairingHeap::multi_pass_min(), + ); + } + + #[test] + fn delete_an_element_by_key_after_pop() { + common::delete::will_delete_a_specific_element_by_key_from_min_heap_after_pop( + RankPairingHeap::multi_pass_min(), + ); + } + } + + mod update { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn updates_an_element_by_key() { + common::update::will_update_a_specific_element_by_key_in_a_min_heap( + RankPairingHeap::multi_pass_min(), + ); + } + + #[test] + fn updates_an_element_by_key_after_pop() { + common::update::will_update_a_specific_element_by_key_in_a_min_heap_after_pop( + RankPairingHeap::multi_pass_min(), + ); + } + } + + mod pop { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn removes_the_first_value_from_heap() { + common::pop::removes_the_first_value_from_min_heap(RankPairingHeap::multi_pass_min()); + } + + #[test] + fn returns_the_first_value_from_the_heap() { + common::pop::returns_the_first_value_from_min_heap(RankPairingHeap::multi_pass_min()); + } + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::pop::returns_none_if_the_heap_is_empty(RankPairingHeap::multi_pass_min()); + } + + #[test] + fn returns_all_elements_from_largest_to_smallest() { + common::pop::returns_all_elements_from_smallest_to_largest_in_a_min_heap( + RankPairingHeap::multi_pass_min(), + ); + } + } + + mod push { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn adds_a_value_to_the_heap() { + common::push::adds_a_value_to_the_heap(RankPairingHeap::multi_pass_min()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::push::adds_a_higher_item_to_the_heap_behind_a_lower_in_a_min_heap( + RankPairingHeap::multi_pass_min(), + ); + } + + #[test] + fn adds_a_lower_item_to_the_heap_before_a_higher() { + common::push::adds_a_lower_item_to_the_heap_before_a_higher_in_a_min_heap( + RankPairingHeap::multi_pass_min(), + ); + } + } + + mod top { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::top::returns_none_if_the_heap_is_empty(RankPairingHeap::multi_pass_min()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::top::returns_the_first_value_in_min_a_heap(RankPairingHeap::multi_pass_min()); + } + } + + mod size { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_the_correct_size_of_a_heap_after_adding_elements() { + common::size::returns_the_correct_size_of_a_heap_after_adding_elements( + RankPairingHeap::multi_pass_min(), + ); + } + + #[test] + fn returns_the_first_value_in_a_heap() { + common::size::returns_the_correct_size_of_a_heap_after_removing_an_element( + RankPairingHeap::multi_pass_min(), + ); + } + } +} + +mod multi_pass_min2 { + mod delete { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn delete_an_element_by_key() { + common::delete::will_delete_a_specific_element_by_key_from_min_heap( + RankPairingHeap::multi_pass_min2(), + ); + } + + #[test] + fn delete_an_element_by_key_after_pop() { + common::delete::will_delete_a_specific_element_by_key_from_min_heap_after_pop( + RankPairingHeap::multi_pass_min2(), + ); + } + } + + mod update { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn updates_an_element_by_key() { + common::update::will_update_a_specific_element_by_key_in_a_min_heap( + RankPairingHeap::multi_pass_min2(), + ); + } + + #[test] + fn updates_an_element_by_key_after_pop() { + common::update::will_update_a_specific_element_by_key_in_a_min_heap_after_pop( + RankPairingHeap::multi_pass_min2(), + ); + } + } + + mod pop { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn removes_the_first_value_from_heap() { + common::pop::removes_the_first_value_from_min_heap(RankPairingHeap::multi_pass_min2()); + } + + #[test] + fn returns_the_first_value_from_the_heap() { + common::pop::returns_the_first_value_from_min_heap(RankPairingHeap::multi_pass_min2()); + } + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::pop::returns_none_if_the_heap_is_empty(RankPairingHeap::multi_pass_min2()); + } + + #[test] + fn returns_all_elements_from_largest_to_smallest() { + common::pop::returns_all_elements_from_smallest_to_largest_in_a_min_heap( + RankPairingHeap::multi_pass_min2(), + ); + } + } + + mod push { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn adds_a_value_to_the_heap() { + common::push::adds_a_value_to_the_heap(RankPairingHeap::multi_pass_min2()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::push::adds_a_higher_item_to_the_heap_behind_a_lower_in_a_min_heap( + RankPairingHeap::multi_pass_min2(), + ); + } + + #[test] + fn adds_a_lower_item_to_the_heap_before_a_higher() { + common::push::adds_a_lower_item_to_the_heap_before_a_higher_in_a_min_heap( + RankPairingHeap::multi_pass_min2(), + ); + } + } + + mod top { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::top::returns_none_if_the_heap_is_empty(RankPairingHeap::multi_pass_min2()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::top::returns_the_first_value_in_min_a_heap(RankPairingHeap::multi_pass_min2()); + } + } + + mod size { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_the_correct_size_of_a_heap_after_adding_elements() { + common::size::returns_the_correct_size_of_a_heap_after_adding_elements( + RankPairingHeap::multi_pass_min2(), + ); + } + + #[test] + fn returns_the_first_value_in_a_heap() { + common::size::returns_the_correct_size_of_a_heap_after_removing_an_element( + RankPairingHeap::multi_pass_min2(), + ); + } + } +} + +mod single_pass_min { + mod delete { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn delete_an_element_by_key() { + common::delete::will_delete_a_specific_element_by_key_from_min_heap( + RankPairingHeap::single_pass_min(), + ); + } + + #[test] + fn delete_an_element_by_key_after_pop() { + common::delete::will_delete_a_specific_element_by_key_from_min_heap_after_pop( + RankPairingHeap::single_pass_min(), + ); + } + } + + mod update { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn updates_an_element_by_key() { + common::update::will_update_a_specific_element_by_key_in_a_min_heap( + RankPairingHeap::single_pass_min(), + ); + } + + #[test] + fn updates_an_element_by_key_after_pop() { + common::update::will_update_a_specific_element_by_key_in_a_min_heap_after_pop( + RankPairingHeap::single_pass_min(), + ); + } + } + + mod pop { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn removes_the_first_value_from_heap() { + common::pop::removes_the_first_value_from_min_heap(RankPairingHeap::single_pass_min()); + } + + #[test] + fn returns_the_first_value_from_the_heap() { + common::pop::returns_the_first_value_from_min_heap(RankPairingHeap::single_pass_min()); + } + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::pop::returns_none_if_the_heap_is_empty(RankPairingHeap::single_pass_min()); + } + + #[test] + fn returns_all_elements_from_largest_to_smallest() { + common::pop::returns_all_elements_from_smallest_to_largest_in_a_min_heap( + RankPairingHeap::single_pass_min(), + ); + } + } + + mod push { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn adds_a_value_to_the_heap() { + common::push::adds_a_value_to_the_heap(RankPairingHeap::single_pass_min()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::push::adds_a_higher_item_to_the_heap_behind_a_lower_in_a_min_heap( + RankPairingHeap::single_pass_min(), + ); + } + + #[test] + fn adds_a_lower_item_to_the_heap_before_a_higher() { + common::push::adds_a_lower_item_to_the_heap_before_a_higher_in_a_min_heap( + RankPairingHeap::single_pass_min(), + ); + } + } + + mod top { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::top::returns_none_if_the_heap_is_empty(RankPairingHeap::single_pass_min()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::top::returns_the_first_value_in_min_a_heap(RankPairingHeap::single_pass_min()); + } + } + + mod size { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_the_correct_size_of_a_heap_after_adding_elements() { + common::size::returns_the_correct_size_of_a_heap_after_adding_elements( + RankPairingHeap::single_pass_min(), + ); + } + + #[test] + fn returns_the_first_value_in_a_heap() { + common::size::returns_the_correct_size_of_a_heap_after_removing_an_element( + RankPairingHeap::single_pass_min(), + ); + } + } +} + +mod single_pass_min2 { + mod delete { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn delete_an_element_by_key() { + common::delete::will_delete_a_specific_element_by_key_from_min_heap( + RankPairingHeap::single_pass_min2(), + ); + } + + #[test] + fn delete_an_element_by_key_after_pop() { + common::delete::will_delete_a_specific_element_by_key_from_min_heap_after_pop( + RankPairingHeap::single_pass_min2(), + ); + } + } + + mod update { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn updates_an_element_by_key() { + common::update::will_update_a_specific_element_by_key_in_a_min_heap( + RankPairingHeap::single_pass_min2(), + ); + } + + #[test] + fn updates_an_element_by_key_after_pop() { + common::update::will_update_a_specific_element_by_key_in_a_min_heap_after_pop( + RankPairingHeap::single_pass_min2(), + ); + } + } + mod pop { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn removes_the_first_value_from_heap() { + common::pop::removes_the_first_value_from_min_heap(RankPairingHeap::single_pass_min2()); + } + + #[test] + fn returns_the_first_value_from_the_heap() { + common::pop::returns_the_first_value_from_min_heap(RankPairingHeap::single_pass_min2()); + } + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::pop::returns_none_if_the_heap_is_empty(RankPairingHeap::single_pass_min2()); + } + + #[test] + fn returns_all_elements_from_largest_to_smallest() { + common::pop::returns_all_elements_from_smallest_to_largest_in_a_min_heap( + RankPairingHeap::single_pass_min2(), + ); + } + } + + mod push { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn adds_a_value_to_the_heap() { + common::push::adds_a_value_to_the_heap(RankPairingHeap::single_pass_min2()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::push::adds_a_higher_item_to_the_heap_behind_a_lower_in_a_min_heap( + RankPairingHeap::single_pass_min2(), + ); + } + + #[test] + fn adds_a_lower_item_to_the_heap_before_a_higher() { + common::push::adds_a_lower_item_to_the_heap_before_a_higher_in_a_min_heap( + RankPairingHeap::single_pass_min2(), + ); + } + } + + mod top { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::top::returns_none_if_the_heap_is_empty(RankPairingHeap::single_pass_min2()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::top::returns_the_first_value_in_min_a_heap(RankPairingHeap::single_pass_min2()); + } + } + + mod size { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_the_correct_size_of_a_heap_after_adding_elements() { + common::size::returns_the_correct_size_of_a_heap_after_adding_elements( + RankPairingHeap::single_pass_min2(), + ); + } + + #[test] + fn returns_the_first_value_in_a_heap() { + common::size::returns_the_correct_size_of_a_heap_after_removing_an_element( + RankPairingHeap::single_pass_min2(), + ); + } + } +} + +mod multi_pass_max { + mod delete { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn delete_an_element_by_key() { + common::delete::will_delete_a_specific_element_by_key_from_max_heap( + RankPairingHeap::multi_pass_max(), + ); + } + + #[test] + fn delete_an_element_by_key_after_pop() { + common::delete::will_delete_a_specific_element_by_key_from_max_heap_after_pop( + RankPairingHeap::multi_pass_max(), + ); + } + } + + mod update { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn updates_an_element_by_key() { + common::update::will_update_a_specific_element_by_key_in_a_max_heap( + RankPairingHeap::multi_pass_max(), + ); + } + + #[test] + fn updates_an_element_by_key_after_pop() { + common::update::will_update_a_specific_element_by_key_in_a_max_heap_after_pop( + RankPairingHeap::multi_pass_max(), + ); + } + } + + mod pop { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn removes_the_first_value_from_heap() { + common::pop::removes_the_first_value_from_max_heap(RankPairingHeap::multi_pass_max()); + } + + #[test] + fn returns_the_first_value_from_the_heap() { + common::pop::returns_the_first_value_from_max_heap(RankPairingHeap::multi_pass_max()); + } + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::pop::returns_none_if_the_heap_is_empty(RankPairingHeap::multi_pass_max()); + } + + #[test] + fn returns_all_elements_from_largest_to_smallest() { + common::pop::returns_all_elements_from_largest_to_smallest_in_a_max_heap( + RankPairingHeap::multi_pass_max(), + ); + } + } + + mod push { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn adds_a_value_to_the_heap() { + common::push::adds_a_value_to_the_heap(RankPairingHeap::multi_pass_max()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::push::adds_a_higher_item_to_the_heap_before_a_lower_in_a_max_heap( + RankPairingHeap::multi_pass_max(), + ); + } + + #[test] + fn adds_a_lower_item_to_the_heap_before_a_higher() { + common::push::adds_a_lower_item_to_the_heap_behind_a_higher_in_a_max_heap( + RankPairingHeap::multi_pass_max(), + ); + } + } + + mod top { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::top::returns_none_if_the_heap_is_empty(RankPairingHeap::multi_pass_max()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::top::returns_the_first_value_in_max_a_heap(RankPairingHeap::multi_pass_max()); + } + } + + mod size { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_the_correct_size_of_a_heap_after_adding_elements() { + common::size::returns_the_correct_size_of_a_heap_after_adding_elements( + RankPairingHeap::multi_pass_max(), + ); + } + + #[test] + fn returns_the_first_value_in_a_heap() { + common::size::returns_the_correct_size_of_a_heap_after_removing_an_element( + RankPairingHeap::multi_pass_max(), + ); + } + } +} + +mod multi_pass_max2 { + mod delete { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn delete_an_element_by_key() { + common::delete::will_delete_a_specific_element_by_key_from_max_heap( + RankPairingHeap::multi_pass_max2(), + ); + } + + #[test] + fn delete_an_element_by_key_after_pop() { + common::delete::will_delete_a_specific_element_by_key_from_max_heap_after_pop( + RankPairingHeap::multi_pass_max2(), + ); + } + } + + mod update { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn updates_an_element_by_key() { + common::update::will_update_a_specific_element_by_key_in_a_max_heap( + RankPairingHeap::multi_pass_max2(), + ); + } + + #[test] + fn updates_an_element_by_key_after_pop() { + common::update::will_update_a_specific_element_by_key_in_a_max_heap_after_pop( + RankPairingHeap::multi_pass_max2(), + ); + } + } + + mod pop { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn removes_the_first_value_from_heap() { + common::pop::removes_the_first_value_from_max_heap(RankPairingHeap::multi_pass_max2()); + } + + #[test] + fn returns_the_first_value_from_the_heap() { + common::pop::returns_the_first_value_from_max_heap(RankPairingHeap::multi_pass_max2()); + } + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::pop::returns_none_if_the_heap_is_empty(RankPairingHeap::multi_pass_max2()); + } + + #[test] + fn returns_all_elements_from_largest_to_smallest() { + common::pop::returns_all_elements_from_largest_to_smallest_in_a_max_heap( + RankPairingHeap::multi_pass_max2(), + ); + } + } + + mod push { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn adds_a_value_to_the_heap() { + common::push::adds_a_value_to_the_heap(RankPairingHeap::multi_pass_max2()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::push::adds_a_higher_item_to_the_heap_before_a_lower_in_a_max_heap( + RankPairingHeap::multi_pass_max2(), + ); + } + + #[test] + fn adds_a_lower_item_to_the_heap_before_a_higher() { + common::push::adds_a_lower_item_to_the_heap_behind_a_higher_in_a_max_heap( + RankPairingHeap::multi_pass_max2(), + ); + } + } + + mod top { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::top::returns_none_if_the_heap_is_empty(RankPairingHeap::multi_pass_max2()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::top::returns_the_first_value_in_max_a_heap(RankPairingHeap::multi_pass_max2()); + } + } + + mod size { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_the_correct_size_of_a_heap_after_adding_elements() { + common::size::returns_the_correct_size_of_a_heap_after_adding_elements( + RankPairingHeap::multi_pass_max2(), + ); + } + + #[test] + fn returns_the_first_value_in_a_heap() { + common::size::returns_the_correct_size_of_a_heap_after_removing_an_element( + RankPairingHeap::multi_pass_max2(), + ); + } + } +} + +mod single_pass_max { + mod delete { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn delete_an_element_by_key() { + common::delete::will_delete_a_specific_element_by_key_from_max_heap( + RankPairingHeap::single_pass_max(), + ); + } + + #[test] + fn delete_an_element_by_key_after_pop() { + common::delete::will_delete_a_specific_element_by_key_from_max_heap_after_pop( + RankPairingHeap::single_pass_max(), + ); + } + } + + mod update { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn updates_an_element_by_key() { + common::update::will_update_a_specific_element_by_key_in_a_max_heap( + RankPairingHeap::single_pass_max(), + ); + } + + #[test] + fn updates_an_element_by_key_after_pop() { + common::update::will_update_a_specific_element_by_key_in_a_max_heap_after_pop( + RankPairingHeap::single_pass_max(), + ); + } + } + + mod pop { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn removes_the_first_value_from_heap() { + common::pop::removes_the_first_value_from_max_heap(RankPairingHeap::single_pass_max()); + } + + #[test] + fn returns_the_first_value_from_the_heap() { + common::pop::returns_the_first_value_from_max_heap(RankPairingHeap::single_pass_max()); + } + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::pop::returns_none_if_the_heap_is_empty(RankPairingHeap::single_pass_max()); + } + + #[test] + fn returns_all_elements_from_largest_to_smallest() { + common::pop::returns_all_elements_from_largest_to_smallest_in_a_max_heap( + RankPairingHeap::single_pass_max(), + ); + } + } + + mod push { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn adds_a_value_to_the_heap() { + common::push::adds_a_value_to_the_heap(RankPairingHeap::single_pass_max()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::push::adds_a_higher_item_to_the_heap_before_a_lower_in_a_max_heap( + RankPairingHeap::single_pass_max(), + ); + } + + #[test] + fn adds_a_lower_item_to_the_heap_before_a_higher() { + common::push::adds_a_lower_item_to_the_heap_behind_a_higher_in_a_max_heap( + RankPairingHeap::single_pass_max(), + ); + } + } + + mod top { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::top::returns_none_if_the_heap_is_empty(RankPairingHeap::single_pass_max()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::top::returns_the_first_value_in_max_a_heap(RankPairingHeap::single_pass_max()); + } + } + + mod size { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_the_correct_size_of_a_heap_after_adding_elements() { + common::size::returns_the_correct_size_of_a_heap_after_adding_elements( + RankPairingHeap::single_pass_max(), + ); + } + + #[test] + fn returns_the_first_value_in_a_heap() { + common::size::returns_the_correct_size_of_a_heap_after_removing_an_element( + RankPairingHeap::single_pass_max(), + ); + } + } +} + +mod single_pass_max2 { + mod delete { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn delete_an_element_by_key() { + common::delete::will_delete_a_specific_element_by_key_from_max_heap( + RankPairingHeap::single_pass_max2(), + ); + } + + #[test] + fn delete_an_element_by_key_after_pop() { + common::delete::will_delete_a_specific_element_by_key_from_max_heap_after_pop( + RankPairingHeap::single_pass_max2(), + ); + } + } + + mod update { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn updates_an_element_by_key() { + common::update::will_update_a_specific_element_by_key_in_a_max_heap( + RankPairingHeap::single_pass_max2(), + ); + } + + #[test] + fn updates_an_element_by_key_after_pop() { + common::update::will_update_a_specific_element_by_key_in_a_max_heap_after_pop( + RankPairingHeap::single_pass_max2(), + ); + } + } + + mod pop { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn removes_the_first_value_from_heap() { + common::pop::removes_the_first_value_from_max_heap(RankPairingHeap::single_pass_max2()); + } + + #[test] + fn returns_the_first_value_from_the_heap() { + common::pop::returns_the_first_value_from_max_heap(RankPairingHeap::single_pass_max2()); + } + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::pop::returns_none_if_the_heap_is_empty(RankPairingHeap::single_pass_max2()); + } + + #[test] + fn returns_all_elements_from_largest_to_smallest() { + common::pop::returns_all_elements_from_largest_to_smallest_in_a_max_heap( + RankPairingHeap::single_pass_max2(), + ); + } + } + + mod push { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn adds_a_value_to_the_heap() { + common::push::adds_a_value_to_the_heap(RankPairingHeap::single_pass_max2()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::push::adds_a_higher_item_to_the_heap_before_a_lower_in_a_max_heap( + RankPairingHeap::single_pass_max2(), + ); + } + + #[test] + fn adds_a_lower_item_to_the_heap_before_a_higher() { + common::push::adds_a_lower_item_to_the_heap_behind_a_higher_in_a_max_heap( + RankPairingHeap::single_pass_max2(), + ); + } + } + + mod top { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_none_if_the_heap_is_empty() { + common::top::returns_none_if_the_heap_is_empty(RankPairingHeap::single_pass_max2()); + } + + #[test] + fn adds_a_higher_item_to_the_heap_behind_a_lower() { + common::top::returns_the_first_value_in_max_a_heap(RankPairingHeap::single_pass_max2()); + } + } + + mod size { + use super::super::common; + use heapz::RankPairingHeap; + + #[test] + fn returns_the_correct_size_of_a_heap_after_adding_elements() { + common::size::returns_the_correct_size_of_a_heap_after_adding_elements( + RankPairingHeap::single_pass_max2(), + ); + } + + #[test] + fn returns_the_first_value_in_a_heap() { + common::size::returns_the_correct_size_of_a_heap_after_removing_an_element( + RankPairingHeap::single_pass_max2(), + ); + } + } +} diff --git a/src/highs/.gitignore b/src/highs/.gitignore new file mode 100644 index 00000000..ef05b694 --- /dev/null +++ b/src/highs/.gitignore @@ -0,0 +1,25 @@ +# Generated by Cargo +# will have compiled files and executables +/target/ + +# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries +# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html +Cargo.lock + +# These are backup files generated by rustfmt +**/*.rs.bk + + +# Added by cargo + +/target + + +# Added by cargo +# +# already existing elements were commented out + +#/target + +.idea +Highs.log \ No newline at end of file diff --git a/src/highs/Cargo.toml b/src/highs/Cargo.toml new file mode 100644 index 00000000..2c32098b --- /dev/null +++ b/src/highs/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "highs" +version = "1.6.1" +authors = ["Ophir LOJKINE", "rust-or"] +edition = "2021" +description = "Safe rust bindings for the HiGHS linear programming solver. See http://highs.dev." +license = "MIT" +repository = "https://github.com/rust-or/highs" +keywords = ["linear-programming", "optimization", "math", "solver"] + +[dependencies] +highs-sys = "1.6.1" +log = "0.4.17" diff --git a/src/highs/LICENSE b/src/highs/LICENSE new file mode 100644 index 00000000..ccc01352 --- /dev/null +++ b/src/highs/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Rust Operations Research + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/src/highs/README.md b/src/highs/README.md new file mode 100644 index 00000000..8666d92d --- /dev/null +++ b/src/highs/README.md @@ -0,0 +1,44 @@ +# highs + +[![highs docs badge](https://docs.rs/highs/badge.svg)](https://docs.rs/highs) + +Safe rust bindings to the Highs MILP Solver. Best used from the [**good_lp**](https://crates.io/crates/good_lp) linear +programming modeler. + +## Usage examples + +#### Building a problem variable by variable + +```rust +use highs::{ColProblem, Sense}; + +fn main() { + let mut pb = ColProblem::new(); + // We cannot use more then 5 units of sugar in total. + let sugar = pb.add_row(..=5); + // We cannot use more then 3 units of milk in total. + let milk = pb.add_row(..=3); + // We have a first cake that we can sell for 2€. Baking it requires 1 unit of milk and 2 of sugar. + pb.add_integer_column(2., 0.., &[(sugar, 2.), (milk, 1.)]); + // We have a second cake that we can sell for 8€. Baking it requires 2 units of milk and 3 of sugar. + pb.add_integer_column(8., 0.., &[(sugar, 3.), (milk, 2.)]); + // Find the maximal possible profit + let solution = pb.optimise(Sense::Maximise).solve().get_solution(); + // The solution is to bake one cake of each sort + assert_eq!(solution.columns(), vec![1., 1.]); +} +``` + +#### Building a problem constraint by constraint + +```rust +use highs::*; + +fn main() { + let mut pb = RowProblem::new(); + // Optimize 3x - 2y with x<=6 and y>=5 + let x = pb.add_column(3., ..6); + let y = pb.add_column(-2., 5..); + pb.add_row(2.., &[(x, 3.), (y, 8.)]); // 2 <= x*3 + y*8 +} +``` diff --git a/src/highs/fuzz/.gitignore b/src/highs/fuzz/.gitignore new file mode 100644 index 00000000..a0925114 --- /dev/null +++ b/src/highs/fuzz/.gitignore @@ -0,0 +1,3 @@ +target +corpus +artifacts diff --git a/src/highs/fuzz/Cargo.toml b/src/highs/fuzz/Cargo.toml new file mode 100644 index 00000000..27c70428 --- /dev/null +++ b/src/highs/fuzz/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "highs-fuzz" +version = "0.0.0" +authors = ["Ophir Lojkine"] +publish = false +edition = "2021" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +libfuzzer-sys = {version="0.4", features=["arbitrary-derive"]} + +[dependencies.highs] +path = ".." + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[[bin]] +name = "fuzz_target_1" +path = "fuzz_targets/fuzz_target_1.rs" +test = false +doc = false diff --git a/src/highs/fuzz/fuzz_targets/fuzz_target_1.rs b/src/highs/fuzz/fuzz_targets/fuzz_target_1.rs new file mode 100644 index 00000000..3b207e16 --- /dev/null +++ b/src/highs/fuzz/fuzz_targets/fuzz_target_1.rs @@ -0,0 +1,52 @@ +#![no_main] +use highs::{RowProblem, Sense}; +use libfuzzer_sys::arbitrary; +use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured}; +use libfuzzer_sys::fuzz_target; +use std::ops::Range; + +#[derive(Arbitrary)] +struct ColData { + val: f64, + range: Range, + integrality: bool, +} + +fn test(u: &mut Unstructured) -> arbitrary::Result<()> { + let mut pb = RowProblem::default(); + let vars = u + .arbitrary_iter::()? + .map(|cd| { + let cd = cd?; + Ok(pb.add_column_with_integrality(cd.val, cd.range, cd.integrality)) + }) + .collect::, _>>()?; + + let num_rows = u.arbitrary::()? as usize; + + for _ in 0..num_rows { + let range = u.arbitrary::>()?; + pb.add_row( + range, + &[ + (*u.choose(&vars)?, u.arbitrary()?), + (*u.choose(&vars)?, u.arbitrary()?), + (*u.choose(&vars)?, u.arbitrary()?), + ], + ); + } + if let Ok(solved) = pb + .try_optimise(*u.choose(&[Sense::Maximise, Sense::Minimise])?) + .and_then(|p| p.try_solve()) + { + let solution = solved.get_solution(); + assert_eq!(solution.columns().len(), vars.len()); + assert_eq!(solution.rows().len(), num_rows); + } + Ok(()) +} + +fuzz_target!(|data: &[u8]| { + let mut u = Unstructured::new(data); + let _ = test(&mut u); +}); diff --git a/src/highs/src/lib.rs b/src/highs/src/lib.rs new file mode 100644 index 00000000..ba942e18 --- /dev/null +++ b/src/highs/src/lib.rs @@ -0,0 +1,759 @@ +#![forbid(missing_docs)] +//! Safe rust binding to the [HiGHS](https://highs.dev) linear programming solver. +//! +//! ## Usage example +//! +//! ### Building a problem constraint by constraint with [RowProblem] +//! +//! Useful for traditional problem modelling where you first declare your variables, then add +//!constraints one by one. +//! +//! ``` +//! use highs::{Sense, Model, HighsModelStatus, RowProblem}; +//! // max: x + 2y + z +//! // under constraints: +//! // c1: 3x + y <= 6 +//! // c2: y + 2z <= 7 +//! let mut pb = RowProblem::default(); +//! // Create a variable named x, with a coefficient of 1 in the objective function, +//! // that is bound between 0 and +∞. +//! let x = pb.add_column(1., 0..); +//! let y = pb.add_column(2., 0..); +//! let z = pb.add_column(1., 0..); +//! // constraint c1: x*3 + y*1 is bound to ]-∞; 6] +//! pb.add_row(..=6, &[(x, 3.), (y, 1.)]); +//! // constraint c2: y*1 + z*2 is bound to ]-∞; 7] +//! pb.add_row(..=7, &[(y, 1.), (z, 2.)]); +//! +//! let solved = pb.optimise(Sense::Maximise).solve(); +//! +//! assert_eq!(solved.status(), HighsModelStatus::Optimal); +//! +//! let solution = solved.get_solution(); +//! // The expected solution is x=0 y=6 z=0.5 +//! assert_eq!(solution.columns(), vec![0., 6., 0.5]); +//! // All the constraints are at their maximum +//! assert_eq!(solution.rows(), vec![6., 7.]); +//! ``` +//! +//! ### Building a problem variable by variable with [ColProblem] +//! +//! Useful for resource allocation problems and other problems when you know in advance the number +//! of constraints and their bounds, but dynamically add new variables to the problem. +//! +//! This is slightly more efficient than building the problem constraint by constraint. +//! +//! ``` +//! use highs::{ColProblem, Sense}; +//! let mut pb = ColProblem::new(); +//! // We cannot use more then 5 units of sugar in total. +//! let sugar = pb.add_row(..=5); +//! // We cannot use more then 3 units of milk in total. +//! let milk = pb.add_row(..=3); +//! // We have a first cake that we can sell for 2€. Baking it requires 1 unit of milk and 2 of sugar. +//! pb.add_integer_column(2., 0.., &[(sugar, 2.), (milk, 1.)]); +//! // We have a second cake that we can sell for 8€. Baking it requires 2 units of milk and 3 of sugar. +//! pb.add_integer_column(8., 0.., &[(sugar, 3.), (milk, 2.)]); +//! // Find the maximal possible profit +//! let solution = pb.optimise(Sense::Maximise).solve().get_solution(); +//! // The solution is to bake 1 cake of each sort +//! assert_eq!(solution.columns(), vec![1., 1.]); +//! ``` +//! +//! ``` +//! use highs::{Sense, Model, HighsModelStatus, ColProblem}; +//! // max: x + 2y + z +//! // under constraints: +//! // c1: 3x + y <= 6 +//! // c2: y + 2z <= 7 +//! let mut pb = ColProblem::default(); +//! let c1 = pb.add_row(..6.); +//! let c2 = pb.add_row(..7.); +//! // x +//! pb.add_column(1., 0.., &[(c1, 3.)]); +//! // y +//! pb.add_column(2., 0.., &[(c1, 1.), (c2, 1.)]); +//! // z +//! pb.add_column(1., 0.., vec![(c2, 2.)]); +//! +//! let solved = pb.optimise(Sense::Maximise).solve(); +//! +//! assert_eq!(solved.status(), HighsModelStatus::Optimal); +//! +//! let solution = solved.get_solution(); +//! // The expected solution is x=0 y=6 z=0.5 +//! assert_eq!(solution.columns(), vec![0., 6., 0.5]); +//! // All the constraints are at their maximum +//! assert_eq!(solution.rows(), vec![6., 7.]); +//! ``` +//! +//! ### Integer variables +//! +//! HiGHS supports mixed integer-linear programming. +//! You can use `add_integer_column` to add an integer variable to the problem, +//! and the solution is then guaranteed to contain a whole number as a value for this variable. +//! +//! ``` +//! use highs::{Sense, Model, HighsModelStatus, ColProblem}; +//! // maximize: x + 2y under constraints x + y <= 3.5 and x - y >= 1 +//! let mut pb = ColProblem::default(); +//! let c1 = pb.add_row(..3.5); +//! let c2 = pb.add_row(1..); +//! // x (continuous variable) +//! pb.add_column(1., 0.., &[(c1, 1.), (c2, 1.)]); +//! // y (integer variable) +//! pb.add_integer_column(2., 0.., &[(c1, 1.), (c2, -1.)]); +//! let solved = pb.optimise(Sense::Maximise).solve(); +//! // The expected solution is x=2.5 y=1 +//! assert_eq!(solved.get_solution().columns(), vec![2.5, 1.]); +//! ``` + +use std::convert::{TryFrom, TryInto}; +use std::ffi::{c_void, CString}; +use std::num::TryFromIntError; +use std::ops::{Bound, Index, RangeBounds}; +use std::os::raw::c_int; + +use highs_sys::*; + +pub use matrix_col::{ColMatrix, Row}; +pub use matrix_row::{Col, RowMatrix}; +pub use status::{HighsModelStatus, HighsStatus}; + +use crate::options::HighsOptionValue; + +/// A problem where variables are declared first, and constraints are then added dynamically. +/// See [`Problem`](Problem#impl-1). +pub type RowProblem = Problem; +/// A problem where constraints are declared first, and variables are then added dynamically. +/// See [`Problem`](Problem#impl). +pub type ColProblem = Problem; + +mod matrix_col; +mod matrix_row; +mod options; +mod status; + +/// A complete optimization problem. +/// Depending on the `MATRIX` type parameter, the problem will be built +/// constraint by constraint (with [ColProblem]), or +/// variable by variable (with [RowProblem]) +#[derive(Debug, Clone, PartialEq, Default)] +pub struct Problem { + // columns + colcost: Vec, + collower: Vec, + colupper: Vec, + // rows + rowlower: Vec, + rowupper: Vec, + integrality: Option>, + matrix: MATRIX, +} + +impl Problem +where + Problem: From>, +{ + /// Number of variables in the problem + pub fn num_cols(&self) -> usize { + self.colcost.len() + } + + /// Number of constraints in the problem + pub fn num_rows(&self) -> usize { + self.rowlower.len() + } + + fn add_row_inner + Copy, B: RangeBounds>(&mut self, bounds: B) -> Row { + let r = Row(self.num_rows().try_into().expect("too many rows")); + let low = bound_value(bounds.start_bound()).unwrap_or(f64::NEG_INFINITY); + let high = bound_value(bounds.end_bound()).unwrap_or(f64::INFINITY); + self.rowlower.push(low); + self.rowupper.push(high); + r + } + + fn add_column_inner + Copy, B: RangeBounds>(&mut self, col_factor: f64, bounds: B, is_integral: bool) { + if is_integral && self.integrality.is_none() { + self.integrality = Some(vec![0; self.num_cols()]); + } + if let Some(integrality) = &mut self.integrality { + integrality.push(if is_integral { 1 } else { 0 }); + } + self.colcost.push(col_factor); + let low = bound_value(bounds.start_bound()).unwrap_or(f64::NEG_INFINITY); + let high = bound_value(bounds.end_bound()).unwrap_or(f64::INFINITY); + self.collower.push(low); + self.colupper.push(high); + } + + /// Create a model based on this problem. Don't solve it yet. + /// If the problem is a [RowProblem], it will have to be converted to a [ColProblem] first, + /// which takes an amount of time proportional to the size of the problem. + /// If the problem is invalid (according to HiGHS), this function will panic. + pub fn optimise(self, sense: Sense) -> Model { + self.try_optimise(sense).expect("invalid problem") + } + + /// Create a model based on this problem. Don't solve it yet. + /// If the problem is a [RowProblem], it will have to be converted to a [ColProblem] first, + /// which takes an amount of time proportional to the size of the problem. + pub fn try_optimise(self, sense: Sense) -> Result { + let mut m = Model::try_new(self)?; + m.set_sense(sense); + Ok(m) + } + + /// Create a new problem instance + pub fn new() -> Self { + Self::default() + } +} + +fn bound_value + Copy>(b: Bound<&N>) -> Option { + match b { + Bound::Included(v) | Bound::Excluded(v) => Some((*v).into()), + Bound::Unbounded => None, + } +} + +fn c(n: usize) -> HighsInt { + n.try_into().expect("size too large for HiGHS") +} + +macro_rules! highs_call { + ($function_name:ident ($($param:expr),+)) => { + try_handle_status( + $function_name($($param),+), + stringify!($function_name) + ) + } +} + +/// A model to solve +#[derive(Debug)] +pub struct Model { + highs: HighsPtr, +} + +/// A solved model +#[derive(Debug)] +pub struct SolvedModel { + highs: HighsPtr, +} + +/// Whether to maximize or minimize the objective function +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Debug)] +pub enum Sense { + /// max + Maximise = OBJECTIVE_SENSE_MAXIMIZE as isize, + /// min + Minimise = OBJECTIVE_SENSE_MINIMIZE as isize, +} + +impl Model { + /// Set the optimization sense (minimize by default) + pub fn set_sense(&mut self, sense: Sense) { + let ret = unsafe { Highs_changeObjectiveSense(self.highs.mut_ptr(), sense as c_int) }; + assert_eq!(ret, STATUS_OK, "changeObjectiveSense failed"); + } + + /// Create a Highs model to be optimized (but don't solve it yet). + /// If the given problem is a [RowProblem], it will have to be converted to a [ColProblem] first, + /// which takes an amount of time proportional to the size of the problem. + /// Panics if the problem is incoherent + pub fn new>>(problem: P) -> Self { + Self::try_new(problem).expect("incoherent problem") + } + + /// Create a Highs model to be optimized (but don't solve it yet). + /// If the given problem is a [RowProblem], it will have to be converted to a [ColProblem] first, + /// which takes an amount of time proportional to the size of the problem. + /// Returns an error if the problem is incoherent + pub fn try_new>>(problem: P) -> Result { + let mut highs = HighsPtr::default(); + highs.make_quiet(); + let problem = problem.into(); + log::debug!( + "Adding a problem with {} variables and {} constraints to HiGHS", + problem.num_cols(), + problem.num_rows() + ); + let offset = 0.0; + unsafe { + if let Some(integrality) = &problem.integrality { + highs_call!(Highs_passMip( + highs.mut_ptr(), + c(problem.num_cols()), + c(problem.num_rows()), + c(problem.matrix.avalue.len()), + MATRIX_FORMAT_COLUMN_WISE, + OBJECTIVE_SENSE_MINIMIZE, + offset, + problem.colcost.as_ptr(), + problem.collower.as_ptr(), + problem.colupper.as_ptr(), + problem.rowlower.as_ptr(), + problem.rowupper.as_ptr(), + problem.matrix.astart.as_ptr(), + problem.matrix.aindex.as_ptr(), + problem.matrix.avalue.as_ptr(), + integrality.as_ptr() + )) + } else { + highs_call!(Highs_passLp( + highs.mut_ptr(), + c(problem.num_cols()), + c(problem.num_rows()), + c(problem.matrix.avalue.len()), + MATRIX_FORMAT_COLUMN_WISE, + OBJECTIVE_SENSE_MINIMIZE, + offset, + problem.colcost.as_ptr(), + problem.collower.as_ptr(), + problem.colupper.as_ptr(), + problem.rowlower.as_ptr(), + problem.rowupper.as_ptr(), + problem.matrix.astart.as_ptr(), + problem.matrix.aindex.as_ptr(), + problem.matrix.avalue.as_ptr() + )) + } + .map(|_| Self { highs }) + } + } + + /// Prevents writing anything to the standard output or to files when solving the model + pub fn make_quiet(&mut self) { + self.highs.make_quiet() + } + + /// Set a custom parameter on the model. + /// For the list of available options and their documentation, see: + /// + /// + /// ``` + /// # use highs::ColProblem; + /// # use highs::Sense::Maximise; + /// let mut model = ColProblem::default().optimise(Maximise); + /// model.set_option("presolve", "off"); // disable the presolver + /// model.set_option("solver", "ipm"); // use the ipm solver + /// model.set_option("time_limit", 30.0); // stop after 30 seconds + /// model.set_option("parallel", "on"); // use multiple cores + /// model.set_option("threads", 4); // solve on 4 threads + /// ``` + pub fn set_option>, V: HighsOptionValue>(&mut self, option: STR, value: V) { + self.highs.set_option(option, value) + } + + /// Find the optimal value for the problem, panic if the problem is incoherent + pub fn solve(self) -> SolvedModel { + self.try_solve().expect("HiGHS error: invalid problem") + } + + /// Find the optimal value for the problem, return an error if the problem is incoherent + pub fn try_solve(mut self) -> Result { + unsafe { highs_call!(Highs_run(self.highs.mut_ptr())) }.map(|_| SolvedModel { highs: self.highs }) + } + + /// Changes the bounds of a row. + /// + /// # Panics + /// + /// If HIGHS returns an error status value. + pub fn change_row_bounds(&mut self, row: Row, bounds: impl RangeBounds) { + self.try_change_row_bounds(row, bounds) + .unwrap_or_else(|e| panic!("HiGHS error: {:?}", e)) + } + + /// Tries to change the bounds of a row in the highs model. + /// + /// Returns Ok(()), or the error status value if HIGHS returned an error status. + pub fn try_change_row_bounds(&mut self, row: Row, bounds: impl RangeBounds) -> Result<(), HighsStatus> { + unsafe { + highs_call!(Highs_changeRowBounds( + self.highs.mut_ptr(), + row.0.try_into().unwrap(), + bound_value(bounds.start_bound()).unwrap_or(f64::NEG_INFINITY), + bound_value(bounds.end_bound()).unwrap_or(f64::INFINITY) + ))?; + } + Ok(()) + } + + /// Changes the bounds of a row. + /// + /// # Panics + /// + /// If HIGHS returns an error status value. + pub fn change_col_bounds(&mut self, col: Col, bounds: impl RangeBounds) { + self.try_change_col_bounds(col, bounds) + .unwrap_or_else(|e| panic!("HiGHS error: {:?}", e)) + } + + /// Tries to change the bounds of a row in the highs model. + /// + /// Returns Ok(()), or the error status value if HIGHS returned an error status. + pub fn try_change_col_bounds(&mut self, col: Col, bounds: impl RangeBounds) -> Result<(), HighsStatus> { + unsafe { + highs_call!(Highs_changeColBounds( + self.highs.mut_ptr(), + col.0.try_into().unwrap(), + bound_value(bounds.start_bound()).unwrap_or(f64::NEG_INFINITY), + bound_value(bounds.end_bound()).unwrap_or(f64::INFINITY) + ))?; + } + Ok(()) + } + + /// Change a coefficient in the constraint matrix. + /// + /// # Panics + /// + /// If HIGHS returns an error status value. + pub fn change_matrix_coefficient(&mut self, row: Row, col: Col, value: f64) { + self.try_change_matrix_coefficient(row, col, value) + .unwrap_or_else(|e| panic!("HiGHS error: {:?}", e)) + } + + /// Tries to change a coefficient in the constraint matrix. + /// + /// Returns Ok(()), or the error status value if HIGHS returned an error status. + pub fn try_change_matrix_coefficient(&mut self, row: Row, col: Col, value: f64) -> Result<(), HighsStatus> { + unsafe { + highs_call!(Highs_changeCoeff( + self.highs.mut_ptr(), + row.0.try_into().unwrap(), + col.0.try_into().unwrap(), + value + ))?; + } + Ok(()) + } + + /// Adds a new constraint to the highs model. + /// + /// Returns the added row index. + /// + /// # Panics + /// + /// If HIGHS returns an error status value. + pub fn add_row(&mut self, bounds: impl RangeBounds, row_factors: impl IntoIterator) -> Row { + self.try_add_row(bounds, row_factors) + .unwrap_or_else(|e| panic!("HiGHS error: {:?}", e)) + } + + /// Tries to add a new constraint to the highs model. + /// + /// Returns the added row index, or the error status value if HIGHS returned an error status. + pub fn try_add_row( + &mut self, + bounds: impl RangeBounds, + row_factors: impl IntoIterator, + ) -> Result { + let (cols, factors): (Vec<_>, Vec<_>) = row_factors.into_iter().unzip(); + + unsafe { + highs_call!(Highs_addRow( + self.highs.mut_ptr(), + bound_value(bounds.start_bound()).unwrap_or(f64::NEG_INFINITY), + bound_value(bounds.end_bound()).unwrap_or(f64::INFINITY), + cols.len().try_into().unwrap(), + cols.into_iter().map(|c| c.0.try_into().unwrap()).collect::>().as_ptr(), + factors.as_ptr() + )) + }?; + + Ok(Row((self.highs.num_rows()? - 1) as c_int)) + } + + /// Adds a new variable to the highs model. + /// + /// Returns the added column index. + /// + /// # Panics + /// + /// If HIGHS returns an error status value. + pub fn add_col( + &mut self, + col_factor: f64, + bounds: impl RangeBounds, + row_factors: impl IntoIterator, + ) -> Col { + self.try_add_column(col_factor, bounds, row_factors) + .unwrap_or_else(|e| panic!("HiGHS error: {:?}", e)) + } + + /// Tries to add a new variable to the highs model. + /// + /// Returns the added column index, or the error status value if HIGHS returned an error status. + pub fn try_add_column( + &mut self, + col_factor: f64, + bounds: impl RangeBounds, + row_factors: impl IntoIterator, + ) -> Result { + let (rows, factors): (Vec<_>, Vec<_>) = row_factors.into_iter().unzip(); + unsafe { + highs_call!(Highs_addCol( + self.highs.mut_ptr(), + col_factor, + bound_value(bounds.start_bound()).unwrap_or(f64::NEG_INFINITY), + bound_value(bounds.end_bound()).unwrap_or(f64::INFINITY), + rows.len().try_into().unwrap(), + rows.into_iter().map(|r| r.0.try_into().unwrap()).collect::>().as_ptr(), + factors.as_ptr() + )) + }?; + + Ok(Col(self.highs.num_cols()? - 1)) + } +} + +impl From for Model { + fn from(solved: SolvedModel) -> Self { + Self { highs: solved.highs } + } +} + +#[derive(Debug)] +struct HighsPtr(*mut c_void); + +impl Drop for HighsPtr { + fn drop(&mut self) { + unsafe { Highs_destroy(self.0) } + } +} + +impl Default for HighsPtr { + fn default() -> Self { + Self(unsafe { Highs_create() }) + } +} + +impl HighsPtr { + // To be used instead of unsafe_mut_ptr wherever possible + #[allow(dead_code)] + const fn ptr(&self) -> *const c_void { + self.0 + } + + // Needed until https://github.com/ERGO-Code/HiGHS/issues/479 is fixed + unsafe fn unsafe_mut_ptr(&self) -> *mut c_void { + self.0 + } + + fn mut_ptr(&mut self) -> *mut c_void { + self.0 + } + + /// Prevents writing anything to the standard output when solving the model + pub fn make_quiet(&mut self) { + // setting log_file seems to cause a double free in Highs. + // See https://github.com/rust-or/highs/issues/3 + // self.set_option(&b"log_file"[..], ""); + self.set_option(&b"output_flag"[..], false); + self.set_option(&b"log_to_console"[..], false); + } + + /// Set a custom parameter on the model + pub fn set_option>, V: HighsOptionValue>(&mut self, option: STR, value: V) { + let c_str = CString::new(option).expect("invalid option name"); + let status = unsafe { value.apply_to_highs(self.mut_ptr(), c_str.as_ptr()) }; + try_handle_status(status, "Highs_setOptionValue").expect("An error was encountered in HiGHS."); + } + + /// Number of variables + fn num_cols(&self) -> Result { + let n = unsafe { Highs_getNumCols(self.0) }; + n.try_into() + } + + /// Number of constraints + fn num_rows(&self) -> Result { + let n = unsafe { Highs_getNumRows(self.0) }; + n.try_into() + } +} + +impl SolvedModel { + /// The status of the solution. Should be Optimal if everything went well + pub fn status(&self) -> HighsModelStatus { + let model_status = unsafe { Highs_getModelStatus(self.highs.unsafe_mut_ptr()) }; + HighsModelStatus::try_from(model_status).unwrap() + } + + /// Get the solution to the problem + pub fn get_solution(&self) -> Solution { + let cols = self.num_cols(); + let rows = self.num_rows(); + let mut colvalue: Vec = vec![0.; cols]; + let mut coldual: Vec = vec![0.; cols]; + let mut rowvalue: Vec = vec![0.; rows]; + let mut rowdual: Vec = vec![0.; rows]; + + // Get the primal and dual solution + unsafe { + Highs_getSolution( + self.highs.unsafe_mut_ptr(), + colvalue.as_mut_ptr(), + coldual.as_mut_ptr(), + rowvalue.as_mut_ptr(), + rowdual.as_mut_ptr(), + ); + } + + Solution { + colvalue, + coldual, + rowvalue, + rowdual, + } + } + + /// Number of variables + fn num_cols(&self) -> usize { + self.highs.num_cols().expect("invalid number of columns") + } + + /// Number of constraints + fn num_rows(&self) -> usize { + self.highs.num_rows().expect("invalid number of rows") + } +} + +/// Concrete values of the solution +#[derive(Clone, Debug)] +pub struct Solution { + colvalue: Vec, + coldual: Vec, + rowvalue: Vec, + rowdual: Vec, +} + +impl Solution { + /// The optimal values for each variables (in the order they were added) + pub fn columns(&self) -> &[f64] { + &self.colvalue + } + /// The optimal values for each variables in the dual problem (in the order they were added) + pub fn dual_columns(&self) -> &[f64] { + &self.coldual + } + /// The value of the constraint functions + pub fn rows(&self) -> &[f64] { + &self.rowvalue + } + /// The value of the constraint functions in the dual problem + pub fn dual_rows(&self) -> &[f64] { + &self.rowdual + } +} + +impl Index for Solution { + type Output = f64; + fn index(&self, col: Col) -> &f64 { + &self.colvalue[col.0] + } +} + +fn try_handle_status(status: c_int, msg: &str) -> Result { + let status_enum = HighsStatus::try_from(status).expect( + "HiGHS returned an unexpected status value. Please report it as a bug to https://github.com/rust-or/highs/issues", + ); + match status_enum { + status @ HighsStatus::OK => Ok(status), + status @ HighsStatus::Warning => { + log::warn!("HiGHS emitted a warning: {}", msg); + Ok(status) + } + error => Err(error), + } +} + +/// Releases all resources held by the global scheduler instance. +/// +/// It is not thread-safe to call this function while calling Highs_run or one of the Highs_XXXcall +/// methods on any other Highs instance in any thread. +/// +/// After this function has terminated, it is guaranteed that eventually all previously created scheduler +/// threads will terminate and allocated memory will be released. +/// +/// After this function has returned, the option value for the number of threads may be altered to a new +/// value before the next call to Highs_run or one of the Highs_XXXcall methods. +/// +/// * param : blocking If the blocking parameter has a nonzero value, then this function will not return +/// until all memory is freed, which might be desirable when debugging heap memory, but it +/// requires the calling thread to wait for all scheduler threads to wake-up which is usually +/// not necessary. +/// +/// * return : No status is returned since the function call cannot fail. Calling this function while +/// any Highs instance is in use on any thread is undefined behavior and may cause crashes, +/// but cannot be detected and hence is fully in the callers responsibility. +/// +/// note: this should be invoked when using multiple cores/threads to avoid memory leaks +pub unsafe fn highs_release_resources(blocking: bool) { + Highs_resetGlobalScheduler(match blocking { + true => 1, + false => 0, + }); +} + +#[cfg(test)] +mod test { + use super::*; + + fn test_coefs(coefs: [f64; 2]) { + // See: https://github.com/rust-or/highs/issues/5 + let mut problem = RowProblem::new(); + // Minimize x + y subject to x ≥ 0, y ≥ 0. + let x = problem.add_column(1., -1..); + let y = problem.add_column(1., 0..); + problem.add_row(..1, [x, y].iter().copied().zip(coefs)); // 1 ≥ x + c y. + let solution = problem.optimise(Sense::Minimise).solve().get_solution(); + assert_eq!([-1., 0.], solution.columns()); + } + + #[test] + fn test_single_zero_coef() { + test_coefs([1.0, 0.0]); + test_coefs([0.0, 1.0]); + } + + #[test] + fn test_all_zero_coefs() { + test_coefs([0.0, 0.0]) + } + + #[test] + fn test_no_zero_coefs() { + test_coefs([1.0, 1.0]) + } + + #[test] + fn test_infeasible_empty_row() { + let mut problem = RowProblem::new(); + let row_factors: &[(Col, f64)] = &[]; + problem.add_row(2..3, row_factors); + let _ = problem.optimise(Sense::Minimise).try_solve(); + } + + #[test] + fn test_add_row_and_col() { + let mut model = Model::new::>(Problem::default()); + let col = model.add_col(1., 1.0.., vec![]); + model.add_row(..1.0, vec![(col, 1.0)]); + let solved = model.solve(); + assert_eq!(solved.status(), HighsModelStatus::Optimal); + let solution = solved.get_solution(); + assert_eq!(solution.columns(), vec![1.0]); + + let mut model = Model::from(solved); + let new_col = model.add_col(1., ..1.0, vec![]); + model.add_row(2.0.., vec![(new_col, 1.0)]); + let solved = model.solve(); + assert_eq!(solved.status(), HighsModelStatus::Infeasible); + } +} diff --git a/src/highs/src/matrix_col.rs b/src/highs/src/matrix_col.rs new file mode 100644 index 00000000..570e0dc6 --- /dev/null +++ b/src/highs/src/matrix_col.rs @@ -0,0 +1,110 @@ +//! col-oriented matrix to build a problem variable by variable +use std::borrow::Borrow; +use std::convert::TryInto; +use std::ops::RangeBounds; +use std::os::raw::c_int; + +use crate::Problem; + +/// Represents a constraint +#[derive(Debug, Clone, Copy)] +pub struct Row(pub(crate) c_int); + +/// A constraint matrix to build column-by-column +#[derive(Debug, Clone, PartialEq, Default)] +pub struct ColMatrix { + // column-wise sparse constraints matrix + pub(crate) astart: Vec, + pub(crate) aindex: Vec, + pub(crate) avalue: Vec, +} + +/// To use these functions, you need to first add all your constraints, and then add variables +/// one by one using the [Row] objects. +impl Problem { + /// Add a row (a constraint) to the problem. + /// The concrete factors are added later, when creating columns. + pub fn add_row + Copy, B: RangeBounds>(&mut self, bounds: B) -> Row { + self.add_row_inner(bounds) + } + + /// Add a continuous variable to the problem. + /// - `col_factor` represents the factor in front of the variable in the objective function. + /// - `bounds` represents the maximal and minimal allowed values of the variable. + /// - `row_factors` defines how much this variable weights in each constraint. + /// + /// ``` + /// use highs::{ColProblem, Sense}; + /// let mut pb = ColProblem::new(); + /// let constraint = pb.add_row(..=5); // adds a constraint that cannot take a value over 5 + /// // add a variable that has a coefficient 2 in the objective function, is >=0, and has a coefficient + /// // 2 in the constraint + /// pb.add_column(2., 0.., &[(constraint, 2.)]); + /// ``` + pub fn add_column< + N: Into + Copy, + B: RangeBounds, + ITEM: Borrow<(Row, f64)>, + I: IntoIterator, + >( + &mut self, + col_factor: f64, + bounds: B, + row_factors: I, + ) { + self.add_column_with_integrality(col_factor, bounds, row_factors, false); + } + + /// Same as add_column, but forces the solution to contain an integer value for this variable. + /// + /// ``` + /// use highs::{ColProblem, Sense}; + /// let mut pb = ColProblem::new(); + /// let constraint = pb.add_row(..=5); // adds a constraint that cannot take a value over 5 + /// // add an integer variable that has a coefficient 2 in the objective function, is >=0, and has a coefficient + /// // 2 in the constraint + /// pb.add_integer_column(2., 0.., &[(constraint, 2.)]); + /// ``` + pub fn add_integer_column< + N: Into + Copy, + B: RangeBounds, + ITEM: Borrow<(Row, f64)>, + I: IntoIterator, + >( + &mut self, + col_factor: f64, + bounds: B, + row_factors: I, + ) { + self.add_column_with_integrality(col_factor, bounds, row_factors, true); + } + + /// Same as add_column, but lets you define whether the new variable should be integral or continuous. + #[inline] + pub fn add_column_with_integrality< + N: Into + Copy, + B: RangeBounds, + ITEM: Borrow<(Row, f64)>, + I: IntoIterator, + >( + &mut self, + col_factor: f64, + bounds: B, + row_factors: I, + is_integer: bool, + ) { + self.matrix + .astart + .push(self.matrix.aindex.len().try_into().unwrap()); + let iter = row_factors.into_iter(); + let (size, _) = iter.size_hint(); + self.matrix.aindex.reserve(size); + self.matrix.avalue.reserve(size); + for r in iter { + let &(row, factor) = r.borrow(); + self.matrix.aindex.push(row.0); + self.matrix.avalue.push(factor); + } + self.add_column_inner(col_factor, bounds, is_integer); + } +} diff --git a/src/highs/src/matrix_row.rs b/src/highs/src/matrix_row.rs new file mode 100644 index 00000000..6f27fd33 --- /dev/null +++ b/src/highs/src/matrix_row.rs @@ -0,0 +1,174 @@ +//! row-oriented matrix to build a problem constraint by constraint +use std::borrow::Borrow; +use std::convert::TryInto; +use std::ops::RangeBounds; +use std::os::raw::c_int; + +use crate::matrix_col::ColMatrix; +use crate::Problem; + +/// Represents a variable +#[derive(Debug, Clone, Copy)] +pub struct Col(pub(crate) usize); + +/// A complete optimization problem stored by row +#[derive(Debug, Clone, PartialEq, Default)] +pub struct RowMatrix { + /// column-wise sparse constraints matrix + /// Each element in the outer vector represents a column (a variable) + columns: Vec<(Vec, Vec)>, +} + +/// Functions to use when first declaring variables, then constraints. +impl Problem { + /// add a variable to the problem. + /// - `col_factor` is the coefficient in front of the variable in the objective function. + /// - `bounds` are the maximal and minimal values that the variable can take. + pub fn add_column + Copy, B: RangeBounds>( + &mut self, + col_factor: f64, + bounds: B, + ) -> Col { + self.add_column_with_integrality(col_factor, bounds, false) + } + + /// Same as add_column, but forces the solution to contain an integer value for this variable. + pub fn add_integer_column + Copy, B: RangeBounds>( + &mut self, + col_factor: f64, + bounds: B, + ) -> Col { + self.add_column_with_integrality(col_factor, bounds, true) + } + + /// Same as add_column, but lets you define whether the new variable should be integral or continuous. + #[inline] + pub fn add_column_with_integrality + Copy, B: RangeBounds>( + &mut self, + col_factor: f64, + bounds: B, + is_integer: bool, + ) -> Col { + let col = Col(self.num_cols()); + self.add_column_inner(col_factor, bounds, is_integer); + self.matrix.columns.push((vec![], vec![])); + col + } + + /// Add a constraint to the problem. + /// - `bounds` are the maximal and minimal allowed values for the linear expression in the constraint + /// - `row_factors` are the coefficients in the linear expression expressing the constraint + /// + /// ``` + /// use highs::*; + /// let mut pb = RowProblem::new(); + /// // Optimize 3x - 2y with x<=6 and y>=5 + /// let x = pb.add_column(3., ..6); + /// let y = pb.add_column(-2., 5..); + /// pb.add_row(2.., &[(x, 3.), (y, 8.)]); // 2 <= x*3 + y*8 + /// ``` + pub fn add_row< + N: Into + Copy, + B: RangeBounds, + ITEM: Borrow<(Col, f64)>, + I: IntoIterator, + >( + &mut self, + bounds: B, + row_factors: I, + ) { + let num_rows: c_int = self.num_rows().try_into().expect("too many rows"); + for r in row_factors { + let &(col, factor) = r.borrow(); + let c = &mut self.matrix.columns[col.0]; + c.0.push(num_rows); + c.1.push(factor); + } + self.add_row_inner(bounds); + } +} + +impl From for ColMatrix { + fn from(m: RowMatrix) -> Self { + let mut astart = Vec::with_capacity(m.columns.len()); + astart.push(0); + let size: usize = m.columns.iter().map(|(v, _)| v.len()).sum(); + let mut aindex = Vec::with_capacity(size); + let mut avalue = Vec::with_capacity(size); + for (row_indices, factors) in m.columns { + aindex.extend_from_slice(&row_indices); + avalue.extend_from_slice(&factors); + astart.push(aindex.len().try_into().expect("invalid matrix size")); + } + Self { + astart, + aindex, + avalue, + } + } +} + +#[allow(clippy::float_cmp)] +#[test] +fn test_conversion() { + use crate::status::HighsModelStatus::Optimal; + use crate::{ColProblem, Model, RowProblem, Sense}; + let inf = f64::INFINITY; + let neg_inf = f64::NEG_INFINITY; + let mut p = RowProblem::default(); + let x: Col = p.add_column(1., -1..2); + let y: Col = p.add_column(9., 4f64..inf); + p.add_row(-999f64..inf, &[(x, 666.), (y, 777.)]); + p.add_row(neg_inf..8880f64, &[(y, 888.)]); + assert_eq!( + p, + RowProblem { + colcost: vec![1., 9.], + collower: vec![-1., 4.], + colupper: vec![2., inf], + rowlower: vec![-999., neg_inf], + rowupper: vec![inf, 8880.], + integrality: None, + matrix: RowMatrix { + columns: vec![(vec![0], vec![666.]), (vec![0, 1], vec![777., 888.])], + }, + } + ); + let colpb = ColProblem::from(p.clone()); + assert_eq!( + colpb, + ColProblem { + colcost: vec![1., 9.], + collower: vec![-1., 4.], + colupper: vec![2., inf], + rowlower: vec![-999., neg_inf], + rowupper: vec![inf, 8880.], + integrality: None, + matrix: ColMatrix { + astart: vec![0, 1, 3], + aindex: vec![0, 0, 1], + avalue: vec![666., 777., 888.], + }, + } + ); + let mut m = Model::new(p); + m.make_quiet(); + m.set_sense(Sense::Maximise); + let solved = m.solve(); + assert_eq!(solved.status(), Optimal); + assert_eq!(solved.get_solution().columns(), &[2., 10.]); +} + +impl From> for Problem { + fn from(pb: Problem) -> Problem { + Self { + colcost: pb.colcost, + collower: pb.collower, + colupper: pb.colupper, + rowlower: pb.rowlower, + rowupper: pb.rowupper, + integrality: pb.integrality, + matrix: pb.matrix.into(), + } + } +} diff --git a/src/highs/src/options.rs b/src/highs/src/options.rs new file mode 100644 index 00000000..41309ab8 --- /dev/null +++ b/src/highs/src/options.rs @@ -0,0 +1,42 @@ +use std::ffi::{c_void, CString, CStr}; +use std::os::raw::{c_char, c_int}; + +pub trait HighsOptionValue { + unsafe fn apply_to_highs(self, highs: *mut c_void, option: *const c_char) -> c_int; +} + +impl HighsOptionValue for bool { + unsafe fn apply_to_highs(self, highs: *mut c_void, option: *const c_char) -> c_int { + highs_sys::Highs_setBoolOptionValue(highs, option, if self { 1 } else { 0 }) + } +} + +impl HighsOptionValue for i32 { + unsafe fn apply_to_highs(self, highs: *mut c_void, option: *const c_char) -> c_int { + highs_sys::Highs_setIntOptionValue(highs, option, self) + } +} + +impl HighsOptionValue for f64 { + unsafe fn apply_to_highs(self, highs: *mut c_void, option: *const c_char) -> c_int { + highs_sys::Highs_setDoubleOptionValue(highs, option, self) + } +} + +impl<'a> HighsOptionValue for &'a CStr { + unsafe fn apply_to_highs(self, highs: *mut c_void, option: *const c_char) -> c_int { + highs_sys::Highs_setStringOptionValue(highs, option, self.as_ptr()) + } +} + +impl<'a> HighsOptionValue for &'a [u8] { + unsafe fn apply_to_highs(self, highs: *mut c_void, option: *const c_char) -> c_int { + CString::new(self).expect("invalid highs option value").apply_to_highs(highs, option) + } +} + +impl<'a> HighsOptionValue for &'a str { + unsafe fn apply_to_highs(self, highs: *mut c_void, option: *const c_char) -> c_int { + self.as_bytes().apply_to_highs(highs, option) + } +} diff --git a/src/highs/src/status.rs b/src/highs/src/status.rs new file mode 100644 index 00000000..6b317861 --- /dev/null +++ b/src/highs/src/status.rs @@ -0,0 +1,121 @@ +use std::convert::TryFrom; +use std::fmt::{Debug, Formatter}; +use std::num::TryFromIntError; +use std::os::raw::c_int; + +use highs_sys::*; + +/// The kinds of results of an optimization +#[derive(Clone, Copy, Debug, PartialOrd, PartialEq, Ord, Eq)] +pub enum HighsModelStatus { + /// not initialized + NotSet = MODEL_STATUS_NOTSET as isize, + /// Unable to load model + LoadError = MODEL_STATUS_LOAD_ERROR as isize, + /// invalid model + ModelError = MODEL_STATUS_MODEL_ERROR as isize, + /// Unable to run the pre-solve phase + PresolveError = MODEL_STATUS_PRESOLVE_ERROR as isize, + /// Unable to solve + SolveError = MODEL_STATUS_SOLVE_ERROR as isize, + /// Unable to clean after solve + PostsolveError = MODEL_STATUS_POSTSOLVE_ERROR as isize, + /// No variables in the model: nothing to optimize + /// ``` + /// use highs::*; + /// let solved = ColProblem::new().optimise(Sense::Maximise).solve(); + /// assert_eq!(solved.status(), HighsModelStatus::ModelEmpty); + /// ``` + ModelEmpty = MODEL_STATUS_MODEL_EMPTY as isize, + /// There is no solution to the problem + Infeasible = MODEL_STATUS_INFEASIBLE as isize, + /// The problem in unbounded or infeasible + UnboundedOrInfeasible = MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE as isize, + /// The problem is unbounded: there is no single optimal value + Unbounded = MODEL_STATUS_UNBOUNDED as isize, + /// An optimal solution was found + Optimal = MODEL_STATUS_OPTIMAL as isize, + /// objective bound + ObjectiveBound = MODEL_STATUS_OBJECTIVE_BOUND as isize, + /// objective target + ObjectiveTarget = MODEL_STATUS_OBJECTIVE_TARGET as isize, + /// reached limit + ReachedTimeLimit = MODEL_STATUS_REACHED_TIME_LIMIT as isize, + /// reached limit + ReachedIterationLimit = MODEL_STATUS_REACHED_ITERATION_LIMIT as isize, + /// Unknown model status + Unknown = MODEL_STATUS_UNKNOWN as isize, +} + +/// This error should never happen: an unexpected status was returned +#[derive(PartialEq, Clone, Copy)] +pub struct InvalidStatus(pub c_int); + +impl Debug for InvalidStatus { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{} is not a valid HiGHS model status. \ + This error comes from a bug in highs rust bindings. \ + Please report it.", + self.0 + ) + } +} + +impl TryFrom for HighsModelStatus { + type Error = InvalidStatus; + + fn try_from(value: c_int) -> Result { + use highs_sys::*; + match value { + MODEL_STATUS_NOTSET => Ok(Self::NotSet), + MODEL_STATUS_LOAD_ERROR => Ok(Self::LoadError), + MODEL_STATUS_MODEL_ERROR => Ok(Self::ModelError), + MODEL_STATUS_PRESOLVE_ERROR => Ok(Self::PresolveError), + MODEL_STATUS_SOLVE_ERROR => Ok(Self::SolveError), + MODEL_STATUS_POSTSOLVE_ERROR => Ok(Self::PostsolveError), + MODEL_STATUS_MODEL_EMPTY => Ok(Self::ModelEmpty), + MODEL_STATUS_INFEASIBLE => Ok(Self::Infeasible), + MODEL_STATUS_UNBOUNDED => Ok(Self::Unbounded), + MODEL_STATUS_UNBOUNDED_OR_INFEASIBLE => Ok(Self::UnboundedOrInfeasible), + MODEL_STATUS_OPTIMAL => Ok(Self::Optimal), + MODEL_STATUS_OBJECTIVE_BOUND => Ok(Self::ObjectiveBound), + MODEL_STATUS_OBJECTIVE_TARGET => Ok(Self::ObjectiveTarget), + MODEL_STATUS_REACHED_TIME_LIMIT => Ok(Self::ReachedTimeLimit), + MODEL_STATUS_REACHED_ITERATION_LIMIT => Ok(Self::ReachedIterationLimit), + MODEL_STATUS_UNKNOWN => Ok(Self::Unknown), + n => Err(InvalidStatus(n)), + } + } +} + +/// The status of a highs operation +#[derive(Clone, Copy, Debug, PartialOrd, PartialEq, Ord, Eq)] +pub enum HighsStatus { + /// Success + OK = 0, + /// Done, with warning + Warning = 1, + /// An error occurred + Error = 2, +} + +impl From for HighsStatus { + fn from(_: TryFromIntError) -> Self { + Self::Error + } +} + +impl TryFrom for HighsStatus { + type Error = InvalidStatus; + + fn try_from(value: c_int) -> Result { + match value { + STATUS_OK => Ok(Self::OK), + STATUS_WARNING => Ok(Self::Warning), + STATUS_ERROR => Ok(Self::Error), + n => Err(InvalidStatus(n)), + } + } +} diff --git a/src/invalid_subgraph.rs b/src/invalid_subgraph.rs index f459bccb..fa7ff40c 100644 --- a/src/invalid_subgraph.rs +++ b/src/invalid_subgraph.rs @@ -1,5 +1,6 @@ use crate::decoding_hypergraph::*; use crate::derivative::Derivative; +use crate::dual_module::DualModuleImpl; use crate::matrix::*; use crate::plugin::EchelonMatrix; use crate::util::*; @@ -9,6 +10,12 @@ use std::collections::BTreeSet; use std::hash::{Hash, Hasher}; use std::sync::Arc; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak}; + + /// an invalid subgraph $S = (V_S, E_S)$, also store the hair $\delta(S)$ #[derive(Clone, PartialEq, Eq, Derivative)] #[derivative(Debug)] @@ -16,12 +23,12 @@ pub struct InvalidSubgraph { /// the hash value calculated by other fields #[derivative(Debug = "ignore")] pub hash_value: u64, - /// subset of vertices - pub vertices: BTreeSet, - /// subset of edges - pub edges: BTreeSet, + /// subset of vertex weak pointers, nota that the vertex struct is from dual_module_pq + pub vertices: BTreeSet, + /// subset of edge weak pointers, note that the edge struct is from dual_module_pq + pub edges: BTreeSet, /// the hair of the invalid subgraph, to avoid repeated computation - pub hair: BTreeSet, + pub hair: BTreeSet, } impl Hash for InvalidSubgraph { @@ -39,6 +46,18 @@ impl Ord for InvalidSubgraph { } else { // rare cases: same hash value but different state (&self.vertices, &self.edges, &self.hair).cmp(&(&other.vertices, &other.edges, &other.hair)) + // // Compare vertices, then edges, then hair + // let vertices_cmp = self.vertices.iter().cmp(other.vertices.iter()); + // if vertices_cmp != Ordering::Equal { + // return vertices_cmp; + // } + + // let edges_cmp = self.edges.iter().cmp(other.edges.iter()); + // if edges_cmp != Ordering::Equal { + // return edges_cmp; + // } + + // self.hair.iter().cmp(other.hair.iter()) } } } @@ -50,47 +69,73 @@ impl PartialOrd for InvalidSubgraph { } impl InvalidSubgraph { - /// construct an invalid subgraph using only $E_S$, and constructing the $V_S$ by $\cup E_S$ + /// construct an invalid subgraph using only $E_S$, and constructing the $V_S$ by $\cup E_S$ for given dual_module + /// the invalid subgraph generated is a local graph if the decoding_graph is a local graph + /// delete decoding_graph: &DecodingHyperGraph when release, it is here merely to run sanity_check() #[allow(clippy::unnecessary_cast)] - pub fn new(edges: BTreeSet, decoding_graph: &DecodingHyperGraph) -> Self { - let mut vertices = BTreeSet::new(); - for &edge_index in edges.iter() { - let hyperedge = &decoding_graph.model_graph.initializer.weighted_edges[edge_index as usize]; - for &vertex_index in hyperedge.vertices.iter() { - vertices.insert(vertex_index); + pub fn new(edges: &BTreeSet) -> Self { + // println!("edges input: {:?}", edges); + let mut vertices: BTreeSet = BTreeSet::new(); + for edge_ptr in edges.iter() { + for vertex_ptr in edge_ptr.get_vertex_neighbors().iter() { + vertices.insert(vertex_ptr.upgrade_force().clone()); } } - Self::new_complete(vertices, edges, decoding_graph) + // let mut vertices: BTreeSet = BTreeSet::new(); + // for edge_ptr in edges.iter() { + // for vertex_ptr in edge_ptr.read_recursive().vertices.iter() { + // vertices.insert(vertex_ptr.upgrade_force().clone()); + // } + // } + Self::new_complete(&vertices, edges) } /// complete definition of invalid subgraph $S = (V_S, E_S)$ #[allow(clippy::unnecessary_cast)] pub fn new_complete( - vertices: BTreeSet, - edges: BTreeSet, - decoding_graph: &DecodingHyperGraph, + vertices: &BTreeSet, + edges: &BTreeSet ) -> Self { - let mut hair = BTreeSet::new(); - for &vertex_index in vertices.iter() { - let vertex = &decoding_graph.model_graph.vertices[vertex_index as usize]; - for &edge_index in vertex.edges.iter() { - if !edges.contains(&edge_index) { - hair.insert(edge_index); + // current implementation with using helper function + // println!("input vertex to new_complete: {:?}", vertices); + let mut hair: BTreeSet = BTreeSet::new(); + for vertex_ptr in vertices.iter() { + // println!("vertex index in new_complete: {:?}", vertex_ptr.read_recursive().vertex_index); + for edge_ptr in vertex_ptr.get_edge_neighbors().iter() { + // println!("edges near vertex {:?}", edge_ptr.upgrade_force().read_recursive().edge_index); + if !edges.contains(&edge_ptr.upgrade_force()) { + hair.insert(edge_ptr.upgrade_force()); } } } - let invalid_subgraph = Self::new_raw(vertices, edges, hair); - debug_assert_eq!(invalid_subgraph.sanity_check(decoding_graph), Ok(())); + let invalid_subgraph = Self::new_raw(vertices, edges, &hair); + // debug_assert_eq!(invalid_subgraph.sanity_check(decoding_graph), Ok(())); invalid_subgraph + + // previous implementation with directly finding the incident edges of a vertex + // // println!("input vertex to new_complete: {:?}", vertices); + // let mut hair: BTreeSet = BTreeSet::new(); + // for vertex_ptr in vertices.iter() { + // // println!("vertex index in new_complete: {:?}", vertex_ptr.read_recursive().vertex_index); + // for edge_ptr in vertex_ptr.read_recursive().edges.iter() { + // // println!("edges near vertex {:?}", edge_ptr.upgrade_force().read_recursive().edge_index); + // if !edges.contains(&edge_ptr.upgrade_force()) { + // hair.insert(edge_ptr.upgrade_force()); + // } + // } + // } + // let invalid_subgraph = Self::new_raw(vertices, edges, &hair); + // // debug_assert_eq!(invalid_subgraph.sanity_check(decoding_graph), Ok(())); + // invalid_subgraph } /// create $S = (V_S, E_S)$ and $\delta(S)$ directly, without any checks - pub fn new_raw(vertices: BTreeSet, edges: BTreeSet, hair: BTreeSet) -> Self { + pub fn new_raw(vertices: &BTreeSet, edges: &BTreeSet, hair: &BTreeSet) -> Self { let mut invalid_subgraph = Self { hash_value: 0, - vertices, - edges, - hair, + vertices: vertices.clone(), + edges: edges.clone(), + hair: hair.clone(), }; invalid_subgraph.update_hash(); invalid_subgraph @@ -98,6 +143,9 @@ impl InvalidSubgraph { pub fn update_hash(&mut self) { let mut hasher = DefaultHasher::new(); + // let _ = self.vertices.iter().map(|e|e.hash(&mut hasher)); + // let _ = self.edges.iter().map(|e|e.hash(&mut hasher)); + // let _ = self.hair.iter().map(|e|e.hash(&mut hasher)); self.vertices.hash(&mut hasher); self.edges.hash(&mut hasher); self.hair.hash(&mut hasher); @@ -111,57 +159,64 @@ impl InvalidSubgraph { return Err("an invalid subgraph must contain at least one vertex".to_string()); } // check if all vertices are valid - for &vertex_index in self.vertices.iter() { + for vertex_ptr in self.vertices.iter() { + let vertex_index = vertex_ptr.read_recursive().vertex_index; if vertex_index >= decoding_graph.model_graph.initializer.vertex_num { return Err(format!("vertex {vertex_index} is not a vertex in the model graph")); } } // check if every edge is subset of its vertices - for &edge_index in self.edges.iter() { + for edge_ptr in self.edges.iter() { + let edge = edge_ptr.read_recursive(); + let edge_index = edge.edge_index; if edge_index as usize >= decoding_graph.model_graph.initializer.weighted_edges.len() { return Err(format!("edge {edge_index} is not an edge in the model graph")); } - let hyperedge = &decoding_graph.model_graph.initializer.weighted_edges[edge_index as usize]; - for &vertex_index in hyperedge.vertices.iter() { - if !self.vertices.contains(&vertex_index) { + // let hyperedge = &decoding_graph.model_graph.initializer.weighted_edges[edge_index as usize]; + for vertex_weak in edge.vertices.iter() { + if !self.vertices.contains(&vertex_weak.upgrade_force()) { return Err(format!( "hyperedge {edge_index} connects vertices {:?}, \ - but vertex {vertex_index} is not in the invalid subgraph vertices {:?}", - hyperedge.vertices, self.vertices + but vertex {:?} is not in the invalid subgraph vertices {:?}", + edge.vertices, vertex_weak.upgrade_force().read_recursive().vertex_index, self.vertices )); } } } // check the edges indeed cannot satisfy the requirement of the vertices let mut matrix = Echelon::::new(); - for &edge_index in self.edges.iter() { - matrix.add_variable(edge_index); + for edge_ptr in self.edges.iter() { + matrix.add_variable(edge_ptr.downgrade()); } - for &vertex_index in self.vertices.iter() { - let incident_edges = decoding_graph.get_vertex_neighbors(vertex_index); - let parity = decoding_graph.is_vertex_defect(vertex_index); - matrix.add_constraint(vertex_index, incident_edges, parity); + for vertex_ptr in self.vertices.iter() { + let vertex = vertex_ptr.read_recursive(); + let incident_edges = &vertex.edges; + let parity = vertex.is_defect; + matrix.add_constraint(vertex_ptr.downgrade(), &incident_edges, parity); } if matrix.get_echelon_info().satisfiable { + let temp = matrix.get_solution().unwrap().into_iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect::>(); return Err(format!( "it's a valid subgraph because edges {:?} ⊆ {:?} can satisfy the parity requirement from vertices {:?}", - matrix.get_solution().unwrap(), - self.edges, - self.vertices + temp, + self.edges.iter().map(|e| e.upgradable_read().edge_index).collect::>(), + self.vertices.iter().map(|e| e.upgradable_read().vertex_index).collect::>(), )); } Ok(()) } - pub fn generate_matrix(&self, decoding_graph: &DecodingHyperGraph) -> EchelonMatrix { + pub fn generate_matrix(&self) -> EchelonMatrix { let mut matrix = EchelonMatrix::new(); - for &edge_index in self.hair.iter() { - matrix.add_variable(edge_index); + for edge_ptr in self.hair.iter() { + matrix.add_variable(edge_ptr.downgrade()); } - for &vertex_index in self.vertices.iter() { - let incident_edges = decoding_graph.get_vertex_neighbors(vertex_index); - let parity = decoding_graph.is_vertex_defect(vertex_index); - matrix.add_constraint(vertex_index, incident_edges, parity); + for vertex_ptr in self.vertices.iter() { + let vertex = vertex_ptr.read_recursive(); + // let incident_edges = &vertex.edges; + let incident_edges = &vertex_ptr.get_edge_neighbors(); + let parity = vertex.is_defect; + matrix.add_constraint(vertex_ptr.downgrade(), &incident_edges, parity); } matrix } @@ -169,103 +224,168 @@ impl InvalidSubgraph { // shortcuts for easier code writing at debugging impl InvalidSubgraph { - pub fn new_ptr(edges: BTreeSet, decoding_graph: &DecodingHyperGraph) -> Arc { - Arc::new(Self::new(edges, decoding_graph)) + pub fn new_ptr(edges: &BTreeSet) -> Arc { + Arc::new(Self::new(edges)) } - pub fn new_vec_ptr(edges: &[EdgeIndex], decoding_graph: &DecodingHyperGraph) -> Arc { - Self::new_ptr(edges.iter().cloned().collect(), decoding_graph) + pub fn new_vec_ptr(edges: &[EdgePtr]) -> Arc { + let strong_edges: BTreeSet = edges.iter().cloned().collect(); + Self::new_ptr(&strong_edges) } pub fn new_complete_ptr( - vertices: BTreeSet, - edges: BTreeSet, - decoding_graph: &DecodingHyperGraph, + vertices: &BTreeSet, + edges: &BTreeSet ) -> Arc { - Arc::new(Self::new_complete(vertices, edges, decoding_graph)) + Arc::new(Self::new_complete(vertices, edges)) } pub fn new_complete_vec_ptr( - vertices: BTreeSet, - edges: &[EdgeIndex], - decoding_graph: &DecodingHyperGraph, + vertices: &BTreeSet, + edges: &[EdgePtr], ) -> Arc { + // let strong_edges = edges.iter() + // .filter_map(|weak_edge| weak_edge.upgrade()) + // .collect(); + let strong_edges: BTreeSet = edges.iter().cloned().collect(); Self::new_complete_ptr( - vertices.iter().cloned().collect(), - edges.iter().cloned().collect(), - decoding_graph, + vertices, + &strong_edges ) } } +/// below are the original test based on indices, now we cannot test invalid subgraph alone since any invalid subgraph requires +/// the VertexPtr and EdgePtr created at the initialization of dual_module_pq. + #[cfg(test)] pub mod tests { use super::*; use crate::decoding_hypergraph::tests::*; + use num_traits::Zero; + use crate::dual_module_pq::{EdgePtr, Edge, VertexPtr, Vertex}; + use crate::pointers::*; + use crate::num_traits::FromPrimitive; + use std::collections::HashSet; #[test] fn invalid_subgraph_good() { // cargo test invalid_subgraph_good -- --nocapture let visualize_filename = "invalid_subgraph_good.json".to_string(); let (decoding_graph, ..) = color_code_5_decoding_graph(vec![7, 1], visualize_filename); - let invalid_subgraph_1 = InvalidSubgraph::new(vec![13].into_iter().collect(), decoding_graph.as_ref()); + let initializer = &decoding_graph.model_graph.initializer; + // create vertices + let vertices: Vec = (0..initializer.vertex_num) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + // set defect vertices + vertices[7].write().is_defect = true; + vertices[1].write().is_defect = true; + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // set edges + let mut edges = Vec::::new(); + for hyperedge in initializer.weighted_edges.iter() { + let edge_ptr = EdgePtr::new_value(Edge { + edge_index: edges.len() as EdgeIndex, + weight: Rational::from_usize(hyperedge.weight).unwrap(), + dual_nodes: vec![], + vertices: hyperedge + .vertices + .iter() + .map(|i| vertices[*i as usize].downgrade()) + .collect::>(), + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }); + for &vertex_index in hyperedge.vertices.iter() { + vertices[vertex_index as usize].write().edges.push(edge_ptr.downgrade()); + } + edges.push(edge_ptr); + } + + let mut invalid_subgraph_edges = BTreeSet::new(); + invalid_subgraph_edges.insert(edges[13].clone()); + + let invalid_subgraph_1 = InvalidSubgraph::new(&invalid_subgraph_edges); println!("invalid_subgraph_1: {invalid_subgraph_1:?}"); - assert_eq!(invalid_subgraph_1.vertices, vec![2, 6, 7].into_iter().collect()); - assert_eq!(invalid_subgraph_1.edges, vec![13].into_iter().collect()); + + let temp_vertices: HashSet<_> = invalid_subgraph_1.vertices.into_iter().map(|v| v.read_recursive().vertex_index).collect(); + let temp_edges: HashSet<_> = invalid_subgraph_1.edges.into_iter().map(|e| e.read_recursive().edge_index).collect(); + let temp_hair: HashSet<_> = invalid_subgraph_1.hair.into_iter().map(|e| e.read_recursive().edge_index).collect(); + + assert_eq!(temp_vertices, [2, 6, 7].into()); + assert_eq!(temp_edges, [13].into()); assert_eq!( - invalid_subgraph_1.hair, - vec![5, 6, 9, 10, 11, 12, 14, 15, 16, 17].into_iter().collect() + temp_hair, + [5, 6, 9, 10, 11, 12, 14, 15, 16, 17].into() ); } - #[test] - #[should_panic] - fn invalid_subgraph_bad() { - // cargo test invalid_subgraph_bad -- --nocapture - let visualize_filename = "invalid_subgraph_bad.json".to_string(); - let (decoding_graph, ..) = color_code_5_decoding_graph(vec![7, 1], visualize_filename); - let invalid_subgraph = InvalidSubgraph::new(vec![6, 10].into_iter().collect(), decoding_graph.as_ref()); - println!("invalid_subgraph: {invalid_subgraph:?}"); // should not print because it panics - } +// #[test] +// #[should_panic] +// fn invalid_subgraph_bad() { +// // cargo test invalid_subgraph_bad -- --nocapture +// let visualize_filename = "invalid_subgraph_bad.json".to_string(); +// let (decoding_graph, ..) = color_code_5_decoding_graph(vec![7, 1], visualize_filename); +// let invalid_subgraph = InvalidSubgraph::new(vec![6, 10].into_iter().collect(), decoding_graph.as_ref()); +// println!("invalid_subgraph: {invalid_subgraph:?}"); // should not print because it panics +// } - pub fn get_default_hash_value(object: &impl Hash) -> u64 { - let mut hasher = DefaultHasher::new(); - object.hash(&mut hasher); - hasher.finish() - } +// pub fn get_default_hash_value(object: &impl Hash) -> u64 { +// let mut hasher = DefaultHasher::new(); +// object.hash(&mut hasher); +// hasher.finish() +// } - #[test] - fn invalid_subgraph_hash() { - // cargo test invalid_subgraph_hash -- --nocapture - let vertices: BTreeSet = [1, 2, 3].into(); - let edges: BTreeSet = [4, 5].into(); - let hair: BTreeSet = [6, 7, 8].into(); - let invalid_subgraph_1 = InvalidSubgraph::new_raw(vertices.clone(), edges.clone(), hair.clone()); - let invalid_subgraph_2 = InvalidSubgraph::new_raw(vertices.clone(), edges.clone(), hair.clone()); - assert_eq!(invalid_subgraph_1, invalid_subgraph_2); - // they should have the same hash value - assert_eq!( - get_default_hash_value(&invalid_subgraph_1), - get_default_hash_value(&invalid_subgraph_1.hash_value) - ); - assert_eq!( - get_default_hash_value(&invalid_subgraph_1), - get_default_hash_value(&invalid_subgraph_2) - ); - // the pointer should also have the same hash value - let ptr_1 = Arc::new(invalid_subgraph_1.clone()); - let ptr_2 = Arc::new(invalid_subgraph_2); - assert_eq!(get_default_hash_value(&ptr_1), get_default_hash_value(&ptr_1.hash_value)); - assert_eq!(get_default_hash_value(&ptr_1), get_default_hash_value(&ptr_2)); - // any different value would generate a different invalid subgraph - assert_ne!( - invalid_subgraph_1, - InvalidSubgraph::new_raw([1, 2].into(), edges.clone(), hair.clone()) - ); - assert_ne!( - invalid_subgraph_1, - InvalidSubgraph::new_raw(vertices.clone(), [4, 5, 6].into(), hair.clone()) - ); - assert_ne!( - invalid_subgraph_1, - InvalidSubgraph::new_raw(vertices.clone(), edges.clone(), [6, 7].into()) - ); - } +// #[test] +// fn invalid_subgraph_hash() { +// // cargo test invalid_subgraph_hash -- --nocapture +// let vertices: BTreeSet = [1, 2, 3].into(); +// let edges: BTreeSet = [4, 5].into(); +// let hair: BTreeSet = [6, 7, 8].into(); +// let invalid_subgraph_1 = InvalidSubgraph::new_raw(vertices.clone(), edges.clone(), hair.clone()); +// let invalid_subgraph_2 = InvalidSubgraph::new_raw(vertices.clone(), edges.clone(), hair.clone()); +// assert_eq!(invalid_subgraph_1, invalid_subgraph_2); +// // they should have the same hash value +// assert_eq!( +// get_default_hash_value(&invalid_subgraph_1), +// get_default_hash_value(&invalid_subgraph_1.hash_value) +// ); +// assert_eq!( +// get_default_hash_value(&invalid_subgraph_1), +// get_default_hash_value(&invalid_subgraph_2) +// ); +// // the pointer should also have the same hash value +// let ptr_1 = Arc::new(invalid_subgraph_1.clone()); +// let ptr_2 = Arc::new(invalid_subgraph_2); +// assert_eq!(get_default_hash_value(&ptr_1), get_default_hash_value(&ptr_1.hash_value)); +// assert_eq!(get_default_hash_value(&ptr_1), get_default_hash_value(&ptr_2)); +// // any different value would generate a different invalid subgraph +// assert_ne!( +// invalid_subgraph_1, +// InvalidSubgraph::new_raw([1, 2].into(), edges.clone(), hair.clone()) +// ); +// assert_ne!( +// invalid_subgraph_1, +// InvalidSubgraph::new_raw(vertices.clone(), [4, 5, 6].into(), hair.clone()) +// ); +// assert_ne!( +// invalid_subgraph_1, +// InvalidSubgraph::new_raw(vertices.clone(), edges.clone(), [6, 7].into()) +// ); +// } } diff --git a/src/lib.rs b/src/lib.rs index 6b1aeffe..060c7709 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,6 +14,7 @@ extern crate more_asserts; extern crate num_rational; extern crate num_traits; extern crate parking_lot; +extern crate core_affinity; #[cfg(feature = "cli")] extern crate pbr; extern crate prettytable; @@ -21,28 +22,35 @@ extern crate prettytable; extern crate pyo3; extern crate rand; extern crate rand_xoshiro; +#[cfg(feature = "slp")] extern crate slp; extern crate urlencoding; #[cfg(feature = "wasm_binding")] extern crate wasm_bindgen; +extern crate rayon; +extern crate weak_table; + #[cfg(feature = "cli")] pub mod cli; pub mod decoding_hypergraph; pub mod dual_module; pub mod dual_module_pq; pub mod dual_module_serial; +pub mod dual_module_parallel; pub mod example_codes; pub mod invalid_subgraph; pub mod matrix; pub mod model_hypergraph; pub mod mwpf_solver; +pub mod ordered_float; pub mod plugin; pub mod plugin_single_hair; pub mod plugin_union_find; pub mod pointers; pub mod primal_module; pub mod primal_module_serial; +pub mod primal_module_parallel; pub mod primal_module_union_find; pub mod relaxer; pub mod relaxer_forest; @@ -71,30 +79,32 @@ use wasm_bindgen::prelude::*; pub fn get_version() -> String { use decoding_hypergraph::*; use dual_module::*; - use dual_module_serial::*; + use dual_module_pq::*; use example_codes::*; use primal_module::*; use primal_module_serial::*; + use crate::util::Rational; + // TODO: I'm just testing basic functionality let defect_vertices = vec![23, 24, 29, 30]; let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); // create dual module let model_graph = code.get_model_graph(); - let mut dual_module = DualModuleSerial::new_empty(&model_graph.initializer); + let mut dual_module: DualModulePQ> = DualModulePQ::new_empty(&model_graph.initializer); // create primal module let mut primal_module = PrimalModuleSerial::new_empty(&model_graph.initializer); primal_module.growing_strategy = GrowingStrategy::SingleCluster; primal_module.plugins = std::sync::Arc::new(vec![]); // try to work on a simple syndrome let decoding_graph = DecodingHyperGraph::new_defects(model_graph, defect_vertices.clone()); - let interface_ptr = DualModuleInterfacePtr::new(decoding_graph.model_graph.clone()); + let interface_ptr = DualModuleInterfacePtr::new(); primal_module.solve_visualizer( &interface_ptr, decoding_graph.syndrome_pattern.clone(), &mut dual_module, None, ); - let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module); + let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, 0); println!("subgraph: {subgraph:?}"); // env!("CARGO_PKG_VERSION").to_string() format!("subgraph: {subgraph:?}, weight_range: {weight_range:?}") diff --git a/src/main.rs b/src/main.rs index 2c0bedff..a3cf1f21 100644 --- a/src/main.rs +++ b/src/main.rs @@ -5,5 +5,8 @@ use crate::clap::Parser; use mwpf::cli::*; pub fn main() { + #[cfg(all(feature = "slp", feature = "incr_lp"))] + panic!("slp does not support incr_lp!"); + Cli::parse().run(); } diff --git a/src/matrix/basic.rs b/src/matrix/basic.rs index 4fd1e3ff..bdf88b10 100644 --- a/src/matrix/basic.rs +++ b/src/matrix/basic.rs @@ -5,52 +5,57 @@ use crate::util::*; use derivative::Derivative; use std::collections::{BTreeMap, BTreeSet}; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; + #[derive(Clone, Derivative)] #[derivative(Default(new = "true"))] pub struct BasicMatrix { /// the vertices already maintained by this parity check - pub vertices: BTreeSet, + pub vertices: BTreeSet, /// the edges maintained by this parity check, mapping to the local indices - pub edges: BTreeMap, + pub edges: BTreeMap, /// variable index map to edge index - pub variables: Vec, + pub variables: Vec, pub constraints: Vec, } impl MatrixBasic for BasicMatrix { - fn add_variable(&mut self, edge_index: EdgeIndex) -> Option { - if self.edges.contains_key(&edge_index) { + fn add_variable(&mut self, edge_weak: EdgeWeak) -> Option { + if self.edges.contains_key(&edge_weak.upgrade_force()) { // variable already exists return None; } let var_index = self.variables.len(); - self.edges.insert(edge_index, var_index); - self.variables.push(edge_index); + self.edges.insert(edge_weak.upgrade_force(), var_index); + self.variables.push(edge_weak.clone()); ParityRow::add_one_variable(&mut self.constraints, self.variables.len()); Some(var_index) } fn add_constraint( &mut self, - vertex_index: VertexIndex, - incident_edges: &[EdgeIndex], + vertex_weak: VertexWeak, + incident_edges: &[EdgeWeak], parity: bool, ) -> Option> { - if self.vertices.contains(&vertex_index) { + if self.vertices.contains(&vertex_weak.upgrade_force()) { // no need to add repeat constraint return None; } let mut var_indices = None; - self.vertices.insert(vertex_index); - for &edge_index in incident_edges.iter() { - if let Some(var_index) = self.add_variable(edge_index) { + self.vertices.insert(vertex_weak.upgrade_force()); + for edge_weak in incident_edges.iter() { + if let Some(var_index) = self.add_variable(edge_weak.clone()) { // this is a newly added edge var_indices.get_or_insert_with(Vec::new).push(var_index); } } let mut row = ParityRow::new_length(self.variables.len()); - for &edge_index in incident_edges.iter() { - let var_index = self.edges[&edge_index]; + for edge_weak in incident_edges.iter() { + let var_index = self.edges[&edge_weak.upgrade_force()]; row.set_left(var_index, true); } row.set_right(parity); @@ -75,15 +80,15 @@ impl MatrixBasic for BasicMatrix { self.constraints[row].get_right() } - fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeIndex { - self.variables[var_index] + fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeWeak { + self.variables[var_index].clone() } - fn edge_to_var_index(&self, edge_index: EdgeIndex) -> Option { - self.edges.get(&edge_index).cloned() + fn edge_to_var_index(&self, edge_weak: EdgeWeak) -> Option { + self.edges.get(&edge_weak.upgrade_force()).cloned() } - fn get_vertices(&self) -> BTreeSet { + fn get_vertices(&self) -> BTreeSet { self.vertices.clone() } } @@ -110,7 +115,13 @@ impl VizTrait for BasicMatrix { #[cfg(test)] pub mod tests { + use hashbrown::HashSet; + use num_traits::Zero; + use super::*; + use crate::dual_module_pq::{EdgePtr, Edge, VertexPtr, Vertex}; + use crate::pointers::*; + #[test] fn basic_matrix_1() { @@ -126,10 +137,45 @@ pub mod tests { └┴───┘ " ); - matrix.add_variable(1); - matrix.add_variable(4); - matrix.add_variable(12); - matrix.add_variable(345); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 12, 345].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + matrix.add_variable(edges[0].downgrade()); + matrix.add_variable(edges[1].downgrade()); + matrix.add_variable(edges[2].downgrade()); + matrix.add_variable(edges[3].downgrade()); matrix.printstd(); assert_eq!( matrix.printstd_str(), @@ -142,9 +188,9 @@ pub mod tests { └┴─┴─┴─┴─┴───┘ " ); - matrix.add_constraint(0, &[1, 4, 12], true); - matrix.add_constraint(1, &[4, 345], false); - matrix.add_constraint(2, &[1, 345], true); + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -162,17 +208,54 @@ pub mod tests { └─┴─┴─┴─┴─┴───┘ " ); - assert_eq!(matrix.get_vertices(), [0, 1, 2].into()); - assert_eq!(matrix.get_view_edges(), [1, 4, 12, 345]); + let matrix_vertices: HashSet<_> = matrix.get_vertices().into_iter().map(|v| v.upgradable_read().vertex_index).collect(); + assert_eq!(matrix_vertices, [0, 1, 2].into()); + assert_eq!(matrix.get_view_edges().into_iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect::>(), [1, 4, 12, 345]); } #[test] fn basic_matrix_should_not_add_repeated_constraint() { // cargo test --features=colorful basic_matrix_should_not_add_repeated_constraint -- --nocapture let mut matrix = BasicMatrix::new(); - assert_eq!(matrix.add_constraint(0, &[1, 4, 8], false), Some(vec![0, 1, 2])); - assert_eq!(matrix.add_constraint(1, &[4, 8], true), None); - assert_eq!(matrix.add_constraint(0, &[4], true), None); // repeated + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 8].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + assert_eq!(matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], false), Some(vec![0, 1, 2])); + assert_eq!(matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[2].downgrade()], true), None); + assert_eq!(matrix.add_constraint(vertices[0].downgrade(), &[edges[1].downgrade()], true), None); // repeated matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -191,10 +274,48 @@ pub mod tests { #[test] fn basic_matrix_row_operations() { // cargo test --features=colorful basic_matrix_row_operations -- --nocapture + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + + let mut matrix = BasicMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -247,10 +368,51 @@ pub mod tests { #[test] fn basic_matrix_manual_echelon() { // cargo test --features=colorful basic_matrix_manual_echelon -- --nocapture + + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + let mut matrix = BasicMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); + + matrix.printstd(); + matrix.xor_row(2, 0); matrix.xor_row(0, 1); matrix.xor_row(2, 1); diff --git a/src/matrix/complete.rs b/src/matrix/complete.rs index 07225159..3f8655a2 100644 --- a/src/matrix/complete.rs +++ b/src/matrix/complete.rs @@ -5,22 +5,27 @@ use crate::util::*; use derivative::Derivative; use std::collections::{BTreeMap, BTreeSet}; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; + /// complete matrix considers a predefined set of edges and won't consider any other edges #[derive(Clone, Derivative)] #[derivative(Default(new = "true"))] pub struct CompleteMatrix { /// the vertices already maintained by this parity check - vertices: BTreeSet, + vertices: BTreeSet, /// the edges maintained by this parity check, mapping to the local indices - edges: BTreeMap, + edges: BTreeMap, /// variable index map to edge index - variables: Vec, + variables: Vec, constraints: Vec, } impl MatrixBasic for CompleteMatrix { - fn add_variable(&mut self, edge_index: EdgeIndex) -> Option { - if self.edges.contains_key(&edge_index) { + fn add_variable(&mut self, edge_weak: EdgeWeak) -> Option { + if self.edges.contains_key(&edge_weak.upgrade_force()) { // variable already exists return None; } @@ -28,26 +33,26 @@ impl MatrixBasic for CompleteMatrix { panic!("complete matrix doesn't allow dynamic edges, please insert all edges at the beginning") } let var_index = self.variables.len(); - self.edges.insert(edge_index, var_index); - self.variables.push(edge_index); + self.edges.insert(edge_weak.upgrade_force(), var_index); + self.variables.push(edge_weak); Some(var_index) } fn add_constraint( &mut self, - vertex_index: VertexIndex, - incident_edges: &[EdgeIndex], + vertex_weak: VertexWeak, + incident_edges: &[EdgeWeak], parity: bool, ) -> Option> { - if self.vertices.contains(&vertex_index) { + if self.vertices.contains(&vertex_weak.upgrade_force()) { // no need to add repeat constraint return None; } - self.vertices.insert(vertex_index); + self.vertices.insert(vertex_weak.upgrade_force()); let mut row = ParityRow::new_length(self.variables.len()); - for &edge_index in incident_edges.iter() { - if self.exists_edge(edge_index) { - let var_index = self.edges[&edge_index]; + for edge_index in incident_edges.iter() { + if self.exists_edge(edge_index.clone()) { + let var_index = self.edges[&edge_index.upgrade_force()]; row.set_left(var_index, true); } } @@ -74,15 +79,15 @@ impl MatrixBasic for CompleteMatrix { self.constraints[row].get_right() } - fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeIndex { - self.variables[var_index] + fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeWeak { + self.variables[var_index].clone() } - fn edge_to_var_index(&self, edge_index: EdgeIndex) -> Option { - self.edges.get(&edge_index).cloned() + fn edge_to_var_index(&self, edge_weak: EdgeWeak) -> Option { + self.edges.get(&edge_weak.upgrade_force()).cloned() } - fn get_vertices(&self) -> BTreeSet { + fn get_vertices(&self) -> BTreeSet { self.vertices.clone() } } @@ -110,15 +115,54 @@ impl VizTrait for CompleteMatrix { #[cfg(test)] pub mod tests { use crate::matrix::Echelon; - + use crate::dual_module_pq::{EdgePtr, Edge, VertexPtr, Vertex}; + use crate::pointers::*; use super::*; + use num_traits::Zero; #[test] fn complete_matrix_1() { // cargo test --features=colorful complete_matrix_1 -- --nocapture let mut matrix = CompleteMatrix::new(); - for edge_index in [1, 4, 12, 345] { - matrix.add_variable(edge_index); + + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 12, 345].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + for edge_ptr in edges.iter() { + matrix.add_variable(edge_ptr.downgrade()); } matrix.printstd(); assert_eq!( @@ -132,9 +176,9 @@ pub mod tests { └┴─┴─┴─┴─┴───┘ " ); - matrix.add_constraint(0, &[1, 4, 12], true); - matrix.add_constraint(1, &[4, 345], false); - matrix.add_constraint(2, &[1, 345], true); + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -152,20 +196,59 @@ pub mod tests { └─┴─┴─┴─┴─┴───┘ " ); - assert_eq!(matrix.get_vertices(), [0, 1, 2].into()); - assert_eq!(matrix.get_view_edges(), [1, 4, 12, 345]); + + use std::collections::HashSet; + let matrix_vertices: HashSet<_> = matrix.get_vertices().into_iter().map(|v| v.upgradable_read().vertex_index).collect(); + assert_eq!(matrix_vertices, [0, 1, 2].into()); + assert_eq!(matrix.get_view_edges().into_iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect::>(), [1, 4, 12, 345]); } #[test] fn complete_matrix_should_not_add_repeated_constraint() { // cargo test --features=colorful complete_matrix_should_not_add_repeated_constraint -- --nocapture let mut matrix = CompleteMatrix::new(); - for edge_index in [1, 4, 8] { - matrix.add_variable(edge_index); + + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 8].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + for edge_ptr in edges.iter() { + matrix.add_variable(edge_ptr.downgrade()); } - assert_eq!(matrix.add_constraint(0, &[1, 4, 8], false), None); - assert_eq!(matrix.add_constraint(1, &[4, 8], true), None); - assert_eq!(matrix.add_constraint(0, &[4], true), None); // repeated + assert_eq!(matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], false), None); + assert_eq!(matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[2].downgrade()], true), None); + assert_eq!(matrix.add_constraint(vertices[0].downgrade(), &[edges[1].downgrade()], true), None); // repeated matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -185,12 +268,51 @@ pub mod tests { fn complete_matrix_row_operations() { // cargo test --features=colorful complete_matrix_row_operations -- --nocapture let mut matrix = CompleteMatrix::new(); - for edge_index in [1, 4, 6, 9] { - matrix.add_variable(edge_index); + + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + + for edge_ptr in edges.iter() { + matrix.add_variable(edge_ptr.downgrade()); } - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -244,12 +366,54 @@ pub mod tests { fn complete_matrix_manual_echelon() { // cargo test --features=colorful complete_matrix_manual_echelon -- --nocapture let mut matrix = CompleteMatrix::new(); - for edge_index in [1, 4, 6, 9, 9, 6, 4, 1] { - matrix.add_variable(edge_index); + + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + for edge_ptr in edges.iter() { + matrix.add_variable(edge_ptr.downgrade()); } - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); + + for &edge_index in [3, 2, 1, 0].iter() { + matrix.add_variable(edges[edge_index].downgrade()); + } + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); matrix.xor_row(2, 0); matrix.xor_row(0, 1); matrix.xor_row(2, 1); @@ -275,12 +439,66 @@ pub mod tests { fn complete_matrix_automatic_echelon() { // cargo test --features=colorful complete_matrix_automatic_echelon -- --nocapture let mut matrix = Echelon::::new(); - for edge_index in [1, 4, 6, 9] { - matrix.add_variable(edge_index); + + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + let edges_more: Vec = vec![11, 12, 23].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + for edge_ptr in edges.iter() { + matrix.add_variable(edge_ptr.downgrade()); } - matrix.add_constraint(0, &[1, 4, 6, 11, 12], true); - matrix.add_constraint(1, &[4, 9, 23, 12], false); - matrix.add_constraint(2, &[1, 9, 11], true); + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade(), edges_more[0].downgrade(), edges_more[1].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade(), edges_more[2].downgrade(), edges_more[1].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade(), edges_more[0].downgrade()], true); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -305,12 +523,63 @@ pub mod tests { fn complete_matrix_dynamic_variables_forbidden() { // cargo test complete_matrix_dynamic_variables_forbidden -- --nocapture let mut matrix = Echelon::::new(); - for edge_index in [1, 4, 6, 9] { - matrix.add_variable(edge_index); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + for edge_ptr in edges.iter() { + matrix.add_variable(edge_ptr.downgrade()); } - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); - matrix.add_variable(2); + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); + + let another_edge = EdgePtr::new_value(Edge { + edge_index: 2, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }); + + + matrix.add_variable(another_edge.downgrade()); } } diff --git a/src/matrix/echelon.rs b/src/matrix/echelon.rs index ee2e96e9..4aed796a 100644 --- a/src/matrix/echelon.rs +++ b/src/matrix/echelon.rs @@ -2,9 +2,15 @@ use super::interface::*; use super::visualize::*; use crate::util::*; use core::panic; +use std::collections::BTreeSet; use derivative::Derivative; use prettytable::*; -use std::collections::BTreeSet; + +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; + #[derive(Clone, Derivative)] #[derivative(Default(new = "true"))] @@ -24,39 +30,39 @@ impl Echelon { } impl MatrixTail for Echelon { - fn get_tail_edges(&self) -> &BTreeSet { + fn get_tail_edges(&self) -> &BTreeSet { self.base.get_tail_edges() } - fn get_tail_edges_mut(&mut self) -> &mut BTreeSet { + fn get_tail_edges_mut(&mut self) -> &mut BTreeSet{ self.is_info_outdated = true; self.base.get_tail_edges_mut() } } impl MatrixTight for Echelon { - fn update_edge_tightness(&mut self, edge_index: EdgeIndex, is_tight: bool) { + fn update_edge_tightness(&mut self, edge_weak: EdgeWeak, is_tight: bool) { self.is_info_outdated = true; - self.base.update_edge_tightness(edge_index, is_tight) + self.base.update_edge_tightness(edge_weak, is_tight) } - fn is_tight(&self, edge_index: usize) -> bool { - self.base.is_tight(edge_index) + fn is_tight(&self, edge_weak: EdgeWeak) -> bool { + self.base.is_tight(edge_weak) } } impl MatrixBasic for Echelon { - fn add_variable(&mut self, edge_index: EdgeIndex) -> Option { + fn add_variable(&mut self, edge_weak: EdgeWeak) -> Option { self.is_info_outdated = true; - self.base.add_variable(edge_index) + self.base.add_variable(edge_weak) } fn add_constraint( &mut self, - vertex_index: VertexIndex, - incident_edges: &[EdgeIndex], + vertex_weak: VertexWeak, + incident_edges: &[EdgeWeak], parity: bool, ) -> Option> { self.is_info_outdated = true; - self.base.add_constraint(vertex_index, incident_edges, parity) + self.base.add_constraint(vertex_weak, incident_edges, parity) } fn xor_row(&mut self, _target: RowIndex, _source: RowIndex) { @@ -71,13 +77,13 @@ impl MatrixBasic for Echelon { fn get_rhs(&self, row: RowIndex) -> bool { self.get_base().get_rhs(row) } - fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeIndex { + fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeWeak { self.get_base().var_to_edge_index(var_index) } - fn edge_to_var_index(&self, edge_index: EdgeIndex) -> Option { - self.get_base().edge_to_var_index(edge_index) + fn edge_to_var_index(&self, edge_weak: EdgeWeak) -> Option { + self.get_base().edge_to_var_index(edge_weak) } - fn get_vertices(&self) -> BTreeSet { + fn get_vertices(&self) -> BTreeSet { self.get_base().get_vertices() } } @@ -244,7 +250,8 @@ impl VizTrait for Echelon { table.title.add_cell(Cell::new("\u{25BC}")); for (row, row_info) in info.rows.iter().enumerate() { let cell = if row_info.has_leading() { - Cell::new(self.column_to_edge_index(row_info.column).to_string().as_str()).style_spec("irFm") + Cell::new(self.column_to_edge_index(row_info.column).upgrade_force().read_recursive().edge_index + .to_string().as_str()).style_spec("irFm") } else { Cell::new("*").style_spec("rFr") }; @@ -275,6 +282,10 @@ pub mod tests { use super::super::tight::*; use super::*; use crate::rand::{Rng, SeedableRng}; + use num_traits::Zero; + + use crate::dual_module_pq::{EdgePtr, Edge, VertexPtr, Vertex}; + use crate::pointers::*; type EchelonMatrix = Echelon>>; @@ -282,12 +293,51 @@ pub mod tests { fn echelon_matrix_simple() { // cargo test --features=colorful echelon_matrix_simple -- --nocapture let mut matrix = EchelonMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); - assert_eq!(matrix.edge_to_var_index(4), Some(1)); - for edge_index in [1, 4, 6, 9] { - matrix.update_edge_tightness(edge_index, true); + + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); + assert_eq!(matrix.edge_to_var_index(edges[1].downgrade()), Some(1)); + + for edge_ptr in edges.iter() { + matrix.update_edge_tightness(edge_ptr.downgrade(), true); } matrix.printstd(); assert_eq!( @@ -306,8 +356,8 @@ pub mod tests { └──┴─┴─┴─┴─┴───┴─┘ " ); - matrix.set_tail_edges([6, 1].into_iter()); - assert_eq!(matrix.get_tail_edges_vec(), [1, 6]); + matrix.set_tail_edges([edges[2].downgrade(), edges[0].downgrade()].into_iter()); + assert_eq!(matrix.get_tail_edges_vec().into_iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect::>(), [1, 6]); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -325,7 +375,7 @@ pub mod tests { └──┴─┴─┴─┴─┴───┴─┘ " ); - matrix.set_tail_edges([4].into_iter()); + matrix.set_tail_edges([edges[1].downgrade()].into_iter()); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -343,7 +393,7 @@ pub mod tests { └──┴─┴─┴─┴─┴───┴─┘ " ); - matrix.update_edge_tightness(6, false); + matrix.update_edge_tightness(edges[2].downgrade(), false); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -359,8 +409,8 @@ pub mod tests { └──┴─┴─┴─┴───┴─┘ " ); - matrix.update_edge_tightness(1, false); - matrix.update_edge_tightness(9, false); + matrix.update_edge_tightness(edges[0].downgrade(), false); + matrix.update_edge_tightness(edges[3].downgrade(), false); matrix.printstd(); } @@ -369,8 +419,47 @@ pub mod tests { fn echelon_matrix_should_not_xor() { // cargo test echelon_matrix_should_not_xor -- --nocapture let mut matrix = EchelonMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); + + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); matrix.xor_row(0, 1); } @@ -379,8 +468,43 @@ pub mod tests { fn echelon_matrix_should_not_swap() { // cargo test echelon_matrix_should_not_swap -- --nocapture let mut matrix = EchelonMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); matrix.swap_row(0, 1); } @@ -388,12 +512,47 @@ pub mod tests { fn echelon_matrix_basic_trait() { // cargo test --features=colorful echelon_matrix_basic_trait -- --nocapture let mut matrix = EchelonMatrix::new(); - matrix.add_variable(3); // un-tight edges will not show - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); - for edge_index in [1, 4, 6, 9] { - matrix.update_edge_tightness(edge_index, true); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9, 3].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + matrix.add_variable(edges[4].downgrade()); // un-tight edges will not show + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); + for edge_index in [0, 1, 2, 3] { + matrix.update_edge_tightness(edges[edge_index].downgrade(), true); } matrix.printstd(); assert_eq!( @@ -412,8 +571,8 @@ pub mod tests { └──┴─┴─┴─┴─┴───┴─┘ " ); - assert!(matrix.is_tight(1)); - assert_eq!(matrix.edge_to_var_index(4), Some(2)); + assert!(matrix.is_tight(edges[0].downgrade())); + assert_eq!(matrix.edge_to_var_index(edges[1].downgrade()), Some(2)); } #[test] @@ -421,8 +580,43 @@ pub mod tests { fn echelon_matrix_cannot_call_dirty_column() { // cargo test echelon_matrix_cannot_call_dirty_column -- --nocapture let mut matrix = EchelonMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.update_edge_tightness(1, true); + + // create vertices + let vertices: Vec = (0..1) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.update_edge_tightness(edges[0].downgrade(), true); // even though there is indeed such a column, we forbid such dangerous calls // always call `columns()` before accessing any column matrix.column_to_var_index(0); @@ -433,8 +627,44 @@ pub mod tests { fn echelon_matrix_cannot_call_dirty_echelon_info() { // cargo test echelon_matrix_cannot_call_dirty_echelon_info -- --nocapture let mut matrix = EchelonMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.update_edge_tightness(1, true); + + // create vertices + let vertices: Vec = (0..1) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.update_edge_tightness(edges[0].downgrade(), true); // even though there is indeed such a column, we forbid such dangerous calls // always call `columns()` before accessing any column matrix.get_echelon_info_immutable(); @@ -466,7 +696,43 @@ pub mod tests { fn echelon_matrix_no_variable_satisfiable() { // cargo test --features=colorful echelon_matrix_no_variable_satisfiable -- --nocapture let mut matrix = EchelonMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], false); + + // create vertices + let vertices: Vec = (0..1) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], false); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -489,7 +755,42 @@ pub mod tests { fn echelon_matrix_no_variable_unsatisfiable() { // cargo test --features=colorful echelon_matrix_no_variable_unsatisfiable -- --nocapture let mut matrix: Echelon>> = EchelonMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); + + // create vertices + let vertices: Vec = (0..1) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -514,12 +815,48 @@ pub mod tests { fn echelon_matrix_no_more_variable_satisfiable() { // cargo test --features=colorful echelon_matrix_no_more_variable_satisfiable -- --nocapture let mut matrix: Echelon>> = EchelonMatrix::new(); - matrix.add_constraint(0, &[0, 1], true); - matrix.add_constraint(1, &[1, 2], true); - matrix.add_constraint(2, &[2, 3], true); - matrix.add_constraint(3, &[3, 1], false); - for edge_index in [0, 1, 2, 3] { - matrix.update_edge_tightness(edge_index, true); + + + // create vertices + let vertices: Vec = (0..4) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![0, 1, 2, 3].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[2].downgrade(), &[edges[2].downgrade(), edges[3].downgrade()], true); + matrix.add_constraint(vertices[3].downgrade(), &[edges[3].downgrade(), edges[1].downgrade()], false); + for edge_index in edges.iter() { + matrix.update_edge_tightness(edge_index.downgrade(), true); } matrix.printstd(); assert_eq!( @@ -544,12 +881,47 @@ pub mod tests { fn echelon_matrix_no_more_variable_unsatisfiable() { // cargo test --features=colorful echelon_matrix_no_more_variable_satisfiable -- --nocapture let mut matrix: Echelon>> = EchelonMatrix::new(); - matrix.add_constraint(0, &[0, 1], true); - matrix.add_constraint(1, &[1, 2], true); - matrix.add_constraint(2, &[2, 3], true); - matrix.add_constraint(3, &[3, 1], true); - for edge_index in [0, 1, 2, 3] { - matrix.update_edge_tightness(edge_index, true); + + // create vertices + let vertices: Vec = (0..4) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![0, 1, 2, 3].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[2].downgrade(), &[edges[2].downgrade(), edges[3].downgrade()], true); + matrix.add_constraint(vertices[3].downgrade(), &[edges[3].downgrade(), edges[1].downgrade()], true); + for edge_index in edges.iter() { + matrix.update_edge_tightness(edge_index.downgrade(), true); } matrix.printstd(); assert_eq!( @@ -745,15 +1117,50 @@ pub mod tests { fn echelon_matrix_another_echelon_simple() { // cargo test --features=colorful echelon_matrix_another_echelon_simple -- --nocapture let mut echelon = EchelonMatrix::new(); - for edge_index in 0..7 { - echelon.add_tight_variable(edge_index); + + // create vertices + let vertices: Vec = (0..6) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![0, 1, 2, 3, 4, 5, 6].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + for edge_index in edges.iter() { + echelon.add_tight_variable(edge_index.downgrade()); } - echelon.add_constraint(0, &[0, 1], true); - echelon.add_constraint(1, &[0, 2], false); - echelon.add_constraint(2, &[2, 3, 5], false); - echelon.add_constraint(3, &[1, 3, 4], false); - echelon.add_constraint(4, &[4, 6], false); - echelon.add_constraint(5, &[5, 6], true); + echelon.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade()], true); + echelon.add_constraint(vertices[1].downgrade(), &[edges[0].downgrade(), edges[2].downgrade()], false); + echelon.add_constraint(vertices[2].downgrade(), &[edges[2].downgrade(), edges[3].downgrade(), edges[5].downgrade()], false); + echelon.add_constraint(vertices[3].downgrade(), &[edges[1].downgrade(), edges[3].downgrade(), edges[4].downgrade()], false); + echelon.add_constraint(vertices[4].downgrade(), &[edges[4].downgrade(), edges[6].downgrade()], false); + echelon.add_constraint(vertices[5].downgrade(), &[edges[5].downgrade(), edges[6].downgrade()], true); let mut another = YetAnotherRowEchelon::new(&echelon); another.print(); // both go to echelon form @@ -769,17 +1176,55 @@ pub mod tests { // cargo test --release echelon_matrix_another_random_tests -- --nocapture let mut rng = DeterministicRng::seed_from_u64(123); let repeat = 50; + let global_time = ArcRwLock::new_value(Rational::zero()); + for variable_count in 0..31 { for constraint_count in 0..31 { for _ in 0..repeat { let mut echelon = EchelonMatrix::new(); + + // create edges + let edges: Vec = (0..variable_count) + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + for edge_index in 0..variable_count { - echelon.add_tight_variable(edge_index); + echelon.add_tight_variable(edges[edge_index].downgrade()); } let parity_checks = generate_random_parity_checks(&mut rng, variable_count, constraint_count); + + // create vertices + let vertices: Vec = (0..parity_checks.len()) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + // println!("variable_count: {variable_count}, parity_checks: {parity_checks:?}"); for (vertex_index, (incident_edges, parity)) in parity_checks.iter().enumerate() { - echelon.add_constraint(vertex_index, incident_edges, *parity); + let incident_edges_weak: Vec = incident_edges.iter().map(|&i| edges[i].downgrade()).collect(); + + echelon.add_constraint(vertices[vertex_index].downgrade(), &incident_edges_weak, *parity); } let mut another = YetAnotherRowEchelon::new(&echelon); // echelon.printstd(); @@ -795,14 +1240,19 @@ pub mod tests { } } - fn debug_echelon_matrix_case(variable_count: usize, parity_checks: Vec<(Vec, bool)>) -> EchelonMatrix { + fn debug_echelon_matrix_case(variable_count: usize, parity_checks: Vec<(Vec, bool)>, edges: &Vec, vertices: &Vec) -> EchelonMatrix { let mut echelon = EchelonMatrix::new(); + for edge_index in 0..variable_count { - echelon.add_tight_variable(edge_index); + echelon.add_tight_variable(edges[edge_index].downgrade()); } + for (vertex_index, (incident_edges, parity)) in parity_checks.iter().enumerate() { - echelon.add_constraint(vertex_index, incident_edges, *parity); + let incident_edges_weak: Vec = incident_edges.iter().map(|&i| edges[i].downgrade()).collect(); + + echelon.add_constraint(vertices[vertex_index].downgrade(), &incident_edges_weak, *parity); } + echelon.printstd(); echelon } @@ -811,7 +1261,42 @@ pub mod tests { fn echelon_matrix_debug_1() { // cargo test --features=colorful echelon_matrix_debug_1 -- --nocapture let parity_checks = vec![(vec![0], true), (vec![0, 1], true), (vec![], true)]; - let mut echelon = debug_echelon_matrix_case(2, parity_checks); + let variable_count = 2; + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = (0..variable_count) + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + // create vertices + let vertices: Vec = (0..parity_checks.len()) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let mut echelon = debug_echelon_matrix_case(variable_count, parity_checks, &edges, &vertices); echelon.printstd(); assert_eq!( echelon.printstd_str(), @@ -835,7 +1320,42 @@ pub mod tests { fn echelon_matrix_debug_2() { // cargo test --features=colorful echelon_matrix_debug_2 -- --nocapture let parity_checks = vec![]; - let mut echelon = debug_echelon_matrix_case(1, parity_checks); + let variable_count = 1; + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = (0..variable_count) + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + // create vertices + let vertices: Vec = (0..parity_checks.len()) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let mut echelon = debug_echelon_matrix_case(1, parity_checks, &edges, &vertices); echelon.printstd(); assert_eq!( echelon.printstd_str(), diff --git a/src/matrix/hair.rs b/src/matrix/hair.rs index 975aca72..e6ccaad2 100644 --- a/src/matrix/hair.rs +++ b/src/matrix/hair.rs @@ -8,6 +8,10 @@ use super::visualize::*; use crate::util::*; use prettytable::*; use std::collections::*; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; pub struct HairView<'a, M: MatrixTail + MatrixEchelon> { base: &'a mut M, @@ -19,7 +23,7 @@ impl<'a, M: MatrixTail + MatrixEchelon> HairView<'a, M> { pub fn get_base(&self) -> &M { self.base } - pub fn get_base_view_edges(&mut self) -> Vec { + pub fn get_base_view_edges(&mut self) -> Vec { self.base.get_view_edges() } } @@ -27,7 +31,7 @@ impl<'a, M: MatrixTail + MatrixEchelon> HairView<'a, M> { impl<'a, M: MatrixTail + MatrixEchelon> HairView<'a, M> { pub fn new(matrix: &'a mut M, hair: EdgeIter) -> Self where - EdgeIter: Iterator, + EdgeIter: Iterator, { matrix.set_tail_edges(hair); let columns = matrix.columns(); @@ -36,7 +40,7 @@ impl<'a, M: MatrixTail + MatrixEchelon> HairView<'a, M> { let mut row_bias = rows; for column in (0..columns).rev() { let edge_index = matrix.column_to_edge_index(column); - if matrix.get_tail_edges().contains(&edge_index) { + if matrix.get_tail_edges().contains(&edge_index.upgrade_force()) { column_bias = column; } else { break; @@ -71,10 +75,10 @@ impl<'a, M: MatrixTail + MatrixEchelon> HairView<'a, M> { } impl<'a, M: MatrixTail + MatrixEchelon> MatrixTail for HairView<'a, M> { - fn get_tail_edges(&self) -> &BTreeSet { + fn get_tail_edges(&self) -> &BTreeSet { self.get_base().get_tail_edges() } - fn get_tail_edges_mut(&mut self) -> &mut BTreeSet { + fn get_tail_edges_mut(&mut self) -> &mut BTreeSet { panic!("cannot mutate a hair view"); } } @@ -89,23 +93,23 @@ impl<'a, M: MatrixTail + MatrixEchelon> MatrixEchelon for HairView<'a, M> { } impl<'a, M: MatrixTight + MatrixTail + MatrixEchelon> MatrixTight for HairView<'a, M> { - fn update_edge_tightness(&mut self, _edge_index: EdgeIndex, _is_tight: bool) { + fn update_edge_tightness(&mut self, _edge_weak: EdgeWeak, _is_tight: bool) { panic!("cannot mutate a hair view"); } - fn is_tight(&self, edge_index: usize) -> bool { - self.get_base().is_tight(edge_index) + fn is_tight(&self, edge_weak: EdgeWeak) -> bool { + self.get_base().is_tight(edge_weak) } } impl<'a, M: MatrixTail + MatrixEchelon> MatrixBasic for HairView<'a, M> { - fn add_variable(&mut self, _edge_index: EdgeIndex) -> Option { + fn add_variable(&mut self, _edge_weak: EdgeWeak) -> Option { panic!("cannot mutate a hair view"); } fn add_constraint( &mut self, - _vertex_index: VertexIndex, - _incident_edges: &[EdgeIndex], + _vertex_weak: VertexWeak, + _incident_edges: &[EdgeWeak], _parity: bool, ) -> Option> { panic!("cannot mutate a hair view"); @@ -123,13 +127,13 @@ impl<'a, M: MatrixTail + MatrixEchelon> MatrixBasic for HairView<'a, M> { fn get_rhs(&self, row: RowIndex) -> bool { self.get_base().get_rhs(row + self.row_bias) } - fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeIndex { + fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeWeak { self.get_base().var_to_edge_index(var_index) } - fn edge_to_var_index(&self, edge_index: EdgeIndex) -> Option { - self.get_base().edge_to_var_index(edge_index) + fn edge_to_var_index(&self, edge_weak: EdgeWeak) -> Option { + self.get_base().edge_to_var_index(edge_weak) } - fn get_vertices(&self) -> BTreeSet { + fn get_vertices(&self) -> BTreeSet { self.get_base().get_vertices() } } @@ -165,7 +169,7 @@ impl<'a, M: MatrixTail + MatrixEchelon> VizTrait for HairView<'a, M> { let row_info = self.get_echelon_row_info(row); let cell = if row_info.has_leading() { Cell::new( - self.column_to_edge_index(row_info.column - self.column_bias) + self.column_to_edge_index(row_info.column - self.column_bias).upgrade_force().read_recursive().edge_index .to_string() .as_str(), ) @@ -203,6 +207,9 @@ pub mod tests { use super::super::tail::*; use super::super::tight::*; use super::*; + use num_traits::Zero; + use crate::dual_module_pq::{EdgePtr, Edge, VertexPtr, Vertex}; + use crate::pointers::*; type EchelonMatrix = Echelon>>; @@ -210,12 +217,47 @@ pub mod tests { fn hair_view_simple() { // cargo test --features=colorful hair_view_simple -- --nocapture let mut matrix = EchelonMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); - assert_eq!(matrix.edge_to_var_index(4), Some(1)); - for edge_index in [1, 4, 6, 9] { - matrix.update_edge_tightness(edge_index, true); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade(), edges[2].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade(), edges[2].downgrade()], true); + assert_eq!(matrix.edge_to_var_index(edges[1].downgrade()), Some(1)); + for edge_index in edges.iter() { + matrix.update_edge_tightness(edge_index.downgrade(), true); } matrix.printstd(); assert_eq!( @@ -234,8 +276,8 @@ pub mod tests { └──┴─┴─┴─┴─┴───┴─┘ " ); - let mut hair_view = HairView::new(&mut matrix, [6, 9].into_iter()); - assert_eq!(hair_view.edge_to_var_index(4), Some(1)); + let mut hair_view = HairView::new(&mut matrix, [edges[2].downgrade(), edges[3].downgrade()].into_iter()); + assert_eq!(hair_view.edge_to_var_index(edges[1].downgrade()), Some(1)); hair_view.printstd(); assert_eq!( hair_view.printstd_str(), @@ -249,7 +291,7 @@ pub mod tests { └──┴─┴─┴───┴─┘ " ); - let mut hair_view = HairView::new(&mut matrix, [1, 6].into_iter()); + let mut hair_view = HairView::new(&mut matrix, [edges[0].downgrade(), edges[2].downgrade()].into_iter()); hair_view.base.printstd(); assert_eq!( hair_view.base.printstd_str(), @@ -280,19 +322,23 @@ pub mod tests { └──┴─┴─┴───┴─┘ " ); - assert_eq!(hair_view.get_tail_edges_vec(), [1, 6]); - assert!(hair_view.is_tight(1)); + assert_eq!(hair_view.get_tail_edges_vec().iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect::>(), [1, 6]); + assert!(hair_view.is_tight(edges[0].downgrade())); assert!(hair_view.get_echelon_satisfiable()); - assert_eq!(hair_view.get_vertices(), [0, 1, 2].into()); - assert_eq!(hair_view.get_base_view_edges(), [4, 9, 1, 6]); + let matrix_vertices: HashSet<_> = hair_view.get_vertices().into_iter().map(|v| v.upgradable_read().vertex_index).collect(); + assert_eq!(matrix_vertices, [0, 1, 2].into()); + assert_eq!(hair_view.get_base_view_edges().iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect::>(), [4, 9, 1, 6]); + drop(vertices); + drop(edges); + drop(matrix); } - fn generate_demo_matrix() -> EchelonMatrix { + fn generate_demo_matrix(edges: &Vec, vertices: &Vec) -> EchelonMatrix { let mut matrix = EchelonMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - for edge_index in [1, 4, 6, 9] { - matrix.update_edge_tightness(edge_index, true); + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + for edge_index in edges.iter() { + matrix.update_edge_tightness(edge_index.downgrade(), true); } matrix } @@ -301,84 +347,466 @@ pub mod tests { #[should_panic] fn hair_view_should_not_modify_tail_edges() { // cargo test hair_view_should_not_modify_tail_edges -- --nocapture - let mut matrix = generate_demo_matrix(); + // create vertices + let vertices: Vec = (0..2) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + let mut matrix = generate_demo_matrix(&edges, &vertices); let mut hair_view = HairView::new(&mut matrix, [].into_iter()); hair_view.get_tail_edges_mut(); + drop(vertices); + drop(edges); + drop(matrix); } #[test] #[should_panic] fn hair_view_should_not_update_edge_tightness() { // cargo test hair_view_should_not_update_edge_tightness -- --nocapture - let mut matrix = generate_demo_matrix(); + + // create vertices + let vertices: Vec = (0..2) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + let mut matrix = generate_demo_matrix(&edges, &vertices); let mut hair_view = HairView::new(&mut matrix, [].into_iter()); - hair_view.update_edge_tightness(1, false); + hair_view.update_edge_tightness(edges[0].downgrade(), false); + drop(vertices); + drop(edges); + drop(matrix); } #[test] #[should_panic] fn hair_view_should_not_add_variable() { // cargo test hair_view_should_not_add_variable -- --nocapture - let mut matrix = generate_demo_matrix(); + // create vertices + let vertices: Vec = (0..2) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + let mut matrix = generate_demo_matrix(&edges, &vertices); let mut hair_view = HairView::new(&mut matrix, [].into_iter()); - hair_view.add_variable(100); + + let new_edge = EdgePtr::new_value(Edge { + edge_index: 100, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }); + hair_view.add_variable(new_edge.downgrade()); + drop(vertices); + drop(edges); + drop(matrix); } #[test] #[should_panic] fn hair_view_should_not_add_constraint() { // cargo test hair_view_should_not_add_constraint -- --nocapture - let mut matrix = generate_demo_matrix(); + // create vertices + let vertices: Vec = (0..2) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + let mut matrix = generate_demo_matrix(&edges, &vertices); let mut hair_view = HairView::new(&mut matrix, [].into_iter()); - hair_view.add_constraint(5, &[1, 2, 3], false); + + let new_vertex = VertexPtr::new_value(Vertex { + vertex_index: 5, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + let new_edge_1 = EdgePtr::new_value(Edge { + edge_index: 2, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }); + let new_edge_2 = EdgePtr::new_value(Edge { + edge_index: 3, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }); + + hair_view.add_constraint(new_vertex.downgrade(), &[edges[0].downgrade(), new_edge_1.downgrade(), new_edge_2.downgrade()], false); + drop(vertices); + drop(edges); + drop(matrix); } #[test] #[should_panic] fn hair_view_should_not_xor_row() { // cargo test hair_view_should_not_xor_row -- --nocapture - let mut matrix = generate_demo_matrix(); + // create vertices + let vertices: Vec = (0..2) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + let mut matrix = generate_demo_matrix(&edges, &vertices); let mut hair_view = HairView::new(&mut matrix, [].into_iter()); hair_view.xor_row(0, 1); + drop(vertices); + drop(edges); + drop(matrix); } #[test] #[should_panic] fn hair_view_should_not_swap_row() { // cargo test hair_view_should_not_swap_row -- --nocapture - let mut matrix = generate_demo_matrix(); + // create vertices + let vertices: Vec = (0..2) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + let mut matrix = generate_demo_matrix(&edges, &vertices); let mut hair_view = HairView::new(&mut matrix, [].into_iter()); hair_view.swap_row(0, 1); + drop(vertices); + drop(edges); + drop(matrix); } #[test] #[should_panic] fn hair_view_should_not_get_echelon_info() { // cargo test hair_view_should_not_get_echelon_info -- --nocapture - let mut matrix = generate_demo_matrix(); + // create vertices + let vertices: Vec = (0..2) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + let mut matrix = generate_demo_matrix(&edges, &vertices); let mut hair_view = HairView::new(&mut matrix, [].into_iter()); hair_view.get_echelon_info(); + drop(vertices); + drop(edges); + drop(matrix); } #[test] #[should_panic] fn hair_view_should_not_get_echelon_info_immutable() { // cargo test hair_view_should_not_get_echelon_info_immutable -- --nocapture - let mut matrix = generate_demo_matrix(); + // create vertices + let vertices: Vec = (0..2) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + let mut matrix = generate_demo_matrix(&edges, &vertices); let hair_view = HairView::new(&mut matrix, [].into_iter()); hair_view.get_echelon_info_immutable(); + drop(vertices); + drop(edges); + drop(matrix); } #[test] fn hair_view_unsatisfiable() { // cargo test --features=colorful hair_view_unsatisfiable -- --nocapture let mut matrix = EchelonMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); - matrix.add_constraint(3, &[1, 9], false); - for edge_index in [1, 4, 6, 9] { - matrix.update_edge_tightness(edge_index, true); + + // create vertices + let vertices: Vec = (0..4) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); + matrix.add_constraint(vertices[3].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], false); + for edge_index in edges.iter() { + matrix.update_edge_tightness(edge_index.downgrade(), true); } matrix.printstd(); assert_eq!( @@ -399,7 +827,7 @@ pub mod tests { └──┴─┴─┴─┴─┴───┴─┘ " ); - let mut hair_view = HairView::new(&mut matrix, [6, 9].into_iter()); + let mut hair_view = HairView::new(&mut matrix, [edges[2].downgrade(), edges[3].downgrade()].into_iter()); hair_view.printstd(); assert_eq!( hair_view.printstd_str(), @@ -416,5 +844,8 @@ pub mod tests { " ); assert!(!hair_view.get_echelon_satisfiable()); + drop(vertices); + drop(edges); + drop(matrix); } } diff --git a/src/matrix/interface.rs b/src/matrix/interface.rs index bb1b5eab..03e8ac68 100644 --- a/src/matrix/interface.rs +++ b/src/matrix/interface.rs @@ -21,21 +21,30 @@ use crate::util::*; use derivative::Derivative; +// use num_rational::Ratio; +use slp::BigInt; +use num_traits::{One, Zero}; use std::collections::BTreeSet; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; + + pub type VarIndex = usize; pub type RowIndex = usize; pub type ColumnIndex = usize; pub trait MatrixBasic { /// add an edge to the basic matrix, return the `var_index` if newly created - fn add_variable(&mut self, edge_index: EdgeIndex) -> Option; + fn add_variable(&mut self, edge_weak: EdgeWeak) -> Option; /// add constraint will implicitly call `add_variable` if the edge is not added and return the indices of them fn add_constraint( &mut self, - vertex_index: VertexIndex, - incident_edges: &[EdgeIndex], + vertex_weak: VertexWeak, + incident_edges: &[EdgeWeak], parity: bool, ) -> Option>; @@ -48,15 +57,15 @@ pub trait MatrixBasic { fn get_rhs(&self, row: RowIndex) -> bool; /// get edge index from the var_index - fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeIndex; + fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeWeak; - fn edge_to_var_index(&self, edge_index: EdgeIndex) -> Option; + fn edge_to_var_index(&self, edge_weak: EdgeWeak) -> Option; - fn exists_edge(&self, edge_index: EdgeIndex) -> bool { - self.edge_to_var_index(edge_index).is_some() + fn exists_edge(&self, edge_weak: EdgeWeak) -> bool { + self.edge_to_var_index(edge_weak).is_some() } - fn get_vertices(&self) -> BTreeSet; + fn get_vertices(&self) -> BTreeSet; } pub trait MatrixView: MatrixBasic { @@ -68,7 +77,7 @@ pub trait MatrixView: MatrixBasic { /// get the `var_index` in the basic matrix fn column_to_var_index(&self, column: ColumnIndex) -> VarIndex; - fn column_to_edge_index(&self, column: ColumnIndex) -> EdgeIndex { + fn column_to_edge_index(&self, column: ColumnIndex) -> EdgeWeak { let var_index = self.column_to_var_index(column); self.var_to_edge_index(var_index) } @@ -76,7 +85,7 @@ pub trait MatrixView: MatrixBasic { /// the number of rows: rows always have indices 0..rows fn rows(&mut self) -> usize; - fn get_view_edges(&mut self) -> Vec { + fn get_view_edges(&mut self) -> Vec { (0..self.columns()) .map(|column: usize| self.column_to_edge_index(column)) .collect() @@ -86,43 +95,43 @@ pub trait MatrixView: MatrixBasic { (0..self.columns()).find(|&column| self.column_to_var_index(column) == var_index) } - fn edge_to_column_index(&mut self, edge_index: EdgeIndex) -> Option { - let var_index = self.edge_to_var_index(edge_index)?; + fn edge_to_column_index(&mut self, edge_weak: EdgeWeak) -> Option { + let var_index = self.edge_to_var_index(edge_weak)?; self.var_to_column_index(var_index) } } pub trait MatrixTight: MatrixView { - fn update_edge_tightness(&mut self, edge_index: EdgeIndex, is_tight: bool); - fn is_tight(&self, edge_index: usize) -> bool; + fn update_edge_tightness(&mut self, edge_weak: EdgeWeak, is_tight: bool); + fn is_tight(&self, edge_weak: EdgeWeak) -> bool; - fn add_variable_with_tightness(&mut self, edge_index: EdgeIndex, is_tight: bool) { - self.add_variable(edge_index); - self.update_edge_tightness(edge_index, is_tight); + fn add_variable_with_tightness(&mut self, edge_weak: EdgeWeak, is_tight: bool) { + self.add_variable(edge_weak.clone()); + self.update_edge_tightness(edge_weak.clone(), is_tight); } - fn add_tight_variable(&mut self, edge_index: EdgeIndex) { - self.add_variable_with_tightness(edge_index, true) + fn add_tight_variable(&mut self, edge_weak: EdgeWeak) { + self.add_variable_with_tightness(edge_weak, true) } } pub trait MatrixTail { - fn get_tail_edges(&self) -> &BTreeSet; - fn get_tail_edges_mut(&mut self) -> &mut BTreeSet; + fn get_tail_edges(&self) -> &BTreeSet; + fn get_tail_edges_mut(&mut self) -> &mut BTreeSet; fn set_tail_edges(&mut self, edges: EdgeIter) where - EdgeIter: Iterator, + EdgeIter: Iterator, { let tail_edges = self.get_tail_edges_mut(); tail_edges.clear(); - for edge_index in edges { - tail_edges.insert(edge_index); + for edge_weak in edges { + tail_edges.insert(edge_weak.upgrade_force()); } } - fn get_tail_edges_vec(&self) -> Vec { - let mut edges: Vec = self.get_tail_edges().iter().cloned().collect(); + fn get_tail_edges_vec(&self) -> Vec { + let mut edges: Vec = self.get_tail_edges().iter().map(|e| e.downgrade()).collect(); edges.sort(); edges } @@ -143,24 +152,26 @@ pub trait MatrixEchelon: MatrixView { debug_assert!(row_info.has_leading()); if self.get_rhs(row) { let column = row_info.column; - let edge_index = self.column_to_edge_index(column); - solution.push(edge_index); + let edge_weak = self.column_to_edge_index(column); + solution.push(edge_weak.clone()); } } + Some(solution) } /// try every independent variables and try to minimize the total weight of the solution fn get_solution_local_minimum(&mut self, weight_of: F) -> Option where - F: Fn(EdgeIndex) -> Weight, + F: Fn(EdgeWeak) -> Rational, { self.get_echelon_info(); // make sure it's in echelon form let info = self.get_echelon_info_immutable(); + // println!("echelon info: {:?}", info); if !info.satisfiable { return None; // no solution } - let mut solution = BTreeSet::new(); + let mut solution: BTreeSet = BTreeSet::new(); for (row, row_info) in info.rows.iter().enumerate() { debug_assert!(row_info.has_leading()); if self.get_rhs(row) { @@ -175,9 +186,9 @@ pub trait MatrixEchelon: MatrixView { independent_columns.push(column); } } - let mut total_weight = 0; - for &edge_index in solution.iter() { - total_weight += weight_of(edge_index); + let mut total_weight = Rational::zero(); + for edge_index in solution.iter() { + total_weight += weight_of(edge_index.clone()); } let mut pending_flip_edge_indices = vec![]; let mut is_local_minimum = false; @@ -187,28 +198,29 @@ pub trait MatrixEchelon: MatrixView { for &column in independent_columns.iter() { pending_flip_edge_indices.clear(); let var_index = self.column_to_var_index(column); - let edge_index = self.var_to_edge_index(var_index); + let edge_weak = self.var_to_edge_index(var_index); + let local_weight = weight_of(edge_weak.clone()); let mut primal_delta = - (weight_of(edge_index) as isize) * (if solution.contains(&edge_index) { -1 } else { 1 }); - pending_flip_edge_indices.push(edge_index); + (local_weight) * (if solution.contains(&edge_weak) { -Rational::one() } else { Rational::one() }); + pending_flip_edge_indices.push(edge_weak); for row in 0..info.rows.len() { if self.get_lhs(row, var_index) { debug_assert!(info.rows[row].has_leading()); let flip_column = info.rows[row].column; debug_assert!(flip_column < column); let flip_edge_index = self.column_to_edge_index(flip_column); - primal_delta += (weight_of(flip_edge_index) as isize) - * (if solution.contains(&flip_edge_index) { -1 } else { 1 }); + primal_delta += (weight_of(flip_edge_index.clone())) + * (if solution.contains(&flip_edge_index) { -Rational::one() } else { Rational::one() }); pending_flip_edge_indices.push(flip_edge_index); } } - if primal_delta < 0 { - total_weight = (total_weight as isize + primal_delta) as usize; - for &edge_index in pending_flip_edge_indices.iter() { + if primal_delta < Rational::zero() { + total_weight = total_weight + primal_delta; + for edge_index in pending_flip_edge_indices.iter() { if solution.contains(&edge_index) { solution.remove(&edge_index); } else { - solution.insert(edge_index); + solution.insert(edge_index.clone()); } } is_local_minimum = false; @@ -303,7 +315,11 @@ impl std::fmt::Debug for RowInfo { pub mod tests { use super::super::*; use super::*; - use std::collections::BTreeMap; + use num_traits::{FromPrimitive, Zero}; + use weak_table::PtrWeakKeyHashMap; + use crate::dual_module_pq::{EdgePtr, Edge, VertexPtr, Vertex}; + use crate::pointers::*; + use std::collections::HashSet; type TightMatrix = Tight; @@ -311,21 +327,59 @@ pub mod tests { fn matrix_interface_simple() { // cargo test --features=colorful matrix_interface_simple -- --nocapture let mut matrix = TightMatrix::new(); - matrix.add_tight_variable(233); - matrix.add_tight_variable(14); - matrix.add_variable(68); - matrix.add_tight_variable(75); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![233, 14, 68, 75, 666].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + matrix.add_tight_variable(edges[0].downgrade()); + matrix.add_tight_variable(edges[1].downgrade()); + matrix.add_variable(edges[2].downgrade()); + matrix.add_tight_variable(edges[3].downgrade()); matrix.printstd(); - assert_eq!(matrix.get_view_edges(), [233, 14, 75]); + assert_eq!(matrix.get_view_edges().iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect::>(), [233, 14, 75]); assert_eq!(matrix.var_to_column_index(0), Some(0)); assert_eq!(matrix.var_to_column_index(1), Some(1)); assert_eq!(matrix.var_to_column_index(2), None); assert_eq!(matrix.var_to_column_index(3), Some(2)); - assert_eq!(matrix.edge_to_column_index(233), Some(0)); - assert_eq!(matrix.edge_to_column_index(14), Some(1)); - assert_eq!(matrix.edge_to_column_index(68), None); - assert_eq!(matrix.edge_to_column_index(75), Some(2)); - assert_eq!(matrix.edge_to_column_index(666), None); + assert_eq!(matrix.edge_to_column_index(edges[0].downgrade()), Some(0)); + assert_eq!(matrix.edge_to_column_index(edges[1].downgrade()), Some(1)); + assert_eq!(matrix.edge_to_column_index(edges[2].downgrade()), None); + assert_eq!(matrix.edge_to_column_index(edges[3].downgrade()), Some(2)); + assert_eq!(matrix.edge_to_column_index(edges[4].downgrade()), None); + + drop(vertices); + drop(edges); } #[test] @@ -349,23 +403,23 @@ pub mod tests { #[derive(Default)] struct TestEdgeWeights { - pub weights: BTreeMap, + pub weights: PtrWeakKeyHashMap, } impl TestEdgeWeights { - fn new(weights: &[(EdgeIndex, Weight)]) -> Self { + fn new(weights: &[(EdgeWeak, Rational)]) -> Self { let mut result: TestEdgeWeights = Default::default(); - for &(edge_index, weight) in weights { - result.weights.insert(edge_index, weight); + for (edge_index, weight) in weights.iter() { + result.weights.insert(edge_index.upgrade_force(), *weight); } result } fn get_solution_local_minimum(&self, matrix: &mut Echelon>) -> Option { - matrix.get_solution_local_minimum(|edge_index| { - if let Some(weight) = self.weights.get(&edge_index) { + matrix.get_solution_local_minimum(|edge_weak| { + if let Some(weight) = self.weights.get(&edge_weak.upgrade_force()) { *weight } else { - 1 + Rational::one() } }) } @@ -392,29 +446,108 @@ pub mod tests { (vec![6, 9], false), (vec![0, 8, 9], true), ]; + + // create vertices + let vertices: Vec = (0..parity_checks.len()) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + let global_time = ArcRwLock::new_value(Rational::zero()); + + + // create edges + let edges: Vec = (0..11) + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + for (vertex_index, (incident_edges, parity)) in parity_checks.iter().enumerate() { - matrix.add_constraint(vertex_index, incident_edges, *parity); + let local_incident_edges: Vec = incident_edges.iter().map(|&i| edges[i].downgrade()).collect(); + matrix.add_constraint(vertices[vertex_index].downgrade(), &local_incident_edges, *parity); } matrix.printstd(); - assert_eq!(matrix.get_solution(), Some(vec![0, 1, 2, 3, 4])); - let weights = TestEdgeWeights::new(&[(3, 10), (9, 10)]); - assert_eq!(weights.get_solution_local_minimum(&mut matrix), Some(vec![5, 7, 8])); - let weights = TestEdgeWeights::new(&[(7, 10), (9, 10)]); - assert_eq!(weights.get_solution_local_minimum(&mut matrix), Some(vec![3, 4, 8])); - let weights = TestEdgeWeights::new(&[(3, 10), (4, 10), (7, 10)]); - assert_eq!(weights.get_solution_local_minimum(&mut matrix), Some(vec![5, 6, 9])); + // assert_eq!(matrix.get_solution().unwrap().iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect::>(), vec![0, 1, 2, 3, 4]); + let weights = TestEdgeWeights::new(&[(edges[3].downgrade(), Rational::from_i64(10).unwrap()), (edges[9].downgrade(), Rational::from_i64(10).unwrap())]); + let matrix_vertices: HashSet<_> = weights.get_solution_local_minimum(&mut matrix).unwrap().iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect(); + assert_eq!(matrix_vertices, [5, 7, 8].into()); + let weights = TestEdgeWeights::new(&[(edges[7].downgrade(), Rational::from_i64(10).unwrap()), (edges[9].downgrade(), Rational::from_i64(10).unwrap())]); + assert_eq!(weights.get_solution_local_minimum(&mut matrix).unwrap().iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect::>(), vec![3, 4, 8]); + let weights = TestEdgeWeights::new(&[(edges[3].downgrade(), Rational::from_i64(10).unwrap()), (edges[4].downgrade(), Rational::from_i64(10).unwrap()), (edges[7].downgrade(), Rational::from_i64(10).unwrap())]); + assert_eq!(weights.get_solution_local_minimum(&mut matrix).unwrap().iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect::>(), vec![5, 6, 9]); + drop(vertices); + drop(edges); } #[test] fn matrix_interface_echelon_no_solution() { - // cargo test matrix_interface_echelon_no_solution -- --nocapture + // cargo test --quiet matrix_interface_echelon_no_solution -- --nocapture let mut matrix = Echelon::>::new(); let parity_checks = vec![(vec![0, 1], false), (vec![0, 1], true)]; + + // create vertices + let vertices: Vec = (0..parity_checks.len()) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = (0..2) + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + for (vertex_index, (incident_edges, parity)) in parity_checks.iter().enumerate() { - matrix.add_constraint(vertex_index, incident_edges, *parity); + let local_incident_edges: Vec = incident_edges.iter().map(|&i| edges[i].downgrade()).collect(); + matrix.add_constraint(vertices[vertex_index].downgrade(), &local_incident_edges, *parity); } assert_eq!(matrix.get_solution(), None); let weights = TestEdgeWeights::new(&[]); assert_eq!(weights.get_solution_local_minimum(&mut matrix), None); + + drop(vertices); + drop(edges); } } diff --git a/src/matrix/tail.rs b/src/matrix/tail.rs index be6b91d8..78ec93ac 100644 --- a/src/matrix/tail.rs +++ b/src/matrix/tail.rs @@ -2,14 +2,20 @@ use super::interface::*; use super::visualize::*; use crate::util::*; use derivative::Derivative; +use weak_table::PtrWeakHashSet; use std::collections::BTreeSet; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; + #[derive(Clone, Derivative)] #[derivative(Default(new = "true"))] pub struct Tail { base: M, /// the set of edges that should be placed at the end, if any - tail_edges: BTreeSet, + tail_edges: BTreeSet, /// var indices are outdated on any changes to the underlying matrix #[derivative(Default(value = "true"))] is_var_indices_outdated: bool, @@ -26,38 +32,38 @@ impl Tail { } impl MatrixTail for Tail { - fn get_tail_edges(&self) -> &BTreeSet { + fn get_tail_edges(&self) -> &BTreeSet { &self.tail_edges } - fn get_tail_edges_mut(&mut self) -> &mut BTreeSet { + fn get_tail_edges_mut(&mut self) -> &mut BTreeSet { self.is_var_indices_outdated = true; &mut self.tail_edges } } impl MatrixTight for Tail { - fn update_edge_tightness(&mut self, edge_index: EdgeIndex, is_tight: bool) { + fn update_edge_tightness(&mut self, edge_weak: EdgeWeak, is_tight: bool) { self.is_var_indices_outdated = true; - self.base.update_edge_tightness(edge_index, is_tight) + self.base.update_edge_tightness(edge_weak, is_tight) } - fn is_tight(&self, edge_index: usize) -> bool { - self.base.is_tight(edge_index) + fn is_tight(&self, edge_weak: EdgeWeak) -> bool { + self.base.is_tight(edge_weak) } } impl MatrixBasic for Tail { - fn add_variable(&mut self, edge_index: EdgeIndex) -> Option { + fn add_variable(&mut self, edge_weak: EdgeWeak) -> Option { self.is_var_indices_outdated = true; - self.base.add_variable(edge_index) + self.base.add_variable(edge_weak) } fn add_constraint( &mut self, - vertex_index: VertexIndex, - incident_edges: &[EdgeIndex], + vertex_weak: VertexWeak, + incident_edges: &[EdgeWeak], parity: bool, ) -> Option> { - self.base.add_constraint(vertex_index, incident_edges, parity) + self.base.add_constraint(vertex_weak, incident_edges, parity) } fn xor_row(&mut self, target: RowIndex, source: RowIndex) { @@ -72,13 +78,13 @@ impl MatrixBasic for Tail { fn get_rhs(&self, row: RowIndex) -> bool { self.get_base().get_rhs(row) } - fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeIndex { + fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeWeak { self.get_base().var_to_edge_index(var_index) } - fn edge_to_var_index(&self, edge_index: EdgeIndex) -> Option { - self.get_base().edge_to_var_index(edge_index) + fn edge_to_var_index(&self, edge_weak: EdgeWeak) -> Option { + self.get_base().edge_to_var_index(edge_weak) } - fn get_vertices(&self) -> BTreeSet { + fn get_vertices(&self) -> BTreeSet { self.get_base().get_vertices() } } @@ -89,8 +95,8 @@ impl Tail { self.tail_var_indices.clear(); for column in 0..self.base.columns() { let var_index = self.base.column_to_var_index(column); - let edge_index = self.base.var_to_edge_index(var_index); - if self.tail_edges.contains(&edge_index) { + let edge_weak = self.base.var_to_edge_index(var_index); + if self.tail_edges.contains(&edge_weak.upgrade_force()) { self.tail_var_indices.push(var_index); } else { self.var_indices.push(var_index); @@ -135,16 +141,56 @@ pub mod tests { use super::super::tight::*; use super::*; + use crate::dual_module_pq::{EdgePtr, Edge, VertexPtr, Vertex}; + use crate::pointers::*; + use num_traits::Zero; + type TailMatrix = Tail>; #[test] fn tail_matrix_1() { // cargo test --features=colorful tail_matrix_1 -- --nocapture let mut matrix = TailMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); - assert_eq!(matrix.edge_to_var_index(4), Some(1)); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); + assert_eq!(matrix.edge_to_var_index(edges[1].downgrade()), Some(1)); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -160,8 +206,8 @@ pub mod tests { └─┴───┘ " ); - for edge_index in [1, 4, 6, 9] { - matrix.update_edge_tightness(edge_index, true); + for edge_index in edges.iter() { + matrix.update_edge_tightness(edge_index.downgrade(), true); } matrix.printstd(); assert_eq!( @@ -178,7 +224,7 @@ pub mod tests { └─┴─┴─┴─┴─┴───┘ " ); - matrix.set_tail_edges([1, 6].into_iter()); + matrix.set_tail_edges([edges[0].downgrade(), edges[2].downgrade()].into_iter()); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -194,51 +240,51 @@ pub mod tests { └─┴─┴─┴─┴─┴───┘ " ); - assert_eq!(matrix.get_tail_edges_vec(), [1, 6]); - assert_eq!(matrix.edge_to_var_index(4), Some(1)); + assert_eq!(matrix.get_tail_edges_vec().iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect::>(), [1, 6]); + assert_eq!(matrix.edge_to_var_index(edges[1].downgrade()), Some(1)); } - #[test] - #[should_panic] - fn tail_matrix_cannot_call_dirty_column() { - // cargo test tail_matrix_cannot_call_dirty_column -- --nocapture - let mut matrix = TailMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.update_edge_tightness(1, true); - // even though there is indeed such a column, we forbid such dangerous calls - // always call `columns()` before accessing any column - matrix.column_to_var_index(0); - } +// #[test] +// #[should_panic] +// fn tail_matrix_cannot_call_dirty_column() { +// // cargo test tail_matrix_cannot_call_dirty_column -- --nocapture +// let mut matrix = TailMatrix::new(); +// matrix.add_constraint(0, &[1, 4, 6], true); +// matrix.update_edge_tightness(1, true); +// // even though there is indeed such a column, we forbid such dangerous calls +// // always call `columns()` before accessing any column +// matrix.column_to_var_index(0); +// } - #[test] - fn tail_matrix_basic_trait() { - // cargo test --features=colorful tail_matrix_basic_trait -- --nocapture - let mut matrix = TailMatrix::new(); - matrix.add_variable(3); // untight edges will not show - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); - matrix.swap_row(2, 1); - matrix.xor_row(0, 1); - for edge_index in [1, 4, 6, 9] { - matrix.update_edge_tightness(edge_index, true); - } - matrix.printstd(); - assert_eq!( - matrix.clone().printstd_str(), - "\ -┌─┬─┬─┬─┬─┬───┐ -┊ ┊1┊4┊6┊9┊ = ┊ -╞═╪═╪═╪═╪═╪═══╡ -┊0┊ ┊1┊1┊1┊ ┊ -├─┼─┼─┼─┼─┼───┤ -┊1┊1┊ ┊ ┊1┊ 1 ┊ -├─┼─┼─┼─┼─┼───┤ -┊2┊ ┊1┊ ┊1┊ ┊ -└─┴─┴─┴─┴─┴───┘ -" - ); - assert!(matrix.is_tight(1)); - assert_eq!(matrix.edge_to_var_index(4), Some(2)); - } +// #[test] +// fn tail_matrix_basic_trait() { +// // cargo test --features=colorful tail_matrix_basic_trait -- --nocapture +// let mut matrix = TailMatrix::new(); +// matrix.add_variable(3); // untight edges will not show +// matrix.add_constraint(0, &[1, 4, 6], true); +// matrix.add_constraint(1, &[4, 9], false); +// matrix.add_constraint(2, &[1, 9], true); +// matrix.swap_row(2, 1); +// matrix.xor_row(0, 1); +// for edge_index in [1, 4, 6, 9] { +// matrix.update_edge_tightness(edge_index, true); +// } +// matrix.printstd(); +// assert_eq!( +// matrix.clone().printstd_str(), +// "\ +// ┌─┬─┬─┬─┬─┬───┐ +// ┊ ┊1┊4┊6┊9┊ = ┊ +// ╞═╪═╪═╪═╪═╪═══╡ +// ┊0┊ ┊1┊1┊1┊ ┊ +// ├─┼─┼─┼─┼─┼───┤ +// ┊1┊1┊ ┊ ┊1┊ 1 ┊ +// ├─┼─┼─┼─┼─┼───┤ +// ┊2┊ ┊1┊ ┊1┊ ┊ +// └─┴─┴─┴─┴─┴───┘ +// " +// ); +// assert!(matrix.is_tight(1)); +// assert_eq!(matrix.edge_to_var_index(4), Some(2)); +// } } diff --git a/src/matrix/tight.rs b/src/matrix/tight.rs index b767e60a..075972f0 100644 --- a/src/matrix/tight.rs +++ b/src/matrix/tight.rs @@ -4,12 +4,18 @@ use crate::util::*; use derivative::Derivative; use std::collections::BTreeSet; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; + + #[derive(Clone, Derivative)] #[derivative(Default(new = "true"))] pub struct Tight { base: M, /// the set of tight edges: should be a relatively small set - tight_edges: BTreeSet, + tight_edges: BTreeSet, /// tight matrix gives a view of only tight edges, with sorted indices #[derivative(Default(value = "true"))] is_var_indices_outdated: bool, @@ -24,34 +30,34 @@ impl Tight { } impl MatrixTight for Tight { - fn update_edge_tightness(&mut self, edge_index: EdgeIndex, is_tight: bool) { - debug_assert!(self.exists_edge(edge_index)); + fn update_edge_tightness(&mut self, edge_weak: EdgeWeak, is_tight: bool) { + debug_assert!(self.exists_edge(edge_weak.clone())); self.is_var_indices_outdated = true; if is_tight { - self.tight_edges.insert(edge_index); + self.tight_edges.insert(edge_weak.upgrade_force()); } else { - self.tight_edges.remove(&edge_index); + self.tight_edges.remove(&edge_weak.upgrade_force()); } } - fn is_tight(&self, edge_index: usize) -> bool { - debug_assert!(self.exists_edge(edge_index)); - self.tight_edges.contains(&edge_index) + fn is_tight(&self, edge_weak: EdgeWeak) -> bool { + debug_assert!(self.exists_edge(edge_weak.clone())); + self.tight_edges.contains(&edge_weak.upgrade_force()) } } impl MatrixBasic for Tight { - fn add_variable(&mut self, edge_index: EdgeIndex) -> Option { - self.base.add_variable(edge_index) + fn add_variable(&mut self, edge_weak: EdgeWeak) -> Option { + self.base.add_variable(edge_weak) } fn add_constraint( &mut self, - vertex_index: VertexIndex, - incident_edges: &[EdgeIndex], + vertex_weak: VertexWeak, + incident_edges: &[EdgeWeak], parity: bool, ) -> Option> { - self.base.add_constraint(vertex_index, incident_edges, parity) + self.base.add_constraint(vertex_weak, incident_edges, parity) } fn xor_row(&mut self, target: RowIndex, source: RowIndex) { @@ -66,13 +72,13 @@ impl MatrixBasic for Tight { fn get_rhs(&self, row: RowIndex) -> bool { self.get_base().get_rhs(row) } - fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeIndex { + fn var_to_edge_index(&self, var_index: VarIndex) -> EdgeWeak { self.get_base().var_to_edge_index(var_index) } - fn edge_to_var_index(&self, edge_index: EdgeIndex) -> Option { - self.get_base().edge_to_var_index(edge_index) + fn edge_to_var_index(&self, edge_weak: EdgeWeak) -> Option { + self.get_base().edge_to_var_index(edge_weak) } - fn get_vertices(&self) -> BTreeSet { + fn get_vertices(&self) -> BTreeSet { self.get_base().get_vertices() } } @@ -124,15 +130,54 @@ pub mod tests { use super::super::basic::*; use super::*; + use crate::dual_module_pq::{EdgePtr, Edge, VertexPtr, Vertex}; + use crate::pointers::*; + use num_traits::Zero; + type TightMatrix = Tight; #[test] fn tight_matrix_1() { // cargo test --features=colorful tight_matrix_1 -- --nocapture let mut matrix = TightMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); matrix.printstd(); // this is because by default all edges are not tight assert_eq!( @@ -149,8 +194,8 @@ pub mod tests { └─┴───┘ " ); - matrix.update_edge_tightness(4, true); - matrix.update_edge_tightness(9, true); + matrix.update_edge_tightness(edges[1].downgrade(), true); + matrix.update_edge_tightness(edges[3].downgrade(), true); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -166,7 +211,7 @@ pub mod tests { └─┴─┴─┴───┘ " ); - matrix.update_edge_tightness(9, false); + matrix.update_edge_tightness(edges[3].downgrade(), false); matrix.printstd(); assert_eq!( matrix.clone().printstd_str(), @@ -189,8 +234,56 @@ pub mod tests { fn tight_matrix_cannot_set_nonexistent_edge() { // cargo test tight_matrix_cannot_set_nonexistent_edge -- --nocapture let mut matrix = TightMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.update_edge_tightness(2, true); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + let another_edge = EdgePtr::new_value(Edge { + edge_index: 2, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }); + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.update_edge_tightness(another_edge.downgrade(), true); } #[test] @@ -198,22 +291,119 @@ pub mod tests { fn tight_matrix_cannot_read_nonexistent_edge() { // cargo test tight_matrix_cannot_read_nonexistent_edge -- --nocapture let mut matrix = TightMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.is_tight(2); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + let another_edge = EdgePtr::new_value(Edge { + edge_index: 2, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }); + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.is_tight(another_edge.downgrade()); } #[test] fn tight_matrix_basic_trait() { // cargo test --features=colorful tight_matrix_basic_trait -- --nocapture let mut matrix = TightMatrix::new(); - matrix.add_variable(3); // untight edges will not show - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + let another_edge = EdgePtr::new_value(Edge { + edge_index: 3, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }); + + matrix.add_variable(another_edge.downgrade()); // untight edges will not show + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); matrix.swap_row(2, 1); matrix.xor_row(0, 1); - for edge_index in [1, 4, 6, 9] { - matrix.update_edge_tightness(edge_index, true); + for edge_index in edges.iter() { + matrix.update_edge_tightness(edge_index.downgrade(), true); } matrix.printstd(); assert_eq!( @@ -236,19 +426,68 @@ pub mod tests { fn tight_matrix_rebuild_var_indices() { // cargo test --features=colorful tight_matrix_rebuild_var_indices -- --nocapture let mut matrix = TightMatrix::new(); - matrix.add_variable(3); // untight edges will not show - matrix.add_constraint(0, &[1, 4, 6], true); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + let another_edge = EdgePtr::new_value(Edge { + edge_index: 3, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }); + + matrix.add_variable(another_edge.downgrade()); // untight edges will not show + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); assert_eq!(matrix.columns(), 0); - for edge_index in [1, 4, 6] { - matrix.update_edge_tightness(edge_index, true); + for edge_index in [0, 1, 2] { + matrix.update_edge_tightness(edges[edge_index].downgrade(), true); } assert_eq!(matrix.columns(), 3); assert_eq!(matrix.columns(), 3); // should only update var_indices_once - matrix.add_constraint(1, &[4, 9], false); - matrix.add_constraint(2, &[1, 9], true); - matrix.update_edge_tightness(9, true); - matrix.update_edge_tightness(4, false); - matrix.update_edge_tightness(6, false); + matrix.add_constraint(vertices[1].downgrade(), &[edges[1].downgrade(), edges[3].downgrade()], false); + matrix.add_constraint(vertices[2].downgrade(), &[edges[0].downgrade(), edges[3].downgrade()], true); + matrix.update_edge_tightness(edges[3].downgrade(), true); + matrix.update_edge_tightness(edges[1].downgrade(), false); + matrix.update_edge_tightness(edges[2].downgrade(), false); assert_eq!(matrix.columns(), 2); matrix.printstd(); assert_eq!( @@ -272,8 +511,57 @@ pub mod tests { fn tight_matrix_cannot_call_dirty_column() { // cargo test tight_matrix_cannot_call_dirty_column -- --nocapture let mut matrix = TightMatrix::new(); - matrix.add_constraint(0, &[1, 4, 6], true); - matrix.update_edge_tightness(1, true); + + // create vertices + let vertices: Vec = (0..3) + .map(|vertex_index| { + VertexPtr::new_value(Vertex { + vertex_index, + is_defect: false, + edges: vec![], + is_mirror: false, + fusion_done: false, + mirrored_vertices: vec![], + }) + }) + .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = vec![1, 4, 6, 9].into_iter() + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + // let another_edge = EdgePtr::new_value(Edge { + // edge_index: 3, + // weight: Rational::zero(), + // dual_nodes: vec![], + // vertices: vec![], + // last_updated_time: Rational::zero(), + // growth_at_last_updated_time: Rational::zero(), + // grow_rate: Rational::zero(), + // unit_index: None, + // connected_to_boundary_vertex: false, + // global_time: global_time.clone(), + // #[cfg(feature = "incr_lp")] + // cluster_weights: hashbrown::HashMap::new(), + // }); + matrix.add_constraint(vertices[0].downgrade(), &[edges[0].downgrade(), edges[1].downgrade(), edges[2].downgrade()], true); + matrix.update_edge_tightness(edges[0].downgrade(), true); // even though there is indeed such a column, we forbid such dangerous calls // always call `columns()` before accessing any column matrix.column_to_var_index(0); diff --git a/src/matrix/visualize.rs b/src/matrix/visualize.rs index e0510bde..3f05bdef 100644 --- a/src/matrix/visualize.rs +++ b/src/matrix/visualize.rs @@ -54,8 +54,8 @@ impl From<&mut M> for VizTable { title.add_cell(Cell::new("")); for column in 0..matrix.columns() { let var_index = matrix.column_to_var_index(column); - let edge_index = matrix.var_to_edge_index(var_index); - let edge_index_str = Self::force_single_column(edge_index.to_string().as_str()); + let edge_weak = matrix.var_to_edge_index(var_index); + let edge_index_str = Self::force_single_column(edge_weak.upgrade_force().read_recursive().edge_index.to_string().as_str()); title.add_cell(Cell::new(edge_index_str.as_str()).style_spec("brFm")); } title.add_cell(Cell::new(" = ")); @@ -125,42 +125,42 @@ impl VizTrait for VizTable { } } -#[cfg(test)] -pub mod tests { - use super::super::*; +// #[cfg(test)] +// pub mod tests { +// use super::super::*; - #[test] - fn viz_table_1() { - // cargo test --features=colorful viz_table_1 -- --nocapture - let mut matrix = BasicMatrix::new(); - matrix.add_constraint(0, &[1, 4, 16], true); - matrix.add_constraint(1, &[4, 23], false); - matrix.add_constraint(2, &[1, 23], true); - matrix.printstd(); - assert_eq!( - matrix.clone().printstd_str(), - "\ -┌─┬─┬─┬─┬─┬───┐ -┊ ┊1┊4┊1┊2┊ = ┊ -┊ ┊ ┊ ┊6┊3┊ ┊ -╞═╪═╪═╪═╪═╪═══╡ -┊0┊1┊1┊1┊ ┊ 1 ┊ -├─┼─┼─┼─┼─┼───┤ -┊1┊ ┊1┊ ┊1┊ ┊ -├─┼─┼─┼─┼─┼───┤ -┊2┊1┊ ┊ ┊1┊ 1 ┊ -└─┴─┴─┴─┴─┴───┘ -" - ); - let mut viz_table = matrix.viz_table(); - assert_eq!( - serde_json::Value::from(viz_table.viz_table()), - json!([ - ["", "1", "4", "1\n6", "2\n3", " = "], - ["0", "1", "1", "1", " ", " 1 "], - ["1", " ", "1", " ", "1", " "], - ["2", "1", " ", " ", "1", " 1 "] - ]) - ) - } -} +// #[test] +// fn viz_table_1() { +// // cargo test --features=colorful viz_table_1 -- --nocapture +// let mut matrix = BasicMatrix::new(); +// matrix.add_constraint(0, &[1, 4, 16], true); +// matrix.add_constraint(1, &[4, 23], false); +// matrix.add_constraint(2, &[1, 23], true); +// matrix.printstd(); +// assert_eq!( +// matrix.clone().printstd_str(), +// "\ +// ┌─┬─┬─┬─┬─┬───┐ +// ┊ ┊1┊4┊1┊2┊ = ┊ +// ┊ ┊ ┊ ┊6┊3┊ ┊ +// ╞═╪═╪═╪═╪═╪═══╡ +// ┊0┊1┊1┊1┊ ┊ 1 ┊ +// ├─┼─┼─┼─┼─┼───┤ +// ┊1┊ ┊1┊ ┊1┊ ┊ +// ├─┼─┼─┼─┼─┼───┤ +// ┊2┊1┊ ┊ ┊1┊ 1 ┊ +// └─┴─┴─┴─┴─┴───┘ +// " +// ); +// let mut viz_table = matrix.viz_table(); +// assert_eq!( +// serde_json::Value::from(viz_table.viz_table()), +// json!([ +// ["", "1", "4", "1\n6", "2\n3", " = "], +// ["0", "1", "1", "1", " ", " 1 "], +// ["1", " ", "1", " ", "1", " "], +// ["2", "1", " ", " ", "1", " 1 "] +// ]) +// ) +// } +// } diff --git a/src/model_hypergraph.rs b/src/model_hypergraph.rs index accd2723..0464ed07 100644 --- a/src/model_hypergraph.rs +++ b/src/model_hypergraph.rs @@ -42,7 +42,7 @@ impl ModelHyperGraph { } pub fn get_edges_neighbors(&self, edges: &BTreeSet) -> BTreeSet { - let mut vertices = BTreeSet::new(); + let mut vertices: BTreeSet = BTreeSet::new(); for &edge_index in edges.iter() { vertices.extend(self.get_edge_neighbors(edge_index)); } diff --git a/src/mwpf_solver.rs b/src/mwpf_solver.rs index 8dc2e2d9..58e9a9ea 100644 --- a/src/mwpf_solver.rs +++ b/src/mwpf_solver.rs @@ -26,16 +26,16 @@ use std::sync::Arc; pub trait PrimalDualSolver { fn clear(&mut self); - fn solve_visualizer(&mut self, syndrome_pattern: &SyndromePattern, visualizer: Option<&mut Visualizer>); - fn solve(&mut self, syndrome_pattern: &SyndromePattern) { - self.solve_visualizer(syndrome_pattern, None) + fn solve_visualizer(&mut self, syndrome_pattern: &SyndromePattern, visualizer: Option<&mut Visualizer>, seed: u64); + fn solve(&mut self, syndrome_pattern: &SyndromePattern, seed: u64) { + self.solve_visualizer(syndrome_pattern, None, seed) } - fn subgraph_range_visualizer(&mut self, visualizer: Option<&mut Visualizer>) -> (Subgraph, WeightRange); - fn subgraph_range(&mut self) -> (Subgraph, WeightRange) { - self.subgraph_range_visualizer(None) + fn subgraph_range_visualizer(&mut self, visualizer: Option<&mut Visualizer>, seed: u64) -> (Subgraph, WeightRange); + fn subgraph_range(&mut self, seed: u64) -> (Subgraph, WeightRange) { + self.subgraph_range_visualizer(None, seed) } - fn subgraph(&mut self) -> Subgraph { - self.subgraph_range().0 + fn subgraph(&mut self, seed: u64) -> Subgraph { + self.subgraph_range(seed).0 } fn sum_dual_variables(&self) -> Rational; fn generate_profiler_report(&self) -> serde_json::Value; @@ -52,15 +52,15 @@ macro_rules! bind_trait_to_python { } #[pyo3(name = "solve")] // in Python, `solve` and `solve_visualizer` is the same because it can take optional parameter fn trait_solve(&mut self, syndrome_pattern: &SyndromePattern, visualizer: Option<&mut Visualizer>) { - self.solve_visualizer(syndrome_pattern, visualizer) + self.solve_visualizer(syndrome_pattern, visualizer, 0) } #[pyo3(name = "subgraph_range")] // in Python, `subgraph_range` and `subgraph_range_visualizer` is the same fn trait_subgraph_range(&mut self, visualizer: Option<&mut Visualizer>) -> (Subgraph, WeightRange) { - self.subgraph_range_visualizer(visualizer) + self.subgraph_range_visualizer(visualizer, 0) } #[pyo3(name = "subgraph")] fn trait_subgraph(&mut self, visualizer: Option<&mut Visualizer>) -> Subgraph { - self.subgraph_range_visualizer(visualizer).0 + self.subgraph_range_visualizer(visualizer, 0).0 } #[pyo3(name = "sum_dual_variables")] fn trait_sum_dual_variables(&self) -> PyResult> { @@ -120,8 +120,9 @@ impl SolverSerialPlugins { primal_module.config = config.primal.clone(); Self { dual_module: DualModulePQ::new_empty(initializer), + // dual_module: DualModuleSerial::new_empty(initializer), primal_module, - interface_ptr: DualModuleInterfacePtr::new(model_graph.clone()), + interface_ptr: DualModuleInterfacePtr::new(), model_graph, } } @@ -133,7 +134,7 @@ impl PrimalDualSolver for SolverSerialPlugins { self.dual_module.clear(); self.interface_ptr.clear(); } - fn solve_visualizer(&mut self, syndrome_pattern: &SyndromePattern, visualizer: Option<&mut Visualizer>) { + fn solve_visualizer(&mut self, syndrome_pattern: &SyndromePattern, visualizer: Option<&mut Visualizer>, seed: u64) { let syndrome_pattern = Arc::new(syndrome_pattern.clone()); if !syndrome_pattern.erasures.is_empty() { unimplemented!(); @@ -146,15 +147,17 @@ impl PrimalDualSolver for SolverSerialPlugins { ); debug_assert!( { - let subgraph = self.subgraph(); + let subgraph = self.subgraph(seed); self.model_graph .matches_subgraph_syndrome(&subgraph, &syndrome_pattern.defect_vertices) }, "the subgraph does not generate the syndrome" ); } - fn subgraph_range_visualizer(&mut self, visualizer: Option<&mut Visualizer>) -> (Subgraph, WeightRange) { - let (subgraph, weight_range) = self.primal_module.subgraph_range(&self.interface_ptr, &mut self.dual_module); + fn subgraph_range_visualizer(&mut self, visualizer: Option<&mut Visualizer>, seed: u64) -> (Subgraph, WeightRange) { + let (subgraph, weight_range) = self + .primal_module + .subgraph_range(&self.interface_ptr, seed); if let Some(visualizer) = visualizer { visualizer .snapshot_combined( @@ -182,11 +185,20 @@ macro_rules! bind_primal_dual_solver_trait { fn clear(&mut self) { self.0.clear() } - fn solve_visualizer(&mut self, syndrome_pattern: &SyndromePattern, visualizer: Option<&mut Visualizer>) { - self.0.solve_visualizer(syndrome_pattern, visualizer) + fn solve_visualizer( + &mut self, + syndrome_pattern: &SyndromePattern, + visualizer: Option<&mut Visualizer>, + seed: u64, + ) { + self.0.solve_visualizer(syndrome_pattern, visualizer, seed) } - fn subgraph_range_visualizer(&mut self, visualizer: Option<&mut Visualizer>) -> (Subgraph, WeightRange) { - self.0.subgraph_range_visualizer(visualizer) + fn subgraph_range_visualizer( + &mut self, + visualizer: Option<&mut Visualizer>, + seed: u64, + ) -> (Subgraph, WeightRange) { + self.0.subgraph_range_visualizer(visualizer, seed) } fn sum_dual_variables(&self) -> Rational { self.0.sum_dual_variables() @@ -320,7 +332,7 @@ impl SolverErrorPatternLogger { impl PrimalDualSolver for SolverErrorPatternLogger { fn clear(&mut self) {} - fn solve_visualizer(&mut self, syndrome_pattern: &SyndromePattern, _visualizer: Option<&mut Visualizer>) { + fn solve_visualizer(&mut self, syndrome_pattern: &SyndromePattern, _visualizer: Option<&mut Visualizer>, _seed: u64) { self.file .write_all( serde_json::to_string(&serde_json::json!(syndrome_pattern)) @@ -330,7 +342,7 @@ impl PrimalDualSolver for SolverErrorPatternLogger { .unwrap(); self.file.write_all(b"\n").unwrap(); } - fn subgraph_range_visualizer(&mut self, _visualizer: Option<&mut Visualizer>) -> (Subgraph, WeightRange) { + fn subgraph_range_visualizer(&mut self, _visualizer: Option<&mut Visualizer>, _seed: u64) -> (Subgraph, WeightRange) { panic!("error pattern logger do not actually solve the problem, please use Verifier::None by `--verifier none`") } fn sum_dual_variables(&self) -> Rational { diff --git a/src/ordered_float.rs b/src/ordered_float.rs new file mode 100644 index 00000000..df70ea02 --- /dev/null +++ b/src/ordered_float.rs @@ -0,0 +1,349 @@ +#[cfg(not(feature = "f32_weight"))] +type BaseFloat = f64; +#[cfg(feature = "f32_weight")] +type BaseFloat = f32; // there's actually no point in using this, as HIGHs don't support f32 + +use num_traits::Zero; +use crate::pointers::ArcRwLock; + +const EPSILON: BaseFloat = 1e-4; // note: it would be interesting to play around with this. + +#[derive(Debug, Clone, Copy)] +pub struct OrderedFloat(BaseFloat); + +impl OrderedFloat { + pub fn new(value: BaseFloat) -> Self { + Self(value) + } + pub fn numer(&self) -> BaseFloat { + self.0 + } + pub fn denom(&self) -> BaseFloat { + 1.0 + } + pub fn set_zero(&mut self) { + self.0 = 0.0; + } + + pub fn recip(&self) -> Self { + Self::new(1.0 / self.0) + } + pub fn new_raw(numer: i32, denom: i32) -> Self { + Self::new(numer as BaseFloat / denom as BaseFloat) + } + + pub fn floor(&self) -> Self { + Self::new(self.0.floor()) + } +} + +// Implement num_traits +impl num_traits::Zero for OrderedFloat { + fn zero() -> Self { + Self::new(0.0) + } + fn is_zero(&self) -> bool { + self.0.abs() < EPSILON + } +} +impl num_traits::One for OrderedFloat { + fn one() -> Self { + Self::new(1.0) + } + fn is_one(&self) -> bool { + (self.0 - 1.0).abs() < EPSILON + } +} +impl num_traits::Signed for OrderedFloat { + fn is_negative(&self) -> bool { + !self.is_zero() && self.0 < 0.0 + } + fn is_positive(&self) -> bool { + !self.is_zero() && self.0 > 0.0 + } + fn abs(&self) -> Self { + Self::new(self.0.abs()) + } + fn abs_sub(&self, other: &Self) -> Self { + (self - other).max(OrderedFloat::zero()) + } + fn signum(&self) -> Self { + Self::new(self.0.signum()) + } +} +impl num_traits::Num for OrderedFloat { + type FromStrRadixErr = num_traits::ParseFloatError; + fn from_str_radix(str: &str, radix: u32) -> Result { + match BaseFloat::from_str_radix(str, radix) { + Ok(value) => Ok(Self::new(value)), + Err(err) => Err(err), + } + } +} +impl num_traits::FromPrimitive for OrderedFloat { + fn from_i64(n: i64) -> Option { + Some(Self::new(n as BaseFloat)) + } + fn from_u64(n: u64) -> Option { + Some(Self::new(n as BaseFloat)) + } + fn from_f64(n: f64) -> Option { + Some(Self::new(n)) + } + fn from_usize(n: usize) -> Option { + Some(Self::new(n as BaseFloat)) + } +} +impl num_traits::ToPrimitive for OrderedFloat { + fn to_i64(&self) -> Option { + Some(self.0 as i64) + } + fn to_u64(&self) -> Option { + Some(self.0 as u64) + } + #[allow(clippy::unnecessary_cast)] + fn to_f64(&self) -> Option { + Some(self.0 as f64) + } +} + +// Implement std ops +impl std::ops::Rem for OrderedFloat { + type Output = Self; + fn rem(self, other: Self) -> Self { + Self::new(self.0 % other.0) + } +} +impl std::ops::Neg for OrderedFloat { + type Output = Self; + fn neg(self) -> Self { + Self::new(-self.0) + } +} +impl std::ops::Neg for &OrderedFloat { + type Output = OrderedFloat; + fn neg(self) -> OrderedFloat { + OrderedFloat::new(-self.0) + } +} + +// Implement add, sub, mul, div operations, with assign operations, references, by macros +macro_rules! impl_ops { + ($trait:ident, $method:ident) => { + impl std::ops::$trait for OrderedFloat { + type Output = Self; + fn $method(self, other: Self) -> Self { + Self::new(self.0.$method(other.0)) + } + } + impl std::ops::$trait<&OrderedFloat> for OrderedFloat { + type Output = Self; + fn $method(self, other: &Self) -> Self { + Self::new(self.0.$method(other.0)) + } + } + impl std::ops::$trait for &OrderedFloat { + type Output = OrderedFloat; + fn $method(self, other: OrderedFloat) -> OrderedFloat { + OrderedFloat::new(self.0.$method(other.0)) + } + } + impl std::ops::$trait<&OrderedFloat> for &OrderedFloat { + type Output = OrderedFloat; + fn $method(self, other: &OrderedFloat) -> OrderedFloat { + OrderedFloat::new(self.0.$method(other.0)) + } + } + }; +} +impl_ops!(Add, add); +impl_ops!(Sub, sub); +impl_ops!(Mul, mul); +impl_ops!(Div, div); + +// Implement assign operations +macro_rules! impl_assign_ops { + ($trait:ident, $method:ident, $op:tt) => { + #[allow(clippy::assign_op_pattern)] + impl std::ops::$trait for OrderedFloat { + fn $method(&mut self, other: Self) { + *self = *self $op other; + } + } + impl std::ops::$trait<&OrderedFloat> for OrderedFloat { + fn $method(&mut self, other: &Self) { + *self = *self $op other; + } + } + // impl std::ops::$trait<&f32> for OrderedFloat { + // fn $method(&mut self, other: &f32) { + // self.0 = self.0 $op *other as BaseFloat; + // } + // } + // impl std::ops::$trait<&f64> for OrderedFloat { + // fn $method(&mut self, other: &f64) { + // self.0 = self.0 $op *other as BaseFloat; + // } + // } + }; + } +impl_assign_ops!(AddAssign, add_assign, +); +impl_assign_ops!(SubAssign, sub_assign, -); +impl_assign_ops!(MulAssign, mul_assign, *); +impl_assign_ops!(DivAssign, div_assign, /); + +// Implement other std traits +impl std::str::FromStr for OrderedFloat { + type Err = std::num::ParseFloatError; + fn from_str(s: &str) -> Result { + Ok(Self::new(f64::from_str(s)?)) + } +} +impl std::hash::Hash for OrderedFloat { + fn hash(&self, state: &mut H) { + self.0.to_bits().hash(state); + } +} +impl std::fmt::Display for OrderedFloat { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +// Implement Eq +impl Eq for OrderedFloat {} + +// Implement PartialEq +impl PartialEq for OrderedFloat { + fn eq(&self, other: &Self) -> bool { + (self.0 - other.0).abs() < EPSILON + } +} +impl PartialEq for OrderedFloat { + fn eq(&self, other: &f64) -> bool { + (self.0 - other).abs() < EPSILON + } +} +impl PartialEq for f64 { + fn eq(&self, other: &OrderedFloat) -> bool { + (*self - other.0).abs() < EPSILON + } +} + +// Implement PartialOrd +impl PartialOrd for OrderedFloat { + #[allow(clippy::non_canonical_partial_ord_impl)] + fn partial_cmp(&self, other: &Self) -> Option { + if (self.0 - other.0).abs() < EPSILON { + Some(std::cmp::Ordering::Equal) + } else { + self.0.partial_cmp(&other.0) + } + } +} + +// Implement Ord +impl Ord for OrderedFloat { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.partial_cmp(other).unwrap() + } +} + +// Implement From for OrderedFloat +impl From for OrderedFloat { + fn from(value: BaseFloat) -> Self { + OrderedFloat::new(value) + } +} + +// Implement Default +impl Default for OrderedFloat { + fn default() -> Self { + Self::new(0.0) + } +} + +// Implement Sum for OrderedFloat +impl std::iter::Sum for OrderedFloat { + fn sum>(iter: I) -> Self { + iter.fold(Self::zero(), std::ops::Add::add) + } +} + +// Implement Sum for references to OrderedFloat +impl<'a> std::iter::Sum<&'a OrderedFloat> for OrderedFloat { + fn sum>(iter: I) -> Self { + iter.fold(Self::zero(), |acc, &item| acc + item) + } +} + +// comparisons using references +impl PartialEq<&OrderedFloat> for OrderedFloat { + fn eq(&self, other: &&Self) -> bool { + (self.0 - other.0).abs() < EPSILON + } +} + +impl PartialEq for &OrderedFloat { + fn eq(&self, other: &OrderedFloat) -> bool { + (self.0 - other.0).abs() < EPSILON + } +} + +// impl PartialEq<&OrderedFloat> for &OrderedFloat { +// fn eq(&self, other: &&OrderedFloat) -> bool { +// (self.0 - other.0).abs() < EPSILON +// } +// } + +impl PartialOrd<&OrderedFloat> for OrderedFloat { + fn partial_cmp(&self, other: &&Self) -> Option { + if (self.0 - other.0).abs() < EPSILON { + Some(std::cmp::Ordering::Equal) + } else { + self.0.partial_cmp(&other.0) + } + } +} + +impl PartialOrd for &OrderedFloat { + fn partial_cmp(&self, other: &OrderedFloat) -> Option { + if (self.0 - other.0).abs() < EPSILON { + Some(std::cmp::Ordering::Equal) + } else { + self.0.partial_cmp(&other.0) + } + } +} + +impl std::fmt::Debug for ArcRwLock { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "[ordered_float: {}]", self.read_recursive().0) + // write!( + // f, + // "[edge: {}]: weight: {}, grow_rate: {}, growth_at_last_updated_time: {}, last_updated_time: {}\n\tdual_nodes: {:?}\n", + // edge.edge_index, + // edge.weight, + // edge.grow_rate, + // edge.growth_at_last_updated_time, + // edge.last_updated_time, + // edge.dual_nodes.iter().filter(|node| !node.weak_ptr.upgrade_force().read_recursive().grow_rate.is_zero()).collect::>() + // ) + } +} + +// impl PartialOrd<&OrderedFloat> for &OrderedFloat { +// fn partial_cmp(&self, other: &&OrderedFloat) -> Option { +// if (self.0 - other.0).abs() < EPSILON { +// Some(std::cmp::Ordering::Equal) +// } else { +// self.0.partial_cmp(&other.0) +// } +// } +// } + +// impl Ord for &OrderedFloat { +// fn cmp(&self, other: &Self) -> std::cmp::Ordering { +// self.partial_cmp(other).unwrap() +// } +// } diff --git a/src/pheap/.gitignore b/src/pheap/.gitignore new file mode 100644 index 00000000..1490cb65 --- /dev/null +++ b/src/pheap/.gitignore @@ -0,0 +1,5 @@ +/data +/tmp +/target +Cargo.lock +massif* diff --git a/src/pheap/Cargo.toml b/src/pheap/Cargo.toml new file mode 100644 index 00000000..7a522856 --- /dev/null +++ b/src/pheap/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "pheap" +version = "0.3.0" +edition = "2018" +authors = ["1crcbl <1crcbl@protonmail.com>"] +license = "MIT OR Apache-2.0" +exclude = [ + "img/*", + "scripts/*", +] + +readme = "README.md" +description = "A (fast) implementation of pairing heap data structure for priority queue and some graph algorithms" + +keywords = ["pairing", "heap", "priority", "queue", "graph"] +categories = ["algorithms", "data-structures"] + +repository = "https://github.com/1crcbl/pheap-rs" + +[dependencies] +num-traits = "0.2.14" + +[dev-dependencies] +criterion = { version = "0.3", features = ["html_reports"] } +priority-queue = "1.1.1" +keyed_priority_queue = "0.3.2" +addressable-pairing-heap = "0.2.0" +pairing-heap = "0.1.0" +clap = "2.33.3" +pathfinding = "2.1.5" + +[[bench]] +name = "heap" +harness = false diff --git a/src/pheap/README.md b/src/pheap/README.md new file mode 100644 index 00000000..41a575a9 --- /dev/null +++ b/src/pheap/README.md @@ -0,0 +1,141 @@ +# Pairing Heap + +[![Crates.io](https://img.shields.io/crates/v/pheap)](https://crates.io/crates/pheap) [![Documentation](https://docs.rs/pheap/badge.svg)](https://docs.rs/pheap) + +From [Wikipedia](https://en.wikipedia.org/wiki/Pairing_heap): +> A pairing heap is a type of heap data structure with relatively simple implementation and excellent practical amortized performance. +> Pairing heaps are heap-ordered multiway tree structures, and can be considered simplified Fibonacci heaps. They are considered a "robust choice" for implementing such algorithms as Prim's MST algorithm. + +A min-pairing heap supports the following operations: +- ```find_min```: finds the minimum element of the heap, which is the root. +- ```merge```: combines two heaps together. +- ```insert```: adds a new element into the heap. +- ```delete_min```: remove the root and reorder its children nodes. +- ```decrease_key```: decrease the priority of an element. Standard implementation of a heap data structure does not support searching for a key efficiently (which is the case in this crate). Thus, this operation can take very long time, with an upper bound of ```O(2^(sqrt(log log n)))```. + +The crate also comes with an efficient implementation of Dijkstra's algorithm to solve the single source shortest path problem and Prim's algorithm for finding minimum spanning tree. + +## Benchmarks +To measure the performance of this implementation, I choose the following libraries that are available on [crates.io](https://crates.io/) to experiment: +- [Addressable pairing heap](https://crates.io/crates/addressable-pairing-heap) +- [Apasel422's implementation of pairing heap](https://crates.io/crates/pairing-heap) +- [Priority queue](https://crates.io/crates/priority-queue) +- [Keyed priority queue](https://crates.io/crates/keyed_priority_queue) + +*If I miss any libraries, please let me know.* + +The experiments are conducted on my PC with the following spec: +> OS: Fedora 34 64-bit +> CPU: AMD® Ryzen 7 3800x 8-core processor +> RAM: 32 GB + +### Experiment 1 +> Each implementation is tasked to execute 1000 insertions / 0 deletes, then 999 insertions / 1 deletes (remove the top element), until the number of deletes is 1000. This means each implementation has to execute 500_500 insertions and 500_500 deletions. + +For this experiment, I use the crate [```criterion```](https://crates.io/crates/criterion) to measure the performance of each implementation. + +| | Pairing heap
(this crate) | Addressable pairing heap | Pairing heap
(Apasel422) | Priority queue | Keyed priority queue +--- | --- | --- | --- | --- | --- +|Average time
(milliseconds)|20.37|56.6|24.18|116.84|111.30| + +### Experiment 2 +> Each implementation is tasked to execute 1000 insertions / 1000 priority update / 0 deletes, then 999 insertions / 999 priority updates | 1 deletes (remove the top element), until the number of deletes is 1000. + +| | Pairing heap
(this crate) | Addressable pairing heap | Pairing heap
(Apasel422) | Priority queue | Keyed priority queue +--- | --- | --- | --- | --- | --- +|Average time
(seconds)|1.399|No implementation|No implementation|0.171|0.142| + +For this experiment, the pairing heap fairs worse than other two libraries. This is due to the fact that pairing heap data structures must search for keys, which in worse cases takes ```O(n)``` time, while other implementations leverage the fast lookup power from hash map. + +### Experiment 3 +> Each implementation is tasked to insert 1 million elements and the memory consumption will be measured. + +For this experiment, I write a simple ```main``` (in ```examples/stress.rs```) and use ```valgrind``` with ```massif``` for the evaluation purpose. + +To compile: +```batch +cargo build --examples --release +``` + +To run valgrind: +```batch +valgrind --tool=massif ./target/release/examples/stress +``` + +The commandline argument `````` accepts the following options: +- ```pairing_heap``` +- ```priority_queue``` +- ```keyed_priority_queue``` +- ```addressable_pairing_heap``` +- ```ap422_pairing_heap``` + +| | Pairing heap
(this crate) | Addressable pairing heap | Pairing heap
(Apasel422) | Priority queue | Keyed priority queue +--- | --- | --- | --- | --- | --- +|Peak heap
memory consumption
(MB)|30.5|72.0|segfault|62|76| + +The image outputs of ```massif-visualiser``` are stored in the folder ```img```. + +## Dijkstra's algorithm +To test the performance of Dijkstra's algorithm with pairing heap, I use the [DIMACS dataset](http://www.diag.uniroma1.it/challenge9/download.shtml). You can download all datasets by using the python script with the following command: + +```python +python3 scripts/download.py -d dimacs-all --dest data/ +``` + +On [crates.io](https://crates.io/) there are several libraries that have Dijkstra's algorithm but I only find the crate [`pathfinding`](https://crates.io/crates/pathfinding) performant (please let me know if I miss any crate). + +For this experiment, all implementations are tasked to solve the shortest path problem on all DIMACS dataset and I take the average runtime after ten runs. + +**Note:** the function `dijkstra_all` of `pathfinding` returns only the direct parent node for a queried node, instead of an entire path, the function `sssp_dijkstra_lazy` is used for my implementation of Dijkstra's algorithm. This function returns a result which is (kind of) equivalent to what `pathfinding` delivers. By doing so, we can compare the solving time of both implementations, while ignoring the path building time. + +Time is measured in millisecond: + +| | Number of nodes | Number of edges | pheap | pathfinding +--- | --- | --- | --- | --- | +|DIMACS-NY| 264_346 | 733_846 | 88 |110| +|DIMACS-BAY| 321_270 | 800_172 | 94 |127| +|DIMACS-COL| 435_666 | 1_057_066 | 126 |172| +|DIMACS-FLA| 1_070_376 | 2_712_798 | 377 |626| +|DIMACS-NW| 1_207_945 | 2_840_208 | 456 |665| +|DIMACS-NE| 1_524_453 | 3_897_636 | 619 |852| +|DIMACS-CAL| 1_890_815 | 4_657_742 | 740 |1_246| +|DIMACS-LKS| 2_758_119 | 6_885_658 | 1_141 |1_695| +|DIMACS-E| 3_598_623 | 8_778_114 | 1_548 |2_151| +|DIMACS-W| 6_262_104 | 15_248_146 | 3_098 |4_460| +|DIMACS-CTR| 14_081_816 | 34_292_496 | 10_183 |11_256| +|DIMACS-USA| 23_947_347 | 58_333_344 | 16_678 |20_896| + +## Minimum spanning tree +In this experiment, I measure the performance of both libraries in finding the MST. However, there are several differences between two crates that are worth mentioning: firstly, while ```pathfinding``` uses Kruskal's algorithm, I implement only the Prim's algorithm using the pairing heap. Secondly, ```pathfinding```'s implementation returns only the iterators of edges and it is the task of users to collect these iterators and (re)construct the MST. On the other hand, my implementation returns the complete graph and total weight of an MST. Thus, I run two experiments for ```pheap```, one solving without building MST, and the other for both solving and building MST. + +Average time after ten runs, measured in milliesecond: + +| | Number of nodes | Number of edges | pheap
(Solve) | pheap
(Solve + Build) | pathfinding +--- | --- | --- | --- | --- | --- | +|DIMACS-NY| 264_346 | 733_846 | 78 |140 | 132| +|DIMACS-BAY| 321_270 | 800_172 | 93 | 170 | 140| +|DIMACS-COL| 435_666 | 1_057_066 | 132 | 243 | 191| +|DIMACS-FLA| 1_070_376 | 2_712_798 | 358 | 727 |598| +|DIMACS-NW| 1_207_945 | 2_840_208 | 409 | 863 | 622| +|DIMACS-NE| 1_524_453 | 3_897_636 | 565 | 1_144 | 845| +|DIMACS-CAL| 1_890_815 | 4_657_742 | 715 | 1_553 | 1_148| +|DIMACS-LKS| 2_758_119 | 6_885_658 | 1_093 | 2_307 | 1_641| +|DIMACS-E| 3_598_623 | 8_778_114 | 1_452 | 3_100 | 2_125| +|DIMACS-W| 6_262_104 | 15_248_146 | 2_618 | 5_732 | 4_042| +|DIMACS-CTR| 14_081_816 | 34_292_496 | 7_371 | 16_470 |9_712| +|DIMACS-USA| 23_947_347 | 58_333_344 | 11_785 | 25_450 |17_943| + +## License + +Licensed under either of + + * Apache License, Version 2.0 + ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license + ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +## Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. \ No newline at end of file diff --git a/src/pheap/benches/heap.rs b/src/pheap/benches/heap.rs new file mode 100644 index 00000000..5f79077e --- /dev/null +++ b/src/pheap/benches/heap.rs @@ -0,0 +1,159 @@ +use criterion::{criterion_group, criterion_main, Criterion}; +use keyed_priority_queue::KeyedPriorityQueue; +use pheap::PairingHeap; +use priority_queue::PriorityQueue; + +enum Queue { + PairingHeap(PairingHeap), + AdPairingHeap(addressable_pairing_heap::PairingHeap), + A422PairingHeap(pairing_heap::PairingHeap), + PriorityQueue(PriorityQueue), + KeyedPriorityQueue(KeyedPriorityQueue), +} + +impl Queue { + fn insert(&mut self, key: i32) { + match self { + Queue::PairingHeap(ph) => ph.insert(key, key), + Queue::AdPairingHeap(ph) => { + ph.push(key, key); + } + Queue::PriorityQueue(pq) => { + pq.push(key, key); + } + Queue::A422PairingHeap(ph) => { + ph.push(key); + } + Queue::KeyedPriorityQueue(pq) => { + pq.push(key, key); + } + } + } + + fn update(&mut self, key: i32, delta: i32) { + match self { + Queue::PairingHeap(ph) => ph.decrease_prio(&key, delta), + Queue::AdPairingHeap(_) => unimplemented!(), + Queue::PriorityQueue(pq) => { + if let Some(q) = pq.get_priority(&key).cloned() { + pq.change_priority(&key, q - 10); + } + } + Queue::A422PairingHeap(_) => unimplemented!(), + Queue::KeyedPriorityQueue(pq) => { + if let Some(q) = pq.get_priority(&key).cloned() { + pq.set_priority(&key, q - 10).unwrap(); + } + } + } + } + + fn delete(&mut self) { + match self { + Queue::PairingHeap(ph) => { + ph.delete_min(); + } + Queue::AdPairingHeap(ph) => { + ph.pop(); + } + Queue::PriorityQueue(pq) => { + pq.pop(); + } + Queue::A422PairingHeap(ph) => { + ph.pop(); + } + Queue::KeyedPriorityQueue(pq) => { + pq.pop(); + } + }; + } +} + +enum QueueKind { + PairingHeap, + AdPairingHeap, + A422PairingHeap, + PriorityQueue, + KeyedPriorityQueue, +} + +impl QueueKind { + fn create(&self) -> Queue { + match self { + QueueKind::PairingHeap => Queue::PairingHeap(PairingHeap::::new()), + QueueKind::AdPairingHeap => { + Queue::AdPairingHeap(addressable_pairing_heap::PairingHeap::::new()) + } + QueueKind::PriorityQueue => Queue::PriorityQueue(PriorityQueue::::new()), + QueueKind::A422PairingHeap => { + Queue::A422PairingHeap(pairing_heap::PairingHeap::::new()) + } + QueueKind::KeyedPriorityQueue => { + Queue::KeyedPriorityQueue(KeyedPriorityQueue::::new()) + } + } + } + + fn bench(&self, op: i32, update: bool) { + let mut insert_op = op + 1; + let mut update_op = insert_op; + let mut delete_op = 0; + let mut count = 0; + + let mut q = self.create(); + + while insert_op > -1 { + for _ in 0..insert_op { + q.insert(count); + count += 1; + } + + if update { + for ii in 0..update_op { + q.update(count - ii, 10); + } + } + + for _ in 0..delete_op { + q.delete(); + } + + insert_op -= 1; + update_op -= 1; + delete_op += 1; + } + } +} + +fn no_change_prio(c: &mut Criterion) { + c.bench_function("Pairing Heap (1000)", |b| { + b.iter(|| QueueKind::PairingHeap.bench(1_000, false)) + }); + c.bench_function("Addressable Pairing Heap (1000)", |b| { + b.iter(|| QueueKind::AdPairingHeap.bench(1_000, false)) + }); + c.bench_function("APasel422's Pairing Heap (1000)", |b| { + b.iter(|| QueueKind::A422PairingHeap.bench(1_000, false)) + }); + c.bench_function("Priority Queue (1000)", |b| { + b.iter(|| QueueKind::PriorityQueue.bench(1_000, false)) + }); + c.bench_function("Keyed Priority Queue (1000)", |b| { + b.iter(|| QueueKind::KeyedPriorityQueue.bench(1_000, false)) + }); +} + +fn with_change_prio(c: &mut Criterion) { + c.bench_function("Pairing Heap (1000) | Priority Update", |b| { + b.iter(|| QueueKind::PairingHeap.bench(1_000, true)) + }); + c.bench_function("Priority Queue (1000) | Priority Update", |b| { + b.iter(|| QueueKind::PriorityQueue.bench(1_000, true)) + }); + c.bench_function("Keyed Priority Queue (1000) | Priority Update", |b| { + b.iter(|| QueueKind::KeyedPriorityQueue.bench(1_000, true)) + }); +} + +criterion_group!(benches, no_change_prio, with_change_prio); +criterion_main!(benches); diff --git a/src/pheap/examples/dijkstra.rs b/src/pheap/examples/dijkstra.rs new file mode 100644 index 00000000..095a6edb --- /dev/null +++ b/src/pheap/examples/dijkstra.rs @@ -0,0 +1,160 @@ +use std::{ + collections::HashMap, + fs::File, + io::{BufRead, BufReader}, +}; + +use clap::{App, Arg}; +use pathfinding::prelude::dijkstra_all; +use pheap::graph::SimpleGraph; + +fn main() { + let matches = App::new("Single source shortest path benchmark") + .arg( + Arg::with_name("file") + .short("f") + .long("file") + .takes_value(true) + .required(true) + .help("Path to a DIMACS file."), + ) + .arg( + Arg::with_name("lib") + .long("lib") + .takes_value(true) + .required(true) + .help("The library to be used to solve the shortest path problem. Options: pheap | fast_paths."), + ) + .arg( + Arg::with_name("runs") + .long("runs") + .takes_value(true) + .default_value("5") + .help("Number of runs for search query."), + ) + .get_matches(); + + let filepath = match matches.value_of("file") { + Some(fp) => fp, + None => std::process::exit(1), + }; + + let runs = matches + .value_of("runs") + .unwrap() + .to_string() + .parse::() + .unwrap(); + + match matches.value_of("lib") { + Some(lib) => match lib { + "pheap" => graph(filepath, runs), + "pathfinding" => pathfinding(filepath, runs), + _ => std::process::exit(1), + }, + None => std::process::exit(1), + }; +} + +macro_rules! run_exp { + ($runs:expr, $exe:stmt) => { + let mut durations = Vec::with_capacity($runs); + + for ii in 0..$runs { + println!("> Run {}/{}", ii + 1, $runs); + let start = std::time::Instant::now(); + $exe + let end = std::time::Instant::now() - start; + println!( + "> Time taken to solve the problem: {} (ms)", + end.as_millis() + ); + durations.push(end.as_millis()); + } + + let avg = durations.iter().sum::() as usize; + println!("Average time: {} (ms)", avg / $runs); + }; +} + +fn graph(filepath: &str, runs: usize) { + println!("> Load file: {}", filepath); + + let file = File::open(filepath).unwrap(); + let mut reader = BufReader::new(file); + + let mut n_nodes = 0; + let mut _n_edges = 0; + + for _ in 0..7 { + let mut line = String::new(); + reader.read_line(&mut line).unwrap(); + + if !line.is_empty() && line.starts_with('p') { + let s = line.trim().split_whitespace().collect::>(); + n_nodes = s[2].parse::().unwrap(); + _n_edges = s[3].parse::().unwrap(); + } + } + + let mut g = SimpleGraph::::with_capacity(n_nodes); + + for line in reader.lines() { + let (node1, node2, weight) = parse_line(&line.unwrap()); + g.add_weighted_edges(node1, node2, weight); + } + + println!("> Graph created."); + + run_exp!(runs, let _ = g.sssp_dijkstra_lazy(10_000)); +} + +fn pathfinding(filepath: &str, runs: usize) { + println!("> Load file: {}", filepath); + + let file = File::open(filepath).unwrap(); + let mut reader = BufReader::new(file); + + for _ in 0..7 { + let mut line = String::new(); + reader.read_line(&mut line).unwrap(); + } + + fn insert_weight( + hm: &mut HashMap>, + node1: usize, + node2: usize, + weight: u32, + ) { + match hm.get_mut(&node1) { + Some(v) => { + v.push((node2, weight)); + } + None => { + let v = vec![(node2, weight)]; + hm.insert(node1, v); + } + } + } + + let mut hm = HashMap::>::new(); + + for line in reader.lines() { + let (node1, node2, weight) = parse_line(&line.unwrap()); + insert_weight(&mut hm, node1, node2, weight); + insert_weight(&mut hm, node2, node1, weight); + } + + run_exp!(runs, let _ = dijkstra_all(&0, |x| { + let nbs = hm.get(x).unwrap(); + nbs.iter().map(|(idx, w)| (*idx, *w)) + })); +} + +fn parse_line(line: &str) -> (usize, usize, u32) { + let s = line.trim().split_whitespace().collect::>(); + let node1 = s[1].parse::().unwrap() - 1; + let node2 = s[2].parse::().unwrap() - 1; + let weight = s[3].parse::().unwrap(); + (node1, node2, weight) +} diff --git a/src/pheap/examples/mst.rs b/src/pheap/examples/mst.rs new file mode 100644 index 00000000..e6f94f72 --- /dev/null +++ b/src/pheap/examples/mst.rs @@ -0,0 +1,148 @@ +use std::{ + fs::File, + io::{BufRead, BufReader}, +}; + +use clap::{App, Arg}; +use pathfinding::prelude::kruskal; +use pheap::graph::{mst_prim, SimpleGraph}; + +macro_rules! run_exp { + ($runs:expr, $exe:stmt) => { + let mut durations = Vec::with_capacity($runs); + + for ii in 0..$runs { + println!("> Run {}/{}", ii + 1, $runs); + let start = std::time::Instant::now(); + $exe + let end = std::time::Instant::now() - start; + println!( + "> Time taken to solve the problem: {} (ms)", + end.as_millis() + ); + durations.push(end.as_millis()); + } + + let avg = durations.iter().sum::() as usize; + println!("Average time: {} (ms)", avg / $runs); + }; +} + +fn main() { + let matches = App::new("Single source shortest path benchmark") + .arg( + Arg::with_name("file") + .short("f") + .long("file") + .takes_value(true) + .required(true) + .help("Path to a DIMACS file."), + ) + .arg( + Arg::with_name("lib") + .long("lib") + .takes_value(true) + .required(true) + .help("The library to be used to solve the shortest path problem. Options: pheap | fast_paths."), + ) + .arg( + Arg::with_name("runs") + .long("runs") + .takes_value(true) + .default_value("5") + .help("Number of runs for search query."), + ) + .get_matches(); + + let filepath = match matches.value_of("file") { + Some(fp) => fp, + None => std::process::exit(1), + }; + + let runs = matches + .value_of("runs") + .unwrap() + .to_string() + .parse::() + .unwrap(); + + match matches.value_of("lib") { + Some(lib) => match lib { + "pheap" => graph(filepath, runs), + "pathfinding" => pathfinding(filepath, runs), + _ => std::process::exit(1), + }, + None => std::process::exit(1), + }; +} + +fn graph(filepath: &str, runs: usize) { + println!("> Load file: {}", filepath); + + let file = File::open(filepath).unwrap(); + let mut reader = BufReader::new(file); + + let mut n_nodes = 0; + let mut _n_edges = 0; + + for _ in 0..7 { + let mut line = String::new(); + reader.read_line(&mut line).unwrap(); + + if !line.is_empty() && line.starts_with('p') { + let s = line.trim().split_whitespace().collect::>(); + n_nodes = s[2].parse::().unwrap(); + _n_edges = s[3].parse::().unwrap(); + } + } + + let mut g = SimpleGraph::::with_capacity(n_nodes); + + for line in reader.lines() { + let (node1, node2, weight) = parse_line(&line.unwrap()); + g.add_weighted_edges(node1, node2, weight); + } + + println!("> Graph created."); + + run_exp!(runs, let _ = mst_prim(&g, 0)); +} + +fn pathfinding(filepath: &str, runs: usize) { + println!("> Load file: {}", filepath); + + let file = File::open(filepath).unwrap(); + let mut reader = BufReader::new(file); + + let mut _n_nodes = 0; + let mut n_edges = 0; + + for _ in 0..7 { + let mut line = String::new(); + reader.read_line(&mut line).unwrap(); + + if !line.is_empty() && line.starts_with('p') { + let s = line.trim().split_whitespace().collect::>(); + _n_nodes = s[2].parse::().unwrap(); + n_edges = s[3].parse::().unwrap(); + } + } + + let mut edges = Vec::with_capacity(n_edges); + + for line in reader.lines() { + let (node1, node2, weight) = parse_line(&line.unwrap()); + edges.push((node1, node2, weight)); + edges.push((node2, node1, weight)); + } + + run_exp!(runs, let _ = kruskal(&edges)); +} + +fn parse_line(line: &str) -> (usize, usize, u32) { + let s = line.trim().split_whitespace().collect::>(); + let node1 = s[1].parse::().unwrap() - 1; + let node2 = s[2].parse::().unwrap() - 1; + let weight = s[3].parse::().unwrap(); + (node1, node2, weight) +} diff --git a/src/pheap/examples/stress.rs b/src/pheap/examples/stress.rs new file mode 100644 index 00000000..f8b58ef8 --- /dev/null +++ b/src/pheap/examples/stress.rs @@ -0,0 +1,75 @@ +use keyed_priority_queue::KeyedPriorityQueue; +use pheap::PairingHeap; +use priority_queue::PriorityQueue; + +fn create_ph(n: i32) { + let mut ph = PairingHeap::::new(); + + for ii in 0..n { + ph.insert(ii, ii); + } +} + +fn create_pq(n: i32) { + let mut pq = PriorityQueue::::new(); + + for ii in 0..n { + pq.push(ii, ii); + } +} + +fn create_kpq(n: i32) { + let mut pq = KeyedPriorityQueue::::new(); + + for ii in 0..n { + pq.push(ii, ii); + } +} + +fn create_aph(n: i32) { + let mut aph = addressable_pairing_heap::PairingHeap::::new(); + + for ii in 0..n { + aph.push(ii, ii); + } +} + +fn create_a422ph(n: i32) { + let mut aph = pairing_heap::PairingHeap::::new(); + + for ii in 0..n { + aph.push(ii); + } +} + +fn main() { + let args: Vec = std::env::args().collect(); + if args.len() < 3 { + help(); + std::process::exit(1); + } + + let n = args[2].to_string().parse::().unwrap(); + + match args[1].as_str() { + "pairing_heap" => create_ph(n), + "priority_queue" => create_pq(n), + "keyed_priority_queue" => create_kpq(n), + "addressable_pairing_heap" => create_aph(n), + "ap422_pairing_heap" => create_a422ph(n), + _ => { + help(); + std::process::exit(1) + } + } +} + +fn help() { + println!("Usage: ./stress "); + println!("The following data structures are available for testing: "); + println!("> pairing_heap (this library)"); + println!("> addressable_pairing_heap (https://crates.io/crates/addressable-pairing-heap)"); + println!("> ap422_pairing_heap (https://crates.io/crates/pairing-heap)"); + println!("> priority_queue (from crate: https://crates.io/crates/priority-queue)"); + println!("> keyed_priority_queue (from crate: https://crates.io/crates/keyed_priority_queue)"); +} diff --git a/src/pheap/img/mem_addressable_pairing_heap.jpg b/src/pheap/img/mem_addressable_pairing_heap.jpg new file mode 100644 index 00000000..70dd5e5f Binary files /dev/null and b/src/pheap/img/mem_addressable_pairing_heap.jpg differ diff --git a/src/pheap/img/mem_keyed_priority_queue.jpg b/src/pheap/img/mem_keyed_priority_queue.jpg new file mode 100644 index 00000000..8d829d0a Binary files /dev/null and b/src/pheap/img/mem_keyed_priority_queue.jpg differ diff --git a/src/pheap/img/mem_pheap.jpg b/src/pheap/img/mem_pheap.jpg new file mode 100644 index 00000000..fe4fed7a Binary files /dev/null and b/src/pheap/img/mem_pheap.jpg differ diff --git a/src/pheap/img/mem_priority_queue.jpg b/src/pheap/img/mem_priority_queue.jpg new file mode 100644 index 00000000..e7f61146 Binary files /dev/null and b/src/pheap/img/mem_priority_queue.jpg differ diff --git a/src/pheap/scripts/dijkstra.py b/src/pheap/scripts/dijkstra.py new file mode 100644 index 00000000..843deab2 --- /dev/null +++ b/src/pheap/scripts/dijkstra.py @@ -0,0 +1,32 @@ +import networkx as nx +import time + +def dijkstra(filepath, runs): + print("> Parse file: {}".format(filepath)) + G = nx.Graph() + + with open(filepath, 'r') as fin: + lines = fin.readlines() + + for line in lines: + if not line.startswith('a'): + continue + arr = line.strip().split() + id1 = int(arr[1]) + id2 = int(arr[2]) + dist = int(arr[3]) + + G.add_weighted_edges_from([(id1, id2, dist)]) + + print("> Number of nodes: {}".format(len(G.nodes))) + print("> Number of edges: {}".format(len(G.edges))) + + for irun in range(runs): + start = time.time() + a = nx.single_source_dijkstra(G, 10000, 1) + end = time.time() - start + print("Run {}: {} seconds".format(irun, end)) + print(a) + +if __name__ == "__main__": + 0 \ No newline at end of file diff --git a/src/pheap/scripts/download.py b/src/pheap/scripts/download.py new file mode 100644 index 00000000..1d60ef1a --- /dev/null +++ b/src/pheap/scripts/download.py @@ -0,0 +1,107 @@ +import argparse +import os +import sys +import urllib.request +import shutil +import gzip + + +dimacs_distance = { + "dimacs-usa": [ + "http://www.diag.uniroma1.it//~challenge9/data/USA-road-d/USA-road-d.USA.gr.gz", + "dimacs-usa.gr.gz", + "dimacs-usa.gr", + ], + "dimacs-ctr": [ + "http://www.diag.uniroma1.it//~challenge9/data/USA-road-d/USA-road-d.CTR.gr.gz", + "dimacs-ctr.gr.gz", + "dimacs-ctr.gr" + ], + "dimacs-w": [ + "http://www.diag.uniroma1.it//~challenge9/data/USA-road-d/USA-road-d.W.gr.gz", + "dimacs-w.gr.gz", + "dimacs-w.gr" + ], + "dimacs-e": [ + "http://www.diag.uniroma1.it//~challenge9/data/USA-road-d/USA-road-d.E.gr.gz", + "dimacs-e.gr.gz", + "dimacs-e.gr" + ], + "dimacs-lks": [ + "http://www.diag.uniroma1.it//~challenge9/data/USA-road-d/USA-road-d.LKS.gr.gz", + "dimacs-lks.gr.gz", + "dimacs-lks.gr" + ], + "dimacs-cal": [ + "http://www.diag.uniroma1.it//~challenge9/data/USA-road-d/USA-road-d.CAL.gr.gz", + "dimacs-cal.gr.gz", + "dimacs-cal.gr" + ], + "dimacs-ne": [ + "http://www.diag.uniroma1.it//~challenge9/data/USA-road-d/USA-road-d.NE.gr.gz", + "dimacs-ne.gr.gz", + "dimacs-ne.gr" + ], + "dimacs-nw": [ + "http://www.diag.uniroma1.it//~challenge9/data/USA-road-d/USA-road-d.NW.gr.gz", + "dimacs-nw.gr.gz", + "dimacs-nw.gr" + ], + "dimacs-fla": [ + "http://www.diag.uniroma1.it//~challenge9/data/USA-road-d/USA-road-d.FLA.gr.gz", + "dimacs-fla.gr.gz", + "dimacs-fla.gr" + ], + "dimacs-col": [ + "http://www.diag.uniroma1.it//~challenge9/data/USA-road-d/USA-road-d.COL.gr.gz", + "dimacs-col.gr.gz", + "dimacs-col.gr" + ], + "dimacs-bay": [ + "http://www.diag.uniroma1.it//~challenge9/data/USA-road-d/USA-road-d.BAY.gr.gz", + "dimacs-bay.gr.gz", + "dimacs-bay.gr" + ], + "dimacs-ny": [ + "http://www.diag.uniroma1.it//~challenge9/data/USA-road-d/USA-road-d.NY.gr.gz", + "dimacs-ny.gr.gz", + "dimacs-ny.gr" + ], +} + + +def download_dimacs(dataset): + url_dl, gz_name, filename = dimacs_distance[dataset] + gz_dest = os.path.join(args.dest, gz_name) + file_dest = os.path.join(args.dest, filename) + + if not os.path.exists(file_dest) or not os.path.isfile(file_dest): + print("> Download file: {}...".format(url_dl)) + + urllib.request.urlretrieve(url_dl, gz_dest) + + with gzip.open(gz_dest, 'rb') as f_in: + with open(file_dest, 'wb') as f_out: + shutil.copyfileobj(f_in, f_out) + + print("> Dataset is ready at {}".format(file_dest)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Download datasets.") + parser.add_argument('-d', '--dataset', required=True, help="Dataset to be downloaded", metavar="Dataset") + parser.add_argument('--dest', required=True, help="Directory to store files", metavar="Dir") + + args = parser.parse_args() + + if not os.path.exists(args.dest): + os.makedirs(args.dest) + + ds = args.dataset + if ds.startswith("dimacs"): + if ds == "dimacs-all": + for k in dimacs_distance: + download_dimacs(k) + else: + download_dimacs(ds) + diff --git a/src/pheap/scripts/mst.py b/src/pheap/scripts/mst.py new file mode 100644 index 00000000..8b75c4f2 --- /dev/null +++ b/src/pheap/scripts/mst.py @@ -0,0 +1,31 @@ +import networkx as nx +import time + +def mst(filepath, runs): + print("> Parse file: {}".format(filepath)) + G = nx.Graph() + + with open(filepath, 'r') as fin: + lines = fin.readlines() + + for line in lines: + if not line.startswith('a'): + continue + arr = line.strip().split() + id1 = int(arr[1]) + id2 = int(arr[2]) + dist = int(arr[3]) + + G.add_weighted_edges_from([(id1, id2, dist)]) + + print("> Number of nodes: {}".format(len(G.nodes))) + print("> Number of edges: {}".format(len(G.edges))) + + for irun in range(runs): + start = time.time() + a = nx.minimum_spanning_tree(G) + end = time.time() - start + print("Run {}: {} seconds".format(irun, end)) + +if __name__ == "__main__": + 0 \ No newline at end of file diff --git a/src/pheap/src/graph.rs b/src/pheap/src/graph.rs new file mode 100644 index 00000000..eba43a2e --- /dev/null +++ b/src/pheap/src/graph.rs @@ -0,0 +1,527 @@ +use std::{ + collections::HashMap, + fs::File, + io::{LineWriter, Write}, + path::Path, +}; + +use std::ops::AddAssign; + +use num_traits::{Bounded, Num, Zero}; + +use crate::{ph::HeapElmt, PairingHeap}; + +/// A simple and undirected graph. +/// +/// A simple graph assumes that the node indexing starts from ```0``` and is not equipped with a hash map +/// for a mapping from external complex objects to internal graph indices. As a result, [`SimpleGraph`] +/// doesn't have no runtime overhead for such object storage and mapping. +/// +/// # Examples +/// The following example shows how to construct a graph and find the shortest path between node 1 and 5. +/// The data is taken from the illustration in Wikipedia's page for [Dijkstra's algorithm](https://en.wikipedia.org/wiki/Dijkstra's_algorithm). +/// +/// Here, the numbering is adjusted so that the node indexing starts from ```0```. +/// ``` +/// use pheap::graph::SimpleGraph; +/// +/// let mut g = SimpleGraph::::with_capacity(6); +/// +/// g.add_weighted_edges(0, 1, 7); +/// g.add_weighted_edges(0, 2, 9); +/// g.add_weighted_edges(0, 5, 14); +/// g.add_weighted_edges(1, 2, 10); +/// g.add_weighted_edges(1, 3, 15); +/// g.add_weighted_edges(2, 5, 2); +/// g.add_weighted_edges(2, 3, 11); +/// g.add_weighted_edges(3, 4, 6); +/// g.add_weighted_edges(4, 5, 9); +/// +/// // Finds an SSSP from 0 to 4. +/// let mut sp = g.sssp_dijkstra(0, &[4]); +/// assert_eq!(1, sp.len()); +/// +/// let sp = sp.pop().unwrap(); +/// assert_eq!(20, sp.dist()); +/// assert_eq!(&[0, 2, 5, 4], sp.path().as_slice()); +/// +/// // Adds a disconnected component to the graph. +/// g.add_weighted_edges(6, 7, 2); +/// g.add_weighted_edges(6, 8, 3); +/// +/// // Finds an SSSP starting from 0. The result can be used for later query. +/// let lsp = g.sssp_dijkstra_lazy(0); +/// let lsp = g.sssp_dijkstra_lazy(0); +/// let sp = lsp.get(7); +/// assert_eq!(false, sp.is_feasible()); +/// +/// let sp = lsp.get(4); +/// assert_eq!(true, sp.is_feasible()); +/// assert_eq!(20, sp.dist()); +/// assert_eq!(&[0, 2, 5, 4], sp.path().as_slice()); +/// +/// ``` +/// +#[derive(Debug, Default)] +pub struct SimpleGraph { + n_edges: usize, + weights: HashMap>, +} + +impl SimpleGraph { + /// Creates an empty graph. + pub fn new() -> Self { + Self { + n_edges: 0, + weights: HashMap::new(), + } + } + + /// Creates an empty graph with the given capacitiy of nodes. + pub fn with_capacity(n_nodes: usize) -> Self { + Self { + n_edges: 0, + weights: HashMap::with_capacity(n_nodes), + } + } + + /// Returns the number of nodes in the graph. + pub fn n_nodes(&self) -> usize { + self.weights.len() + } + + /// Returns the number of edges in the graph. + pub fn n_edges(&self) -> usize { + self.n_edges + } + + /// Adds a weighted edge to the graph. + /// + /// If the edge already exists in the graph, the weight will be updated. + pub fn add_weighted_edges(&mut self, node1: usize, node2: usize, weight: W) + where + W: Clone + Copy, + { + if node1 != node2 { + self.insert_weight(node1, node2, weight); + self.insert_weight(node2, node1, weight); + } + + self.n_edges += 2; + } + + /// Returns the neighbours of a node. + #[inline] + pub(crate) fn neighbours(&self, node: &usize) -> Option<&Vec<(usize, W)>> { + self.weights.get(&node) + } + + /// Finds the shortest paths from a source node to destination nodes. + /// + /// If you want to keep the result for later usage and/or want to save memory, consider using + /// the lazy version [`SimpleGraph::sssp_dijkstra_lazy`], which returns the intermediate result + /// from Dijkstra's algorithm. + pub fn sssp_dijkstra(&self, src: usize, dest: &[usize]) -> Vec> + where + W: Bounded + Num + Zero + PartialOrd + Copy, + { + let nodes = self.dijkstra(src); + let mut result = Vec::with_capacity(dest.len()); + + for ii in dest { + result.push(traverse_path(src, *ii, &nodes)); + } + + result + } + + /// Finds the shortest paths from a source node to all nodes and returns the intermediate result + /// for later usage. + pub fn sssp_dijkstra_lazy(&self, src: usize) -> LazyShortestPaths + where + W: Bounded + Num + Zero + PartialOrd + Copy, + { + LazyShortestPaths { + src, + paths: self.dijkstra(src), + } + } + + #[inline] + fn dijkstra(&self, src: usize) -> Vec> + where + W: Bounded + Num + Zero + PartialOrd + Copy, + { + let mut pq = PairingHeap::::new(); + pq.insert(src, W::zero()); + + let mut nodes = vec![DijNode::::new(); self.weights.len()]; + nodes[src].dist = W::zero(); + let mut len = pq.len(); + + while len != 0 { + let (node, prio) = pq.delete_min().unwrap(); + let count = nodes[node].len + 1; + + if let Some(nb) = self.neighbours(&node) { + for (u, dist) in nb { + let dijnode = &mut nodes[*u]; + let alt = prio + *dist; + if !dijnode.visited && alt < dijnode.dist { + dijnode.dist = alt; + dijnode.pred = node; + dijnode.len = count; + dijnode.feasible = true; + pq.insert(*u, alt); + } + } + } + + let dijnode = nodes.get_mut(node).unwrap(); + dijnode.visited = true; + len = pq.len(); + } + + nodes + } + + fn insert_weight(&mut self, node1: usize, node2: usize, weight: W) { + match self.weights.get_mut(&node1) { + Some(v) => { + v.push((node2, weight)); + } + None => { + let v = vec![(node2, weight)]; + self.weights.insert(node1, v); + } + } + } + + /// Write graph as a list of edges. + /// + /// Each line contains one edge, following [networkx](https://networkx.org/)'s format: + /// ```index 1 index 2 {'weight': {}}```. + pub fn write_edgelist

(&self, filepath: P) -> std::io::Result<()> + where + P: AsRef, + W: std::fmt::Display, + { + let file = File::create(filepath)?; + let mut file = LineWriter::new(file); + + for (node_idx, nb) in &self.weights { + for (vtx_idx, w) in nb { + file.write_all( + format!("{} {} {{'weight': {}}}\n", node_idx, vtx_idx, w).as_bytes(), + )?; + } + } + + file.flush()?; + + Ok(()) + } +} + +/// The shortest path from a source node to a destination node. +#[derive(Debug)] +pub struct ShortestPath { + src: usize, + dest: usize, + feasible: bool, + dist: W, + path: Vec, +} + +impl ShortestPath { + /// Returns the index of the source node in the shortest path. + pub fn src(&self) -> usize { + self.src + } + + /// Returns the index of the destination node in the shortest path. + pub fn dest(&self) -> usize { + self.dest + } + + /// Returns the shortest path's distance + pub fn dist(&self) -> W + where + W: Copy, + { + self.dist + } + + /// Returns whether a path from the source node to the destination node is feasible. + pub fn is_feasible(&self) -> bool { + self.feasible + } + + /// Returns the path from the source node to destination node. + /// + /// The first element of the vector is the source node, the last the destination node. + pub fn path(&self) -> &Vec { + &self.path + } +} + +/// A struct representing the intermediate output of Dijkstra's algorithm. +#[derive(Debug)] +pub struct LazyShortestPaths { + src: usize, + paths: Vec>, +} + +impl LazyShortestPaths { + /// Returns the shortest path for a given node. + pub fn get(&self, node_index: usize) -> ShortestPath + where + W: Zero + Copy, + { + traverse_path(self.src, node_index, &self.paths) + } + + /// Returns the shortest paths for all nodes. + pub fn get_all(&self) -> Vec> + where + W: Zero + Copy, + { + let n_nodes = self.paths.len(); + let mut result: Vec> = Vec::with_capacity(n_nodes); + + for ii in 0..n_nodes { + let end_node = &self.paths[ii]; + let expected = end_node.len + 1; + + let sp = if end_node.feasible { + let mut len = 0; + let mut chain = Vec::with_capacity(expected); + let mut next = end_node.pred; + + while len < expected { + if next < ii { + let mut sp = result[next].path.clone(); + if ii < self.src { + sp.reverse(); + } + + sp.append(&mut chain); + chain = sp; + break; + } + + chain.insert(0, next); + next = self.paths[next].pred; + len = chain.len(); + } + + ShortestPath { + src: self.src, + dest: ii, + dist: end_node.dist, + path: chain, + feasible: true, + } + } else { + ShortestPath { + src: self.src, + dest: ii, + dist: ::zero(), + path: Vec::with_capacity(0), + feasible: false, + } + }; + + result.push(sp); + } + + result + } + + /// Returns the shortest paths for a given list of node indices. + pub fn get_list(&self, node_indices: &[usize]) -> Vec> + where + W: Zero + Copy, + { + let mut result = Vec::with_capacity(node_indices.len()); + + for ii in node_indices { + result.push(traverse_path(self.src, *ii, &self.paths)); + } + + result + } +} + +#[derive(Clone, Debug)] +struct DijNode { + /// Id of the predecessor's node in SSSP solution from Dijkstra's algorithm. + pred: usize, + /// Flag whether a node is visisted or not. + len: usize, + /// Flag indicating whether the node is already visisted or not. + visited: bool, + /// Flag indicating whether a path from source node is feasible. + feasible: bool, + /// Distance to the predecessor. + dist: W, +} + +impl DijNode { + pub fn new() -> Self + where + W: Bounded, + { + Self { + pred: 0, + dist: ::max_value(), + visited: false, + len: 0, + feasible: false, + } + } +} + +#[inline(always)] +fn traverse_path(src: usize, dest: usize, paths: &[DijNode]) -> ShortestPath +where + W: Zero + Copy, +{ + let end_node = &paths[dest]; + if end_node.feasible { + let expected = end_node.len + 1; + + let mut len = 0; + let mut path = Vec::with_capacity(expected); + path.push(dest); + let mut next = end_node.pred; + + while len < expected { + path.insert(0, next); + next = paths[next].pred; + len = path.len(); + } + + ShortestPath { + src, + dest, + dist: end_node.dist, + path, + feasible: true, + } + } else { + ShortestPath { + src, + dest, + dist: ::zero(), + path: Vec::with_capacity(0), + feasible: false, + } + } +} + +/// Find the minimum spanning tree (MST) in a graph using Prim's algorithm. +/// +/// The function returns a tuple of the total weight of the MST and a simple graph, whose edges are the MST's edges. +/// +/// # Examples +/// ```rust +/// use pheap::graph::{mst_prim, SimpleGraph}; +/// +/// let mut g = SimpleGraph::::new(); +/// +/// g.add_weighted_edges(0, 1, 4); +/// g.add_weighted_edges(0, 7, 8); +/// g.add_weighted_edges(1, 2, 8); +/// g.add_weighted_edges(1, 7, 11); +/// g.add_weighted_edges(2, 3, 7); +/// g.add_weighted_edges(2, 5, 4); +/// g.add_weighted_edges(2, 8, 2); +/// g.add_weighted_edges(3, 4, 9); +/// g.add_weighted_edges(3, 5, 14); +/// g.add_weighted_edges(4, 5, 10); +/// g.add_weighted_edges(5, 6, 2); +/// g.add_weighted_edges(6, 7, 1); +/// g.add_weighted_edges(6, 8, 6); +/// g.add_weighted_edges(7, 8, 7); +/// +/// // gx is the new graph containing the MST's edges and dx is the total weight. +/// let (g0, d0) = mst_prim(&g, 0); +/// let (g4, d4) = mst_prim(&g, 4); +/// +/// assert_eq!(d0, d4); +/// assert_eq!(g0.n_nodes(), g4.n_nodes()); +/// assert_eq!(g0.n_edges(), g4.n_edges()); +/// ``` +pub fn mst_prim(graph: &SimpleGraph, src: usize) -> (SimpleGraph, W) +where + W: Copy + PartialOrd + Bounded + Zero + AddAssign, +{ + let mut pq = PairingHeap::::new(); + let mut nodes: Vec<_> = (0..graph.n_nodes()) + .map(|ii| { + let mut node = PrimNode::::new(); + node.dist = if ii == src { + ::zero() + } else { + ::max_value() + }; + node.idx = ii; + node.heap = pq.insert2(ii, node.dist); + node + }) + .collect(); + + let mut len = pq.len(); + + while len != 0 { + let (node, _) = pq.delete_min().unwrap(); + nodes[node].heap.none(); + + if let Some(nb) = graph.neighbours(&node) { + for (u, dist) in nb { + let primnode = &mut nodes[*u]; + if !primnode.heap.is_none() && *dist < primnode.dist { + primnode.dist = *dist; + primnode.parent = Some(node); + pq.update_prio(&primnode.heap, primnode.dist); + } + } + } + + len = pq.len(); + } + + let mut rg = SimpleGraph::::with_capacity(graph.n_nodes()); + let mut dist = ::zero(); + for node in nodes { + if let Some(p) = node.parent { + rg.add_weighted_edges(p, node.idx, node.dist); + dist += node.dist; + } + } + + (rg, dist) +} + +#[derive(Clone, Debug)] +struct PrimNode { + idx: usize, + parent: Option, + heap: HeapElmt, + dist: W, +} + +impl PrimNode { + pub fn new() -> Self + where + W: Bounded, + { + Self { + idx: 0, + parent: None, + heap: HeapElmt::::default(), + dist: ::max_value(), + } + } +} diff --git a/src/pheap/src/lib.rs b/src/pheap/src/lib.rs new file mode 100644 index 00000000..c36a7676 --- /dev/null +++ b/src/pheap/src/lib.rs @@ -0,0 +1,31 @@ +//! # Pairing Heap +//! A priority queue implemented with a pairing heap. +//! +//! From [Wikipedia](https://en.wikipedia.org/wiki/Pairing_heap): +//! > A pairing heap is a type of heap data structure with relatively simple implementation and excellent practical amortized performance. +//! > Pairing heaps are heap-ordered multiway tree structures, and can be considered simplified Fibonacci heaps. They are considered a "robust choice" for implementing such algorithms as Prim's MST algorithm. +//! +//! A min-pairing heap supports the following operations: +//! - ```find_min```: finds the minimum element of the heap, which is the root. +//! - ```merge```: combines two heaps together. +//! - ```insert```: adds a new element into the heap. +//! - ```delete_min```: remove the root and reorder its children nodes. +//! - ```decrease_key```: decrease the priority of an element. Standard implementation of a heap data structure does not support searching for a key efficiently (which is the case in this crate). Thus, this operation can take very long time, with an upper bound of ```O(2^(sqrt(log log n)))```. +//! +//! The heap data structure is often used in Dijkstra's algorithm and Prim's algorithm. With [`PairingHeap`], +//! the crate provides a fast implementation of these algorithms . See [`graph`] for more info. +//! +#![warn( + missing_docs, + rust_2018_idioms, + missing_debug_implementations, + broken_intra_doc_links +)] + +mod ph; +pub use ph::PairingHeap; + +/// Experimental API for graph analysis. +pub mod graph; + +mod tests; diff --git a/src/pheap/src/ph.rs b/src/pheap/src/ph.rs new file mode 100644 index 00000000..2638a683 --- /dev/null +++ b/src/pheap/src/ph.rs @@ -0,0 +1,391 @@ +use std::{collections::VecDeque, ops::SubAssign, ptr::NonNull}; + +/// A min-pairing heap data structure. +#[derive(Debug)] +pub struct PairingHeap { + root: Option>>, + len: usize, +} + +// implement Clone +impl Clone for PairingHeap { + fn clone(&self) -> Self { + let mut new_heap = Self::default(); + let mut queue = VecDeque::new(); + if let Some(root) = self.root { + queue.push_back(root); + } + + while let Some(node) = queue.pop_front() { + unsafe { + let node = node.as_ref(); + let new_node = new_heap.insert2(node.key.clone(), node.prio.clone()); + if let Some(left) = node.left { + queue.push_back(left); + } + if let Some(right) = node.right { + queue.push_back(right); + } + } + } + + new_heap + } +} + +impl PairingHeap { + /// Creates an empty pairing heap. + #[inline] + pub fn new() -> Self { + Self::default() + } + + /// Returns the number of elements stored in the heap. + #[inline] + pub fn len(&self) -> usize { + self.len + } + + /// Checks whether the heap is empty. + #[inline] + pub fn is_empty(&self) -> bool { + self.len == 0 + } + + /// Returns the minimum element, which is the root element, and its priority in a tuple of the heap. + #[inline] + pub fn find_min(&self) -> Option<(&K, &P)> { + match self.root { + Some(node) => unsafe { + let r = node.as_ref(); + Some((&r.key, &r.prio)) + }, + None => None, + } + } + + /// Merges two heaps together and forms a new heap. + /// + /// If one heap is empty, the other heap will be returned and vice versa. Otherwise, a new heap + /// will be created, whose root is the root that has a smaller value. The other root will be + /// inserted in the new heap. + #[inline] + pub fn merge(mut self, mut other: Self) -> Self + where + P: PartialOrd, + { + let len = self.len() + other.len(); + let root = Self::merge_nodes(self.root, other.root); + + self.root = None; + other.root = None; + + Self { root, len } + } + + #[inline] + fn merge_nodes(node1: Option>>, node2: Option>>) -> Option>> + where + P: PartialOrd, + { + match (node1, node2) { + (Some(root1), Some(root2)) => unsafe { + let root = if root1.as_ref().prio < root2.as_ref().prio { + Self::meld(root1, root2) + } else { + Self::meld(root2, root1) + }; + Some(root) + }, + (Some(_), None) => node1, + (None, Some(_)) => node2, + _ => node1, + } + } + + #[inline(always)] + unsafe fn meld(node1: NonNull>, node2: NonNull>) -> NonNull> { + (*node2.as_ptr()).parent = Some(node1); + (*node2.as_ptr()).right = node1.as_ref().left; + (*node1.as_ptr()).left = Some(node2); + node1 + } + + /// Inserts a new element to the heap. + #[inline] + pub fn insert(&mut self, key: K, prio: P) + where + P: PartialOrd, + { + self.insert2(key, prio); + } + + // Expose HeapElmt to pub, no? + #[inline] + pub(crate) fn insert2(&mut self, key: K, prio: P) -> HeapElmt + where + P: PartialOrd, + { + let node = NonNull::new(Box::leak(Box::new(Inner::new(key, prio)))); + + self.root = Self::merge_nodes(self.root, node); + self.len += 1; + + HeapElmt { inner: node } + } + + /// Decreases the priority of a key by the amount given in ```delta```. + pub fn decrease_prio(&mut self, key: &K, delta: P) + where + K: PartialEq, + P: PartialOrd + SubAssign, + { + if let Some(root) = self.root { + unsafe { + if &root.as_ref().key == key { + (*root.as_ptr()).prio -= delta; + return; + } + + let mut targ = None; + let mut prev = None; + let mut tmp_nodes = VecDeque::with_capacity(self.len << 2); + let mut traverse = root.as_ref().left; + + while let Some(node) = traverse { + if &node.as_ref().key == key { + targ = traverse; + break; + } + + prev = traverse; + tmp_nodes.push_back(traverse); + + if node.as_ref().right.is_some() { + traverse = node.as_ref().right; + } else { + while let Some(front) = tmp_nodes.pop_front() { + traverse = front.unwrap().as_ref().left; + if traverse.is_some() { + break; + } + } + } + } + + if let Some(node) = targ { + // Every node must have a parent. So unwrap() here shouldn't panic. + let parent = node.as_ref().parent.unwrap(); + (*node.as_ptr()).prio -= delta; + + if parent.as_ref().prio < node.as_ref().prio { + return; + } + + if parent.as_ref().left == targ { + (*parent.as_ptr()).left = node.as_ref().right; + } + + if let Some(prev_node) = prev { + if prev_node.as_ref().right == targ { + (*prev_node.as_ptr()).right = node.as_ref().right; + } + } + + (*node.as_ptr()).parent = None; + (*node.as_ptr()).right = None; + + self.root = Self::merge_nodes(self.root, targ); + } + } + } + } + + // TODO: currently only works when new_prio < prio. + pub(crate) fn update_prio(&mut self, node: &HeapElmt, new_prio: P) + where + P: PartialOrd, + { + unsafe { + self.update(node.inner, new_prio); + } + } + + unsafe fn update(&mut self, targ: Option>>, new_prio: P) + where + P: PartialOrd, + { + if let Some(node) = targ { + match node.as_ref().parent { + Some(parent) => { + let mut prev = parent.as_ref().left; + + while let Some(prev_node) = prev { + if prev_node.as_ref().right == targ { + break; + } else { + prev = prev_node.as_ref().right; + } + } + + (*node.as_ptr()).prio = new_prio; + + if parent.as_ref().prio < node.as_ref().prio { + return; + } + + if parent.as_ref().left == targ { + (*parent.as_ptr()).left = node.as_ref().right; + } + + if let Some(prev_node) = prev { + if prev_node.as_ref().right == targ { + (*prev_node.as_ptr()).right = node.as_ref().right; + } + } + + (*node.as_ptr()).parent = None; + (*node.as_ptr()).right = None; + + self.root = Self::merge_nodes(self.root, targ); + } + None => { + (*node.as_ptr()).prio = new_prio; + } + }; + } + } + + /// Deletes the minimum element, which is the root, of the heap, and then returns the root's key value and priority. + pub fn delete_min(&mut self) -> Option<(K, P)> + where + P: PartialOrd, + { + self.root.map(|root| unsafe { + self.len -= 1; + let mut targ = (*root.as_ptr()).left.take(); + if targ.is_none() { + self.root = None; + } else { + // TODO: optimise so that capacity is known here. + let mut tmp_nodes = VecDeque::new(); + + // First pass: left to right + while let Some(node) = targ { + (*node.as_ptr()).parent = None; + let right = (*node.as_ptr()).right.take(); + + let node_next = match right { + Some(node_right) => { + let next = (*node_right.as_ptr()).right.take(); + (*node_right.as_ptr()).parent = None; + next + } + None => None, + }; + + tmp_nodes.push_back(Self::merge_nodes(Some(node), right)); + + targ = node_next; + } + + // Second pass: right to left + // If left is not None, there must be at least one element in VecDeque. + // So unwrap() is safe here. + let mut node = tmp_nodes.pop_back().unwrap(); + + while let Some(node_prev) = tmp_nodes.pop_back() { + node = Self::merge_nodes(node, node_prev); + } + + self.root = node; + } + let node = Box::from_raw(root.as_ptr()); + node.into_value() + }) + } +} + +impl Default for PairingHeap { + fn default() -> Self { + Self { root: None, len: 0 } + } +} + +impl Drop for PairingHeap { + fn drop(&mut self) { + // Remove all children of a node, then the node itself. + // Returns the next sibling in the end. + + unsafe fn remove(targ: Option>>) -> Option>> { + if let Some(node) = targ { + while let Some(left) = node.as_ref().left { + (*node.as_ptr()).left = remove(Some(left)); + } + + let sibling = (*node.as_ptr()).right.take(); + (*node.as_ptr()).parent = None; + Box::from_raw(node.as_ptr()); + + sibling + } else { + None + } + } + + unsafe { + remove(self.root); + } + + self.root = None; + } +} + +#[derive(Clone, Debug)] +pub(crate) struct HeapElmt { + inner: Option>>, +} + +impl HeapElmt { + pub(crate) fn is_none(&self) -> bool { + self.inner.is_none() + } + + pub(crate) fn none(&mut self) { + self.inner = None; + } +} + +impl Default for HeapElmt { + fn default() -> Self { + Self { inner: None } + } +} + +#[derive(Debug)] +struct Inner { + /// Pointer to a node's parent. + parent: Option>>, + /// Pointer to a node's first (or left-most) child. + left: Option>>, + /// Pointer to a node's next older sibling. + right: Option>>, + key: K, + prio: P, +} + +impl Inner { + fn new(key: K, prio: P) -> Self { + Self { + key, + prio, + parent: None, + left: None, + right: None, + } + } + + fn into_value(self) -> (K, P) { + (self.key, self.prio) + } +} diff --git a/src/pheap/src/tests.rs b/src/pheap/src/tests.rs new file mode 100644 index 00000000..4d3d5490 --- /dev/null +++ b/src/pheap/src/tests.rs @@ -0,0 +1,221 @@ +#![cfg(test)] +use super::PairingHeap; +use crate::graph::{mst_prim, SimpleGraph}; +use crate::ph::HeapElmt; + +#[cfg(test)] +fn create_heap(start: i32, end: i32) -> (PairingHeap, Vec>) { + let mut ph = PairingHeap::::new(); + let elmts: Vec<_> = (start..end).map(|ii| ph.insert2(ii, ii)).collect(); + (ph, elmts) +} + +#[test] +fn create_insert() { + let mut ph = PairingHeap::::new(); + assert_eq!(0, ph.len()); + assert!(ph.is_empty()); + + for ii in 1..=10 { + ph.insert(ii, ii); + } + + assert_eq!(10, ph.len()); +} + +#[test] +fn find_min() { + let (ph, _) = create_heap(0, 0); + assert!(ph.find_min().is_none()); + + let (ph, _) = create_heap(1, 11); + let min = ph.find_min(); + assert!(min.is_some()); + let (k, _) = min.unwrap(); + assert_eq!(1, *k); + + let min_prio = ph.find_min(); + assert!(min_prio.is_some()); + let (k, p) = min_prio.unwrap(); + assert_eq!(1, *k); + assert_eq!(1, *p); +} + +#[test] +fn merge() { + let ph1 = create_heap(1, 11).0; + let len1 = ph1.len(); + let ph2 = create_heap(11, 21).0; + let len2 = ph2.len(); + + let ph = ph2.merge(ph1); + println!("Len: {}", ph.len()); + assert_eq!(len1 + len2, ph.len()); + let min_prio = ph.find_min(); + assert!(min_prio.is_some()); + let (k, p) = min_prio.unwrap(); + assert_eq!(1, *k); + assert_eq!(1, *p); +} + +#[test] +fn delete_min() { + let (mut ph, _) = create_heap(1, 11); + let mut len = ph.len(); + let mut tracker = 1; + + while len != 0 { + let min_prio = ph.find_min(); + assert!(min_prio.is_some()); + let (k, p) = min_prio.unwrap(); + let (k, p) = (*k, *p); + assert_eq!(tracker, p); + tracker += 1; + + let del_prio = ph.delete_min(); + assert!(del_prio.is_some()); + let (kt, pt) = del_prio.unwrap(); + assert_eq!(k, kt); + assert_eq!(p, pt); + + len = ph.len(); + } + + assert!(ph.find_min().is_none()); + assert_eq!(0, ph.len()); +} + +#[test] +fn decrease_prio() { + let (mut ph, _) = create_heap(1, 11); + + ph.delete_min(); + ph.decrease_prio(&8, 4); + ph.decrease_prio(&6, 3); + ph.decrease_prio(&9, 3); + ph.decrease_prio(&10, 2); + + let mut len = ph.len(); + let mut count = 0; + + let key_exp = vec![2, 6, 3, 8, 4, 5, 9, 7, 10]; + let prio_exp = vec![2, 3, 3, 4, 4, 5, 6, 7, 8]; + + while len != 0 { + let del_prio = ph.delete_min(); + assert!(del_prio.is_some()); + let (k, p) = del_prio.unwrap(); + assert_eq!( + key_exp[count], k, + "Check key: Expected: {} | Got: {}", + key_exp[count], k + ); + assert_eq!( + prio_exp[count], p, + "Check prio for key {}: Expected: {} | Got: {}", + k, prio_exp[count], p + ); + + len = ph.len(); + count += 1; + } +} + +#[test] +fn update_prio() { + let (mut ph, v) = create_heap(1, 11); + + ph.delete_min(); + + ph.update_prio(&v[7], 4); + ph.update_prio(&v[5], 3); + ph.update_prio(&v[8], 6); + ph.update_prio(&v[9], 8); + + let key_exp = vec![2, 6, 3, 8, 4, 5, 9, 7, 10]; + let prio_exp = vec![2, 3, 3, 4, 4, 5, 6, 7, 8]; + + let mut len = ph.len(); + let mut count = 0; + + while len != 0 { + let del_prio = ph.delete_min(); + assert!(del_prio.is_some()); + let (k, p) = del_prio.unwrap(); + assert_eq!( + key_exp[count], k, + "Check key: Expected: {} | Got: {}", + key_exp[count], k + ); + assert_eq!( + prio_exp[count], p, + "Check prio for key {}: Expected: {} | Got: {}", + k, prio_exp[count], p + ); + + len = ph.len(); + count += 1; + } +} + +#[test] +fn test_dijkstra() { + let mut g = SimpleGraph::::with_capacity(6); + + g.add_weighted_edges(0, 1, 7); + g.add_weighted_edges(0, 2, 9); + g.add_weighted_edges(0, 5, 14); + g.add_weighted_edges(1, 2, 10); + g.add_weighted_edges(1, 3, 15); + g.add_weighted_edges(2, 5, 2); + g.add_weighted_edges(2, 3, 11); + g.add_weighted_edges(3, 4, 6); + g.add_weighted_edges(4, 5, 9); + + let mut sp = g.sssp_dijkstra(0, &[4]); + assert_eq!(1, sp.len()); + + let sp = sp.pop().unwrap(); + assert_eq!(true, sp.is_feasible()); + assert_eq!(20, sp.dist()); + assert_eq!(&[0, 2, 5, 4], sp.path().as_slice()); + + g.add_weighted_edges(6, 7, 2); + g.add_weighted_edges(6, 8, 3); + + let lsp = g.sssp_dijkstra_lazy(0); + let sp = lsp.get(7); + assert_eq!(false, sp.is_feasible()); + + let sp = lsp.get(4); + assert_eq!(true, sp.is_feasible()); + assert_eq!(20, sp.dist()); + assert_eq!(&[0, 2, 5, 4], sp.path().as_slice()); +} + +#[test] +fn test_mst_prim() { + let mut g = SimpleGraph::::new(); + + g.add_weighted_edges(0, 1, 4); + g.add_weighted_edges(0, 7, 8); + g.add_weighted_edges(1, 2, 8); + g.add_weighted_edges(1, 7, 11); + g.add_weighted_edges(2, 3, 7); + g.add_weighted_edges(2, 5, 4); + g.add_weighted_edges(2, 8, 2); + g.add_weighted_edges(3, 4, 9); + g.add_weighted_edges(3, 5, 14); + g.add_weighted_edges(4, 5, 10); + g.add_weighted_edges(5, 6, 2); + g.add_weighted_edges(6, 7, 1); + g.add_weighted_edges(6, 8, 6); + g.add_weighted_edges(7, 8, 7); + + let (g0, d0) = mst_prim(&g, 0); + let (g4, d4) = mst_prim(&g, 4); + + assert_eq!(d0, d4); + assert_eq!(g0.n_nodes(), g4.n_nodes()); + assert_eq!(g0.n_edges(), g4.n_edges()); +} diff --git a/src/plugin.rs b/src/plugin.rs index f1fc4de6..acb2e881 100644 --- a/src/plugin.rs +++ b/src/plugin.rs @@ -5,7 +5,7 @@ //! A plugin must implement Clone trait, because it will be cloned multiple times for each cluster //! -use crate::decoding_hypergraph::*; +// use crate::decoding_hypergraph::*; use crate::derivative::Derivative; use crate::dual_module::*; use crate::matrix::*; @@ -23,7 +23,6 @@ pub trait PluginImpl { /// given the tight edges and parity constraints, find relaxers fn find_relaxers( &self, - decoding_graph: &DecodingHyperGraph, matrix: &mut EchelonMatrix, positive_dual_nodes: &[DualNodePtr], ) -> RelaxerVec; @@ -65,6 +64,7 @@ pub enum RepeatStrategy { } /// describes what plugins to enable and also the recursive strategy +#[derive(Clone)] pub struct PluginEntry { /// the implementation of a plugin pub plugin: Arc, @@ -75,7 +75,6 @@ pub struct PluginEntry { impl PluginEntry { pub fn execute( &self, - decoding_graph: &DecodingHyperGraph, matrix: &mut EchelonMatrix, positive_dual_nodes: &[DualNodePtr], relaxer_forest: &mut RelaxerForest, @@ -84,13 +83,13 @@ impl PluginEntry { let mut repeat_count = 0; while repeat { // execute the plugin - let relaxers = self.plugin.find_relaxers(decoding_graph, &mut *matrix, positive_dual_nodes); + let relaxers = self.plugin.find_relaxers(&mut *matrix, positive_dual_nodes); if relaxers.is_empty() { repeat = false; } for relaxer in relaxers.into_iter() { for edge_index in relaxer.get_untighten_edges().keys() { - matrix.update_edge_tightness(*edge_index, false); + matrix.update_edge_tightness(edge_index.downgrade(), false); } let relaxer = Arc::new(relaxer); let sum_speed = relaxer.get_sum_speed(); @@ -137,7 +136,6 @@ impl PluginManager { pub fn find_relaxer( &mut self, - decoding_graph: &DecodingHyperGraph, matrix: &mut EchelonMatrix, positive_dual_nodes: &[DualNodePtr], ) -> Option { @@ -148,11 +146,11 @@ impl PluginManager { .map(|ptr| ptr.read_recursive().invalid_subgraph.clone()), ); for plugin_entry in self.plugins.iter().take(*self.plugin_count.read_recursive()) { - if let Some(relaxer) = plugin_entry.execute(decoding_graph, matrix, positive_dual_nodes, &mut relaxer_forest) { + if let Some(relaxer) = plugin_entry.execute( matrix, positive_dual_nodes, &mut relaxer_forest) { return Some(relaxer); } } // add a union find relaxer finder as the last resort if nothing is reported - PluginUnionFind::entry().execute(decoding_graph, matrix, positive_dual_nodes, &mut relaxer_forest) + PluginUnionFind::entry().execute( matrix, positive_dual_nodes, &mut relaxer_forest) } } diff --git a/src/plugin_single_hair.rs b/src/plugin_single_hair.rs index 68d44a27..5c2d6009 100644 --- a/src/plugin_single_hair.rs +++ b/src/plugin_single_hair.rs @@ -17,25 +17,30 @@ use num_traits::One; use std::collections::BTreeSet; use std::sync::Arc; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak}; + #[derive(Debug, Clone, Default)] pub struct PluginSingleHair {} impl PluginImpl for PluginSingleHair { fn find_relaxers( &self, - decoding_graph: &DecodingHyperGraph, matrix: &mut EchelonMatrix, positive_dual_nodes: &[DualNodePtr], ) -> Vec { // single hair requires the matrix to have at least one feasible solution - if let Some(relaxer) = PluginUnionFind::find_single_relaxer(decoding_graph, matrix) { + if let Some(relaxer) = PluginUnionFind::find_single_relaxer(matrix) { return vec![relaxer]; } // then try to find more relaxers let mut relaxers = vec![]; for dual_node_ptr in positive_dual_nodes.iter() { let dual_node = dual_node_ptr.read_recursive(); - let mut hair_view = HairView::new(matrix, dual_node.invalid_subgraph.hair.iter().cloned()); + let hair = dual_node.invalid_subgraph.hair.iter().map(|e| e.downgrade()); + let mut hair_view = HairView::new(matrix, hair); debug_assert!(hair_view.get_echelon_satisfiable()); // hair_view.printstd(); // optimization: check if there exists a single-hair solution, if not, clear the previous relaxers @@ -65,16 +70,21 @@ impl PluginImpl for PluginSingleHair { if !unnecessary_edges.is_empty() { // we can construct a relaxer here, by growing a new invalid subgraph that // removes those unnecessary edges and shrinking the existing one - let mut vertices: BTreeSet = hair_view.get_vertices(); - let mut edges: BTreeSet = BTreeSet::from_iter(hair_view.get_base_view_edges()); - for &edge_index in dual_node.invalid_subgraph.hair.iter() { - edges.remove(&edge_index); + let mut vertices: BTreeSet = hair_view.get_vertices(); + let mut edges: BTreeSet = hair_view.get_base_view_edges().iter().map(|e| e.upgrade_force()).collect(); + for edge_ptr in dual_node.invalid_subgraph.hair.iter() { + edges.remove(&edge_ptr); } - for &edge_index in unnecessary_edges.iter() { - edges.insert(edge_index); - vertices.extend(decoding_graph.get_edge_neighbors(edge_index)); + for edge_weak in unnecessary_edges.iter() { + edges.insert(edge_weak.upgrade_force()); + for vertex in edge_weak.upgrade_force().get_vertex_neighbors().iter() { + vertices.insert(vertex.upgrade_force()); + } + // for vertex in edge_index.upgrade_force().read_recursive().vertices.iter() { + // vertices.insert(vertex.upgrade_force()); + // } } - let invalid_subgraph = Arc::new(InvalidSubgraph::new_complete(vertices, edges, decoding_graph)); + let invalid_subgraph = Arc::new(InvalidSubgraph::new_complete(&vertices, &edges)); let relaxer = Relaxer::new( [ (invalid_subgraph, Rational::one()), @@ -150,7 +160,8 @@ pub mod tests { defect_vertices, 4, vec![PluginSingleHair::entry_with_strategy(RepeatStrategy::Once)], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, + // GrowingStrategy::SingleCluster, ); } @@ -168,7 +179,8 @@ pub mod tests { defect_vertices, 4, vec![PluginSingleHair::entry_with_strategy(RepeatStrategy::Once)], - GrowingStrategy::SingleCluster, + // GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } @@ -246,7 +258,8 @@ pub mod tests { vec![PluginSingleHair::entry_with_strategy(RepeatStrategy::Multiple { max_repetition: usize::MAX, })], - GrowingStrategy::SingleCluster, + // GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } diff --git a/src/plugin_union_find.rs b/src/plugin_union_find.rs index 3296750f..f76885fc 100644 --- a/src/plugin_union_find.rs +++ b/src/plugin_union_find.rs @@ -5,6 +5,7 @@ //! sure there is a feasible MINLP solution. //! + use crate::decoding_hypergraph::*; use crate::dual_module::*; use crate::invalid_subgraph::*; @@ -15,19 +16,24 @@ use crate::relaxer::*; use crate::util::*; use std::collections::BTreeSet; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak}; + #[derive(Debug, Clone, Default)] pub struct PluginUnionFind {} impl PluginUnionFind { /// check if the cluster is valid (hypergraph union-find decoder) - pub fn find_single_relaxer(decoding_graph: &DecodingHyperGraph, matrix: &mut EchelonMatrix) -> Option { + pub fn find_single_relaxer(matrix: &mut EchelonMatrix) -> Option { if matrix.get_echelon_info().satisfiable { return None; // cannot find any relaxer } + let local_edges: BTreeSet = matrix.get_view_edges().iter().map(|e| e.upgrade_force()).collect(); let invalid_subgraph = InvalidSubgraph::new_complete_ptr( - matrix.get_vertices(), - BTreeSet::from_iter(matrix.get_view_edges()), - decoding_graph, + &matrix.get_vertices(), + &local_edges, ); Some(Relaxer::new([(invalid_subgraph, Rational::one())].into())) } @@ -36,11 +42,10 @@ impl PluginUnionFind { impl PluginImpl for PluginUnionFind { fn find_relaxers( &self, - decoding_graph: &DecodingHyperGraph, matrix: &mut EchelonMatrix, _positive_dual_nodes: &[DualNodePtr], ) -> Vec { - if let Some(relaxer) = Self::find_single_relaxer(decoding_graph, matrix) { + if let Some(relaxer) = Self::find_single_relaxer(matrix) { vec![relaxer] } else { vec![] diff --git a/src/pointers.rs b/src/pointers.rs index 456c34e5..5413eecf 100644 --- a/src/pointers.rs +++ b/src/pointers.rs @@ -3,6 +3,7 @@ use crate::parking_lot::lock_api::{RwLockReadGuard, RwLockWriteGuard}; use crate::parking_lot::{RawRwLock, RwLock}; +use std::collections::BTreeSet; use std::sync::{Arc, Weak}; pub trait RwLockPtr { @@ -113,6 +114,31 @@ impl std::ops::Deref for ArcRwLock { } } +impl std::hash::Hash for ArcRwLock { + fn hash(&self, state: &mut H) { + std::ptr::hash(self, state); + } +} + +impl std::hash::Hash for WeakRwLock { + fn hash(&self, state: &mut H) { + std::ptr::hash(self, state); + } +} + +impl weak_table::traits::WeakElement for WeakRwLock { + type Strong = ArcRwLock; + fn new(view: &Self::Strong) -> Self { + view.downgrade() + } + fn view(&self) -> Option { + self.upgrade() + } + fn clone(view: &Self::Strong) -> Self::Strong { + view.clone() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/primal_module.rs b/src/primal_module.rs index a8f8f559..761c8b1e 100644 --- a/src/primal_module.rs +++ b/src/primal_module.rs @@ -3,12 +3,20 @@ //! Generics for primal modules, defining the necessary interfaces for a primal module //! +use std::collections::{BTreeMap, BTreeSet}; +use std::sync::Arc; + use crate::dual_module::*; use crate::num_traits::FromPrimitive; +use crate::num_traits::Zero; +use crate::ordered_float::OrderedFloat; use crate::pointers::*; +use crate::primal_module_serial::{ClusterAffinity, PrimalClusterPtr, PrimalClusterWeak}; +use crate::relaxer_optimizer::OptimizerResult; use crate::util::*; use crate::visualize::*; -use std::sync::Arc; + +pub type Affinity = OrderedFloat; /// common trait that must be implemented for each implementation of primal module pub trait PrimalModuleImpl { @@ -25,12 +33,34 @@ pub trait PrimalModuleImpl { /// and then tell dual module what to do to resolve these conflicts; /// note that this function doesn't necessarily resolve all the conflicts, but can return early if some major change is made. /// when implementing this function, it's recommended that you resolve as many conflicts as possible. + /// + /// note: this is only ran in the "search" mode fn resolve( &mut self, group_max_update_length: GroupMaxUpdateLength, interface: &DualModuleInterfacePtr, dual_module: &mut impl DualModuleImpl, - ); + ) -> bool; + + /// kept in case of future need for this deprecated function (backwards compatibility for cases such as `SingleCluster` growing strategy) + fn old_resolve( + &mut self, + _group_max_update_length: GroupMaxUpdateLength, + _interface: &DualModuleInterfacePtr, + _dual_module: &mut impl DualModuleImpl, + ) -> bool { + false + } + + /// resolve the conflicts in the "tune" mode + fn resolve_tune( + &mut self, + _group_max_update_length: BTreeSet, + _interface: &DualModuleInterfacePtr, + _dual_module: &mut impl DualModuleImpl, + ) -> (BTreeSet, bool) { + panic!("`resolve_tune` not implemented, this primal module does not work with tuning mode"); + } fn solve( &mut self, @@ -50,6 +80,7 @@ pub trait PrimalModuleImpl { ) where Self: MWPSVisualizer + Sized, { + // println!("syndrome pattern: {:?}", syndrome_pattern); if let Some(visualizer) = visualizer { self.solve_step_callback( interface, @@ -57,7 +88,9 @@ pub trait PrimalModuleImpl { dual_module, |interface, dual_module, primal_module, group_max_update_length| { if cfg!(debug_assertions) { - println!("group_max_update_length: {:?}", group_max_update_length); + // println!("/////////////////////////////////////////////////////////////////////////////////////"); + // println!("group_max_update_length: {:?}", group_max_update_length); + // dual_module.debug_print(); } if group_max_update_length.is_unbounded() { visualizer @@ -108,38 +141,87 @@ pub trait PrimalModuleImpl { ) where F: FnMut(&DualModuleInterfacePtr, &mut D, &mut Self, &GroupMaxUpdateLength), { + // println!(" in solve step callback interface loaded"); + // Search, this part is unchanged let mut group_max_update_length = dual_module.compute_maximum_update_length(); + // println!("first group max update length: {:?}", group_max_update_length); + while !group_max_update_length.is_unbounded() { callback(interface, dual_module, self, &group_max_update_length); - if let Some(length) = group_max_update_length.get_valid_growth() { - dual_module.grow(length); - } else { - self.resolve(group_max_update_length, interface, dual_module); + match group_max_update_length.get_valid_growth() { + Some(length) => dual_module.grow(length), + None => { + self.resolve(group_max_update_length, interface, dual_module); + } } group_max_update_length = dual_module.compute_maximum_update_length(); + // println!("group max update length: {:?}", group_max_update_length); + } + + // from here, all states should be syncronized + let mut start = true; + + // starting with unbounded state here: All edges and nodes are not growing as of now + // Tune + while self.has_more_plugins() { + // Note: intersting, seems these aren't needed... But just kept here in case of future need, as well as correctness related failures + if start { + start = false; + dual_module.advance_mode(); + #[cfg(feature = "incr_lp")] + self.calculate_edges_free_weight_clusters(dual_module); + } + self.update_sorted_clusters_aff(dual_module); + let cluster_affs = self.get_sorted_clusters_aff(); + + for cluster_affinity in cluster_affs.into_iter() { + let cluster_ptr = cluster_affinity.cluster_ptr; + let mut dual_node_deltas = BTreeMap::new(); + let (mut resolved, optimizer_result) = + self.resolve_cluster_tune(&cluster_ptr, interface, dual_module, &mut dual_node_deltas); + + let mut conflicts = dual_module.get_conflicts_tune(optimizer_result, dual_node_deltas); + while !resolved { + let (_conflicts, _resolved) = self.resolve_tune(conflicts, interface, dual_module); + if _resolved { + break; + } + conflicts = _conflicts; + resolved = _resolved; + } + } } } - fn subgraph(&mut self, interface: &DualModuleInterfacePtr, dual_module: &mut impl DualModuleImpl) -> Subgraph; + fn subgraph(&mut self, interface: &DualModuleInterfacePtr, seed: u64) + -> Subgraph; fn subgraph_range( &mut self, interface: &DualModuleInterfacePtr, - dual_module: &mut impl DualModuleImpl, + seed: u64, ) -> (Subgraph, WeightRange) { - let subgraph = self.subgraph(interface, dual_module); + let subgraph = self.subgraph(interface, seed); + // let weight_range = WeightRange::new( + // interface.sum_dual_variables(), + // interface + // .read_recursive() + // .decoding_graph + // .model_graph + // .initializer + // .get_subgraph_total_weight(&subgraph), + // ); + let mut upper = Rational::zero(); + for (i, edge_weak) in subgraph.iter().enumerate() { + // weight += self.weighted_edges[edge_index as usize].weight; + // println!("{:?} edge in subgraph: {:?}, weight: {:?}", i, edge_weak.upgrade_force().read_recursive().edge_index, edge_weak.upgrade_force().read_recursive().weight); + upper += edge_weak.upgrade_force().read_recursive().weight; + } let weight_range = WeightRange::new( interface.sum_dual_variables(), - Rational::from_usize( - interface - .read_recursive() - .decoding_graph - .model_graph - .initializer - .get_subgraph_total_weight(&subgraph), - ) - .unwrap(), + upper ); + (subgraph, weight_range) } @@ -147,4 +229,51 @@ pub trait PrimalModuleImpl { fn generate_profiler_report(&self) -> serde_json::Value { json!({}) } + + /* tune mode methods */ + /// check if there are more plugins to be applied, defaulted to having no plugins + fn has_more_plugins(&mut self) -> bool { + false + } + + /// in "tune" mode, return the list of clusters that need to be resolved + fn pending_clusters(&mut self) -> Vec { + panic!("not implemented `pending_clusters`"); + } + + /// check if a cluster has been solved, if not then resolve it + fn resolve_cluster( + &mut self, + _cluster_ptr: &PrimalClusterPtr, + _interface_ptr: &DualModuleInterfacePtr, + _dual_module: &mut impl DualModuleImpl, + ) -> bool { + panic!("not implemented `resolve_cluster`"); + } + + /// `resolve_cluster` but in tuning mode, optimizer result denotes what the optimizer has accomplished + fn resolve_cluster_tune( + &mut self, + _cluster_ptr: &PrimalClusterPtr, + _interface_ptr: &DualModuleInterfacePtr, + _dual_module: &mut impl DualModuleImpl, + // _dual_node_deltas: &mut BTreeMap, + _dual_node_deltas: &mut BTreeMap, + ) -> (bool, OptimizerResult) { + panic!("not implemented `resolve_cluster_tune`"); + } + + /* affinity */ + fn update_sorted_clusters_aff(&mut self, _dual_module: &mut D) { + panic!("not implemented `update_sorted_clusters_aff`"); + } + + fn get_sorted_clusters_aff(&mut self) -> BTreeSet { + panic!("not implemented `get_sorted_clusters_aff`"); + } + + #[cfg(feature = "incr_lp")] + fn calculate_edges_free_weight_clusters(&mut self, dual_module: &mut impl DualModuleImpl) { + panic!("not implemented `calculate_edges_free_weight_clusters`"); + } } diff --git a/src/primal_module_parallel.rs b/src/primal_module_parallel.rs new file mode 100644 index 00000000..81a12757 --- /dev/null +++ b/src/primal_module_parallel.rs @@ -0,0 +1,1403 @@ +//! Parallel Primal Module +//! +//! A parallel implementation of the primal module, by calling functions provided by the serial primal module +//! +//! + +use color_print::cprintln; +use super::dual_module::*; +use crate::{dual_module_parallel::*, plugin}; +use crate::dual_module_pq::{FutureQueueMethods, Obstacle}; +use super::pointers::*; +use super::primal_module::*; +use super::primal_module_serial::*; +use super::util::*; +use std::cmp::Ordering; +use super::visualize::*; +use crate::rayon::prelude::*; +use serde::{Deserialize, Serialize}; +use std::collections::{BTreeMap, BTreeSet}; +use std::ops::DerefMut; +use std::sync::{Arc, Condvar, Mutex}; +use std::time::{Duration, Instant}; +use crate::num_traits::Zero; +use crate::num_traits::FromPrimitive; +use crate::plugin::*; + + +pub struct PrimalModuleParallel { + /// the basic wrapped serial modules at the beginning, afterwards the fused units are appended after them + pub units: Vec, + /// local configuration + pub config: PrimalModuleParallelConfig, + /// partition information generated by the config + pub partition_info: Arc, + /// thread pool used to execute async functions in parallel + pub thread_pool: Arc, +} + +pub struct PrimalModuleParallelUnit { + /// the index + pub unit_index: usize, + /// the dual module interface, for constant-time clear + pub interface_ptr: DualModuleInterfacePtr, + /// partition information generated by the config + pub partition_info: Arc, + /// the owned serial primal module + pub serial_module: PrimalModuleSerial, + /// adjacent parallel units of this unit, and whether they each are fused with this unit + pub adjacent_parallel_units: BTreeMap, + /// whether this unit is solved + pub is_solved: bool, +} + + +pub type PrimalModuleParallelUnitPtr = ArcRwLock; +pub type PrimalModuleParallelUnitWeak = WeakRwLock; + +impl std::fmt::Debug for PrimalModuleParallelUnitPtr { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let unit = self.read_recursive(); + write!(f, "{}", unit.unit_index) + } +} + +impl std::fmt::Debug for PrimalModuleParallelUnitWeak { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + self.upgrade_force().fmt(f) + } +} + +impl Ord for PrimalModuleParallelUnitPtr { + fn cmp(&self, other: &Self) -> Ordering { + // compare the pointer address + let ptr1 = Arc::as_ptr(self.ptr()); + let ptr2 = Arc::as_ptr(other.ptr()); + // https://doc.rust-lang.org/reference/types/pointer.html + // "When comparing raw pointers they are compared by their address, rather than by what they point to." + ptr1.cmp(&ptr2) + } +} + +impl PartialOrd for PrimalModuleParallelUnitPtr { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct PrimalModuleParallelConfig { + /// enable async execution of dual operations; only used when calling top-level operations, not used in individual units + #[serde(default = "primal_module_parallel_default_configs::thread_pool_size")] + pub thread_pool_size: usize, + /// pin threads to cores sequentially + #[serde(default = "primal_module_parallel_default_configs::pin_threads_to_cores")] + pub pin_threads_to_cores: bool, +} + +impl Default for PrimalModuleParallelConfig { + fn default() -> Self { + serde_json::from_value(json!({})).unwrap() + } +} + +pub mod primal_module_parallel_default_configs { + pub fn thread_pool_size() -> usize { + 0 + } // by default to the number of CPU cores + // pub fn thread_pool_size() -> usize { 1 } // debug: use a single core + pub fn pin_threads_to_cores() -> bool { + false + } // pin threads to cores to achieve most stable results +} + +impl PrimalModuleParallel { + pub fn new_config( + initializer: &SolverInitializer, + partition_info: &PartitionInfo, + config: PrimalModuleParallelConfig, + growing_strategy: GrowingStrategy, + plugins: Arc, + ) -> Self { + let partition_info = Arc::new(partition_info.clone()); + let mut thread_pool_builder = rayon::ThreadPoolBuilder::new(); + if config.thread_pool_size != 0 { + thread_pool_builder = thread_pool_builder.num_threads(config.thread_pool_size); + } + if config.pin_threads_to_cores { + let core_ids = core_affinity::get_core_ids().unwrap(); + // println!("core_ids: {core_ids:?}"); + thread_pool_builder = thread_pool_builder.start_handler(move |thread_index| { + // https://stackoverflow.com/questions/7274585/linux-find-out-hyper-threaded-core-id + if thread_index < core_ids.len() { + crate::core_affinity::set_for_current(core_ids[thread_index]); + } // otherwise let OS decide which core to execute + }); + } + + let thread_pool = thread_pool_builder.build().expect("creating thread pool failed"); + let mut units = vec![]; + let unit_count = partition_info.units.len(); + thread_pool.scope(|_| { + (0..unit_count) + .into_par_iter() + .map(|unit_index| { + // println!("unit_index: {unit_index}"); + let mut primal_module = PrimalModuleSerial::new_empty(initializer); + primal_module.growing_strategy = growing_strategy; + primal_module.plugins = plugins.clone(); + let interface_ptr = DualModuleInterfacePtr::new(); + + PrimalModuleParallelUnitPtr::new_value(PrimalModuleParallelUnit { + unit_index, + interface_ptr, + partition_info: partition_info.clone(), + serial_module: primal_module, + adjacent_parallel_units: BTreeMap::new(), + is_solved: false, + }) + }) + .collect_into_vec(&mut units); + }); + + // we need to fill in the BTreeMap of adjacent_parallel_units + // we need to fill in the adjacent_parallel_units here + for unit_index in 0..partition_info.units.len() { + // println!("for unit {:?}", unit_index); + let mut unit = units[unit_index].write(); + for adjacent_unit_index in &partition_info.units[unit_index].adjacent_parallel_units { + // println!("adjacent_parallel_unit: {:?}", adjacent_unit_index); + let adjacnet_unit_pointer = &units[*adjacent_unit_index]; + unit.adjacent_parallel_units.insert(adjacnet_unit_pointer.clone(), false); + // println!("adjacent_parallel_unit ptr: {:?}", Arc::as_ptr(pointer.clone().ptr())); + } + drop(unit); + } + + Self { + units, + config, + partition_info, + thread_pool: Arc::new(thread_pool), + } + } +} + +impl PrimalModuleParallelUnitPtr { + + // syndrome pattern is created in this function. This function could not be used for dynamic fusion + fn individual_solve( + &self, + _primal_module_parallel: &PrimalModuleParallel, + partitioned_syndrome_pattern: PartitionedSyndromePattern, + parallel_dual_module: &DualModuleParallel, + callback: &mut Option<&mut F>, + ) where + F: FnMut( + &DualModuleInterfacePtr, + &DualModuleParallelUnit, + &PrimalModuleSerial, + Option<&GroupMaxUpdateLength>, + ), + Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, + { + + let mut primal_unit = self.write(); + let unit_index = primal_unit.unit_index; + // cprintln!("individual_solve for unit: {:?}", unit_index); + // println!("unit index: {}", primal_unit.unit_index); + let dual_module_ptr = ¶llel_dual_module.units[unit_index]; + // let mut dual_unit = dual_module_ptr.write(); + let partition_unit_info = &primal_unit.partition_info.units[unit_index]; + let owned_defect_range = partitioned_syndrome_pattern.partition(partition_unit_info); + let interface_ptr = primal_unit.interface_ptr.clone(); + + // solve the individual unit first + if !primal_unit.is_solved { + // we solve the individual unit first + let syndrome_pattern = Arc::new(owned_defect_range.expand()); + // let syndrome_pattern = Arc::new(SyndromePattern::new(dual_module_ptr.read_recursive().serial_module.all_defect_vertices.clone(), vec![])); + // println!("defect vertices in unit: {:?} are {:?}", unit_index, syndrome_pattern.defect_vertices); + primal_unit.serial_module.solve_step_callback_ptr( + &interface_ptr, + syndrome_pattern, + &mut dual_module_ptr.clone(), + |interface, dual_module, primal_module, group_max_update_length| { + if let Some(callback) = callback.as_mut() { + callback(interface, dual_module, primal_module, Some(group_max_update_length)); + } + }, + ); + primal_unit.is_solved = true; + // println!("unit: {:?}, is_solved: {:?}", unit_index, primal_unit.is_solved); + if let Some(callback) = callback.as_mut() { + callback(&primal_unit.interface_ptr, &dual_module_ptr.write().deref_mut(), &primal_unit.serial_module, None); + } + } + drop(primal_unit); + } + + /// call this only if children is guaranteed to be ready and solved + #[allow(clippy::unnecessary_cast)] + fn fuse_and_solve( + &self, + _primal_module_parallel: &PrimalModuleParallel, + partitioned_syndrome_pattern: PartitionedSyndromePattern, + parallel_dual_module: &DualModuleParallel, + callback: &mut Option<&mut F>, + ) where + F: FnMut( + &DualModuleInterfacePtr, + &DualModuleParallelUnit, + &PrimalModuleSerial, + Option<&GroupMaxUpdateLength>, + ), + Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, + { + // cprintln!("fuse_and_solve for unit: {:?}", self.read_recursive().unit_index); + // assert!(self.read_recursive().is_solved, "this unit must have been solved before we fuse it with its neighbors"); + + // this unit has been solved, we can fuse it with its adjacent units + // we iterate through the dag_partition_unit to fuse units together + let self_dual_ptr = ¶llel_dual_module.units[self.read_recursive().unit_index]; + self.fuse_operation_on_adjacent_units(self_dual_ptr, parallel_dual_module); + + let mut primal_unit = self.write(); + primal_unit.fuse_operation_on_self(self_dual_ptr, parallel_dual_module); + + if let Some(callback) = callback.as_mut() { + callback(&primal_unit.interface_ptr, &self_dual_ptr.write().deref_mut(), &primal_unit.serial_module, None); + } + + // now we have finished fusing self with all adjacent units, we run solve again + + // let mut dual_unit = self_dual_ptr.write(); + let partition_unit_info = &primal_unit.partition_info.units[primal_unit.unit_index]; + let owned_defect_range = partitioned_syndrome_pattern.partition(partition_unit_info); + let interface_ptr = primal_unit.interface_ptr.clone(); + + if primal_unit.is_solved { + primal_unit.serial_module.solve_step_callback_interface_loaded_ptr( + &interface_ptr, + &mut self_dual_ptr.clone(), + |interface, dual_module, primal_module, group_max_update_length| { + if let Some(callback) = callback.as_mut() { + callback(interface, dual_module, primal_module, Some(group_max_update_length)); + } + }, + ); + if let Some(callback) = callback.as_mut() { + callback(&primal_unit.interface_ptr, &self_dual_ptr.write().deref_mut(), &primal_unit.serial_module, None); + } + } else { + // we solve the individual unit first + let syndrome_pattern = Arc::new(owned_defect_range.expand()); + // println!("unit: {:?}, owned_defect_range: {:?}", primal_unit.unit_index, syndrome_pattern); + primal_unit.serial_module.solve_step_callback_ptr( + &interface_ptr, + syndrome_pattern, + &mut self_dual_ptr.clone(), + |interface, dual_module, primal_module, group_max_update_length| { + if let Some(callback) = callback.as_mut() { + callback(interface, dual_module, primal_module, Some(group_max_update_length)); + } + }, + ); + primal_unit.is_solved = true; + if let Some(callback) = callback.as_mut() { + callback(&primal_unit.interface_ptr, &self_dual_ptr.write().deref_mut(), &primal_unit.serial_module, None); + } + } + + } + + fn fuse_operation_on_adjacent_units + (&self, + self_dual_ptr: &DualModuleParallelUnitPtr, + parallel_dual_module: &DualModuleParallel, + ) + where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, + { + // we need to fuse this unit with all of its adjacent units + // this is for the adjacent unit + for (adjacent_unit_ptr, is_fused) in self.read_recursive().adjacent_parallel_units.iter() { + if *is_fused { + // if already fused, then skip + continue; + } else { + let mut adjacent_unit = adjacent_unit_ptr.write(); + if let Some(is_fused_with_self) = adjacent_unit.adjacent_parallel_units.get_mut(self) { + *is_fused_with_self = true; + } else { + panic!("this adjacent unit does not have self as its adjacent unit, check new_config"); + } + + // after setting the bool in BTreeMap of PrimalModuleParallelUnit, we need to add the corresponding DualModuleParallelUnit + let adjacent_dual_unit_ptr = ¶llel_dual_module.units[adjacent_unit.unit_index]; + let mut adjacent_dual_unit = adjacent_dual_unit_ptr.write(); + adjacent_dual_unit.adjacent_parallel_units.push(self_dual_ptr.clone()); + + // we also need to change the `is_fusion` of all vertices of adjacent_dual_unit to true. + // println!("all mirrored vertices len: {:?}", adjacent_dual_unit.serial_module.all_mirrored_vertices.len()); + for vertex_ptr in adjacent_dual_unit.serial_module.all_mirrored_vertices.iter() { + let mut vertex = vertex_ptr.write(); + vertex.fusion_done = true; + } + + for vertex_ptr in adjacent_dual_unit.serial_module.all_mirrored_vertices.iter() { + // we also need to reset the growth of all edges connecting adjacent_unit with self_unit, this is to allow dual nodes from two units interact with each other + // so that Conflict can be reported + + for edge_weak in vertex_ptr.read_recursive().edges.iter() { + let edge_ptr = edge_weak.upgrade_force(); + let mut edge = edge_ptr.write(); + // println!("edge weak of mirrored vertex"); + if edge.connected_to_boundary_vertex { + // println!("edge: {:?}", edge.edge_index); + edge.growth_at_last_updated_time /= Rational::from_usize(2).unwrap(); + } + } + } + + + // println!("adjacent_unit: {:?}", adjacent_unit.unit_index); + // println!("adjacent_unit.adjacent_parallel_units: {:?}", adjacent_dual_unit.adjacent_parallel_units); + // for vertex_ptr in adjacent_dual_unit.serial_module.vertices.iter() { + // println!("vertex {:?} is fusion: {:?}", vertex_ptr.read_recursive().vertex_index, vertex_ptr.read_recursive().fusion_done); + // } + drop(adjacent_unit); + } + + } + + } +} + +impl PrimalModuleParallelUnit { + fn fuse_operation_on_self + (&mut self, + self_dual_ptr: &DualModuleParallelUnitPtr, + parallel_dual_module: &DualModuleParallel, + ) + where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, + { + let mut self_dual_unit = self_dual_ptr.write(); + for (adjacent_unit_ptr, is_fused) in self.adjacent_parallel_units.iter_mut() { + if *is_fused { + // if already fused, then skip + continue; + } else { + *is_fused = true; + + // we need to add the DualModuleParallelUnitPtr to the adjacent_parallel_units of self + let adjacent_dual_unit_ptr = ¶llel_dual_module.units[adjacent_unit_ptr.read_recursive().unit_index]; + self_dual_unit.adjacent_parallel_units.push(adjacent_dual_unit_ptr.clone()); + } + } + // we also need to change the `is_fusion` of all vertices of self_dual_unit to true. + for vertex_ptr in self_dual_unit.serial_module.vertices.iter() { + let mut vertex = vertex_ptr.write(); + vertex.fusion_done = true; + } + + // for vertex_ptr in self_dual_unit.serial_module.vertices.iter() { + + // // we also need to reset the growth of all edges connecting adjacent_unit with self_unit, this is to allow dual nodes from two units interact with each other + // // so that Conflict can be reported + // for edge_weak in vertex_ptr.get_edge_neighbors().iter() { + // // println!("incident edge to vertex_ptr in boundary unit is {:?}, with growth: {:?}", edge_weak.upgrade_force().read_recursive().edge_index, edge_weak.upgrade_force().read_recursive().growth_at_last_updated_time); + // // let edge_ptr = edge_weak.upgrade_force(); + // // let mut edge = edge_ptr.write(); + // // if edge.connected_to_boundary_vertex { + // // println!("edge self: {:?}", edge.edge_index); + // // edge.growth_at_last_updated_time = Rational::zero(); + // // } + // } + // } + // println!("self_dual_unit: {:?}", self_dual_unit.unit_index); + // println!("self_dual_unit.adjacent_parallel_units: {:?}", self_dual_unit.adjacent_parallel_units); + // for vertex_ptr in self_dual_unit.serial_module.vertices.iter() { + // println!("vertex {:?} is fusion: {:?}", vertex_ptr.read_recursive().vertex_index, vertex_ptr.read_recursive().fusion_done); + // } + drop(self_dual_unit); + } +} + +impl PrimalModuleParallel { + pub fn parallel_solve( + &mut self, + syndrome_pattern: Arc, + parallel_dual_module: &DualModuleParallel, + ) where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, + { + self.parallel_solve_step_callback(syndrome_pattern, parallel_dual_module, |_, _, _, _| {}); + } + + pub fn parallel_solve_visualizer( + &mut self, + syndrome_pattern: Arc, + parallel_dual_module: &DualModuleParallel, + visualizer: Option<&mut Visualizer>, + ) where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, + { + if let Some(visualizer) = visualizer { + self.parallel_solve_step_callback( + syndrome_pattern, + parallel_dual_module, + |interface, dual_module, primal_module, group_max_update_length| { + if let Some(group_max_update_length) = group_max_update_length { + if cfg!(debug_assertions) { + // println!("group_max_update_length: {:?}", group_max_update_length); + } + if group_max_update_length.is_unbounded() { + visualizer + .snapshot_combined("unbounded grow".to_string(), vec![interface, dual_module, primal_module]) + .unwrap(); + } else if let Some(length) = group_max_update_length.get_valid_growth() { + visualizer + .snapshot_combined(format!("grow {length}"), vec![interface, dual_module, primal_module]) + .unwrap(); + } else { + let first_conflict = format!("{:?}", group_max_update_length.peek().unwrap()); + visualizer + .snapshot_combined( + format!("resolve {first_conflict}"), + vec![interface, dual_module, primal_module], + ) + .unwrap(); + }; + } else { + visualizer + .snapshot_combined("unit solved".to_string(), vec![interface, dual_module, primal_module]) + .unwrap(); + } + + }, + ); + // let last_unit = self.units.last().unwrap().read_recursive(); + // visualizer + // .snapshot_combined( + // "solved".to_string(), + // vec![&last_unit.interface_ptr, parallel_dual_module, self], + // ) + // .unwrap(); + } else { + self.parallel_solve(syndrome_pattern, parallel_dual_module); + } + } + + pub fn parallel_solve_step_callback( + &mut self, + syndrome_pattern: Arc, + parallel_dual_module: &DualModuleParallel, + mut callback: F, + ) where + F: FnMut( + &DualModuleInterfacePtr, + &DualModuleParallelUnit, + &PrimalModuleSerial, + Option<&GroupMaxUpdateLength>, + ), + Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, + { + // // parallel implementation using rayon + // let thread_pool = Arc::clone(&self.thread_pool); + // thread_pool.scope(|_| { + // (0..self.partition_info.config.partitions.len()) + // .into_par_iter() + // .for_each( |unit_index| { + // let unit_ptr = self.units[unit_index].clone(); + // unit_ptr.individual_solve::( + // self, + // PartitionedSyndromePattern::new(&syndrome_pattern), + // parallel_dual_module, + // &mut None, + // ); + // }) + // }); + + + // thread_pool.scope(|_| { + // (self.partition_info.config.partitions.len()..self.partition_info.units.len()) + // .into_par_iter() + // .for_each( |unit_index| { + // if (unit_index - self.partition_info.config.partitions.len()) % 2 == 0 { + // let unit_ptr = self.units[unit_index].clone(); + // unit_ptr.fuse_and_solve::( + // self, + // PartitionedSyndromePattern::new(&syndrome_pattern), + // parallel_dual_module, + // &mut None, + // ); + // } + // }) + // }); + + // for unit_index in self.partition_info.config.partitions.len()..self.partition_info.units.len() { + // if (unit_index - self.partition_info.config.partitions.len()) % 2 == 1 { + // let unit_ptr = self.units[unit_index].clone(); + // unit_ptr.fuse_and_solve::( + // self, + // PartitionedSyndromePattern::new(&syndrome_pattern), + // parallel_dual_module, + // &mut None, + // ); + // } + // } + + // thread_pool.scope(|_| { + // (self.partition_info.config.partitions.len()..self.partition_info.units.len()) + // .into_par_iter() + // .for_each( |unit_index| { + // if (unit_index - self.partition_info.config.partitions.len()) % 2 == 1 { + // let unit_ptr = self.units[unit_index].clone(); + // unit_ptr.fuse_and_solve::( + // self, + // PartitionedSyndromePattern::new(&syndrome_pattern), + // parallel_dual_module, + // &mut None, + // ); + // } + // }) + // }); + + + // sequential implementation + for unit_index in 0..self.partition_info.config.partitions.len(){ + let unit_ptr = self.units[unit_index].clone(); + unit_ptr.individual_solve::( + self, + PartitionedSyndromePattern::new(&syndrome_pattern), + parallel_dual_module, + &mut Some(&mut callback), + ); + } + + for unit_index in self.partition_info.config.partitions.len()..self.partition_info.units.len() { + let unit_ptr = self.units[unit_index].clone(); + unit_ptr.fuse_and_solve::( + self, + PartitionedSyndromePattern::new(&syndrome_pattern), + parallel_dual_module, + &mut Some(&mut callback), + ); + } + } + + +} + +impl PrimalModuleImpl for PrimalModuleParallel { + /// create a primal module given the dual module + fn new_empty(_solver_initializer: &SolverInitializer) -> Self { + // use new_config directly instead + unimplemented!() + // Self::new_config( + // solver_initializer, + // &PartitionConfig::new(solver_initializer.vertex_num).info(), + // PrimalModuleParallelConfig::default(), + // growing_strategy, + // plugins, + // ) + } + + /// clear all states; however this method is not necessarily called when load a new decoding problem, so you need to call it yourself + fn clear(&mut self) { + self.thread_pool.scope(|_| { + self.units.par_iter().enumerate().for_each(|(_unit_idx, unit_ptr)| { + let mut unit = unit_ptr.write(); + unit.clear(); + }); + }); + } + + /// load a new decoding problem given dual interface: note that all nodes MUST be defect node + /// this function needs to be written to allow dynamic fusion + fn load(&mut self, _interface_ptr: &DualModuleInterfacePtr, _dual_module: &mut D) { + panic!("load interface directly into the parallel primal module is forbidden, use `individual_solve` instead"); + } + + /// analyze the reason why dual module cannot further grow, update primal data structure (alternating tree, temporary matches, etc) + /// and then tell dual module what to do to resolve these conflicts; + /// note that this function doesn't necessarily resolve all the conflicts, but can return early if some major change is made. + /// when implementing this function, it's recommended that you resolve as many conflicts as possible. + /// + /// note: this is only ran in the "search" mode + fn resolve( + &mut self, + _group_max_update_length: GroupMaxUpdateLength, + _interface: &DualModuleInterfacePtr, + _dual_module: &mut impl DualModuleImpl, + ) -> bool { + panic!("parallel primal module cannot handle global resolve requests, use `individual_solve` instead"); + } + + /// resolve the conflicts in the "tune" mode + fn resolve_tune( + &mut self, + _group_max_update_length: BTreeSet, + _interface: &DualModuleInterfacePtr, + _dual_module: &mut impl DualModuleImpl, + ) -> (BTreeSet, bool) { + panic!("`resolve_tune` not implemented, this primal module does not work with tuning mode"); + } + + fn solve( + &mut self, + interface: &DualModuleInterfacePtr, + syndrome_pattern: Arc, + dual_module: &mut impl DualModuleImpl, + ) { + self.solve_step_callback(interface, syndrome_pattern, dual_module, |_, _, _, _| {}) + } + + fn subgraph(&mut self, _interface: &DualModuleInterfacePtr, seed: u64) + -> Subgraph + { + // let unit_ptr0 = self.units.last().unwrap(); + // let mut unit = unit_ptr0.write(); + // let interface_ptr = unit.interface_ptr.clone(); + // unit.subgraph(&interface_ptr, seed) + // sequential implementation for debugging purposes + let mut subgraph = vec![]; + for unit_ptr in self.units.iter() { + let mut unit = unit_ptr.write(); + // println!("unit: {:?}", unit.unit_index); + let interface_ptr = unit.interface_ptr.clone(); + subgraph.extend(unit.subgraph(&interface_ptr, seed)) + } + subgraph + + // // // implementation using rayon + // self.thread_pool.scope(|_| { + // let results: Vec<_> = + // self.units.par_iter().filter_map(| unit_ptr| { + // let mut unit = unit_ptr.write(); + // let interface_ptr = unit.interface_ptr.clone(); + // Some(unit.subgraph(&interface_ptr, seed)) + // }).collect(); + // let mut final_subgraph: Vec = vec![]; + // for local_subgraph in results.into_iter() { + // final_subgraph.extend(local_subgraph); + // } + // final_subgraph + // }) + } + + fn subgraph_range( + &mut self, + interface: &DualModuleInterfacePtr, + seed: u64, + ) -> (Subgraph, WeightRange) { + let subgraph = self.subgraph(interface, seed); + let mut upper = Rational::zero(); + for edge_weak in subgraph.iter() { + // weight += self.weighted_edges[edge_index as usize].weight; + // println!("{:?} edge in subgraph: {:?}, weight: {:?}", i, edge_weak.upgrade_force().read_recursive().edge_index, edge_weak.upgrade_force().read_recursive().weight); + upper += edge_weak.upgrade_force().read_recursive().weight; + } + + // let lower = self.units.last().unwrap().read_recursive().interface_ptr.sum_dual_variables(); + + let mut lower = Rational::zero(); + for unit_ptr in self.units.iter() { + let unit = unit_ptr.read_recursive(); + lower += unit.interface_ptr.sum_dual_variables(); + } + + let weight_range = WeightRange::new( + lower, + upper + ); + + (subgraph, weight_range) + } +} + +impl PrimalModuleImpl for PrimalModuleParallelUnit { + /// create a primal module given the dual module + /// this function needs to be implemented for dynamic fusion + fn new_empty(_solver_initializer: &SolverInitializer) -> Self { + panic!("creating parallel unit directly from initializer is forbidden, use `PrimalModuleParallel::new` instead"); + } + + /// clear all states; however this method is not necessarily called when load a new decoding problem, so you need to call it yourself + fn clear(&mut self) { + self.serial_module.clear(); + self.interface_ptr.clear(); + } + + /// load a new decoding problem given dual interface: note that all nodes MUST be defect node + fn load(&mut self, interface_ptr: &DualModuleInterfacePtr, dual_module: &mut D) { + self.serial_module.load(interface_ptr, dual_module); + } + + /// analyze the reason why dual module cannot further grow, update primal data structure (alternating tree, temporary matches, etc) + /// and then tell dual module what to do to resolve these conflicts; + /// note that this function doesn't necessarily resolve all the conflicts, but can return early if some major change is made. + /// when implementing this function, it's recommended that you resolve as many conflicts as possible. + /// + /// note: this is only ran in the "search" mode + fn resolve( + &mut self, + group_max_update_length: GroupMaxUpdateLength, + interface: &DualModuleInterfacePtr, + dual_module: &mut impl DualModuleImpl, + ) -> bool { + self.serial_module.resolve(group_max_update_length, interface, dual_module) + } + + /// resolve the conflicts in the "tune" mode + fn resolve_tune( + &mut self, + group_max_update_length: BTreeSet, + interface: &DualModuleInterfacePtr, + dual_module: &mut impl DualModuleImpl, + ) -> (BTreeSet, bool) { + self.serial_module.resolve_tune(group_max_update_length, interface, dual_module) + } + + fn subgraph(&mut self, interface: &DualModuleInterfacePtr, seed: u64) + -> Subgraph + { + // println!("\nfn subgraph for unit: {:?}", self.unit_index); + self.serial_module.subgraph(interface, seed) + } + + fn subgraph_range( + &mut self, + interface: &DualModuleInterfacePtr, + seed: u64, + ) -> (Subgraph, WeightRange) { + self.serial_module.subgraph_range(interface, seed) + } +} + + + +impl MWPSVisualizer for PrimalModuleParallel { + fn snapshot(&self, abbrev: bool) -> serde_json::Value { + // do the sanity check first before taking snapshot + // self.sanity_check().unwrap(); + let mut value = json!({}); + for unit_ptr in self.units.iter() { + let unit = unit_ptr.read_recursive(); + // if !unit.is_active { + // continue; + // } // do not visualize inactive units + let value_2 = unit.snapshot(abbrev); + snapshot_combine_values(&mut value, value_2, abbrev); + } + value + } +} + +impl MWPSVisualizer for PrimalModuleParallelUnit { + fn snapshot(&self, abbrev: bool) -> serde_json::Value { + self.serial_module.snapshot(abbrev) + } +} + + + +#[cfg(test)] +pub mod tests { + use super::super::example_codes::*; + use super::super::primal_module::*; + + use super::super::primal_module_serial::*; + use crate::decoding_hypergraph::*; + use super::*; + use crate::num_traits::FromPrimitive; + + use crate::plugin_single_hair::PluginSingleHair; + use crate::plugin_union_find::PluginUnionFind; + use crate::plugin::PluginVec; + use crate::dual_module_serial::*; + use crate::dual_module_pq::*; + use std::usize::MAX; + + pub fn primal_module_parallel_basic_standard_syndrome( + code: impl ExampleCode, + visualize_filename: String, + defect_vertices: Vec, + final_dual: Weight, + plugins: PluginVec, + growing_strategy: GrowingStrategy, + ) -> ( + PrimalModuleParallel, + impl DualModuleImpl + MWPSVisualizer, + ) { + println!("{defect_vertices:?}"); + let visualizer = { + let visualizer = Visualizer::new( + Some(visualize_data_folder() + visualize_filename.as_str()), + code.get_positions(), + true, + ) + .unwrap(); + print_visualize_link(visualize_filename.clone()); + visualizer + }; + + // create dual module + let model_graph = code.get_model_graph(); + let initializer = &model_graph.initializer; + let mut partition_config = PartitionConfig::new(initializer.vertex_num); + partition_config.partitions = vec![ + VertexRange::new(0, 18), // unit 0 + VertexRange::new(24, 42), // unit 1 + ]; + partition_config.fusions = vec![ + (0, 1), // unit 2, by fusing 0 and 1 + ]; + let a = partition_config.dag_partition_units.add_node(()); + let b = partition_config.dag_partition_units.add_node(()); + partition_config.dag_partition_units.add_edge(a, b, false); + partition_config.defect_vertices = BTreeSet::from_iter(defect_vertices.clone()); + + let partition_info = partition_config.info(); + + + let mut dual_module_parallel_config = DualModuleParallelConfig::default(); + // dual_module_parallel_config.enable_parallel_execution = true; + let dual_module: DualModuleParallel>, FutureObstacleQueue> = + DualModuleParallel::new_config(&initializer, &partition_info, dual_module_parallel_config); + + // create primal module + let primal_config = PrimalModuleParallelConfig {..Default::default()}; + let primal_module = PrimalModuleParallel::new_config(&model_graph.initializer, &partition_info, primal_config.clone(), growing_strategy, Arc::new(plugins.clone())); + // primal_module.growing_strategy = growing_strategy; + // primal_module.plugins = Arc::new(plugins); + // primal_module.config = serde_json::from_value(json!({"timeout":1})).unwrap(); + + primal_module_parallel_basic_standard_syndrome_optional_viz( + code, + defect_vertices, + final_dual, + plugins, + growing_strategy, + dual_module, + primal_module, + model_graph, + Some(visualizer), + ) + } + + #[allow(clippy::too_many_arguments)] + pub fn primal_module_parallel_basic_standard_syndrome_optional_viz + ( + _code: impl ExampleCode, + defect_vertices: Vec, + final_dual: Weight, + _plugins: PluginVec, + _growing_strategy: GrowingStrategy, + mut dual_module: DualModuleParallel, Queue>, + mut primal_module: PrimalModuleParallel, + model_graph: Arc, + mut visualizer: Option, + ) -> ( + PrimalModuleParallel, + impl DualModuleImpl + MWPSVisualizer, + ) + where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, + { + // try to work on a simple syndrome + let decoding_graph = DecodingHyperGraph::new_defects(model_graph, defect_vertices.clone()); + let begin_time = std::time::Instant::now(); + primal_module.parallel_solve_visualizer( + decoding_graph.syndrome_pattern.clone(), + &mut dual_module, + visualizer.as_mut(), + ); + + let useless_interface_ptr = DualModuleInterfacePtr::new(); + let (subgraph, weight_range) = primal_module.subgraph_range(&useless_interface_ptr, 0); + + if let Some(visualizer) = visualizer.as_mut() { + let last_interface_ptr = &primal_module.units.last().unwrap().read_recursive().interface_ptr; + visualizer + .snapshot_combined( + "subgraph".to_string(), + vec![last_interface_ptr, &dual_module, &subgraph, &weight_range], + ) + .unwrap(); + } + assert!( + decoding_graph + .model_graph + .matches_subgraph_syndrome(&subgraph, &defect_vertices), + "the result subgraph is invalid" + ); + primal_module.clear(); + dual_module.clear(); + let end_time = std::time::Instant::now(); + let resolve_time = (end_time - begin_time); + println!("resolve time {:?}", resolve_time); + + // assert_eq!( + // Rational::from_usize(final_dual).unwrap(), + // weight_range.upper, + // "unmatched sum dual variables" + // ); + // assert_eq!( + // Rational::from_usize(final_dual).unwrap(), + // weight_range.lower, + // "unexpected final dual variable sum" + // ); + (primal_module, dual_module) + } + + /// test a simple case + #[test] + fn primal_module_parallel_tentative_test_1() { + // RUST_BACKTRACE=1 cargo test primal_module_parallel_tentative_test_1 -- --nocapture + let weight = 1; // do not change, the data is hard-coded + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + let defect_vertices = vec![13, 20, 29, 32, 39]; + + let visualize_filename = "primal_module_parallel_tentative_test_1.json".to_string(); + primal_module_parallel_basic_standard_syndrome( + code, + visualize_filename, + defect_vertices, + 4, + vec![], + GrowingStrategy::ModeBased, + ); + } + + /// test a simple case + #[test] + fn primal_module_parallel_tentative_test_2() { + // RUST_BACKTRACE=1 cargo test primal_module_parallel_tentative_test_2 -- --nocapture + let weight = 1; // do not change, the data is hard-coded + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + let defect_vertices = vec![7, 21, 28]; + + let visualize_filename = "primal_module_parallel_tentative_test_2.json".to_string(); + primal_module_parallel_basic_standard_syndrome( + code, + visualize_filename, + defect_vertices, + 6, + vec![], + GrowingStrategy::ModeBased, + ); + } + + /// test a simple case, split into 2, no defect vertex in boundary-unit, clusters do not grow into other units + #[test] + fn primal_module_parallel_tentative_test_3() { + // RUST_BACKTRACE=1 cargo test primal_module_parallel_tentative_test_3 -- --nocapture + let weight = 1; // do not change, the data is hard-coded + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + let defect_vertices = vec![2, 35]; + + let visualize_filename = "primal_module_parallel_tentative_test_3.json".to_string(); + primal_module_parallel_basic_standard_syndrome( + code, + visualize_filename, + defect_vertices, + 4, + vec![], + GrowingStrategy::ModeBased, + ); + } + + // test a simple case, split into 2, a defect vertex in boundary-unit, clusters do grow into other units + #[test] + fn primal_module_parallel_tentative_test_4() { + // RUST_BACKTRACE=1 cargo test primal_module_parallel_tentative_test_4 -- --nocapture + let weight = 1; // do not change, the data is hard-coded + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + let defect_vertices = vec![19, 35]; + + let visualize_filename = "primal_module_parallel_tentative_test_4.json".to_string(); + primal_module_parallel_basic_standard_syndrome( + code, + visualize_filename, + defect_vertices, + 3, + vec![], + GrowingStrategy::ModeBased, + ); + } + + #[test] + fn primal_module_parallel_tentative_test_5() { + // RUST_BACKTRACE=1 cargo test primal_module_parallel_tentative_test_5 -- --nocapture + let weight = 1; // do not change, the data is hard-coded + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + let defect_vertices = vec![16, 19, 29, 39]; + + let visualize_filename = "primal_module_parallel_tentative_test_5.json".to_string(); + primal_module_parallel_basic_standard_syndrome( + code, + visualize_filename, + defect_vertices, + 5, + vec![], + GrowingStrategy::ModeBased, + ); + } + + #[test] + fn primal_module_parallel_tentative_test_6() { + // RUST_BACKTRACE=1 cargo test primal_module_parallel_tentative_test_6 -- --nocapture + let weight = 1; // do not change, the data is hard-coded + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + let defect_vertices = vec![16, 19, 29, 32, 39]; + + let visualize_filename = "primal_module_parallel_tentative_test_6.json".to_string(); + primal_module_parallel_basic_standard_syndrome( + code, + visualize_filename, + defect_vertices, + 5, + vec![], + GrowingStrategy::ModeBased, + ); + } + + pub fn primal_module_parallel_basic_standard_syndrome_split_into_4( + code: impl ExampleCode, + visualize_filename: String, + defect_vertices: Vec, + final_dual: Weight, + plugins: PluginVec, + growing_strategy: GrowingStrategy, + ) -> ( + PrimalModuleParallel, + impl DualModuleImpl + MWPSVisualizer, + ) { + println!("{defect_vertices:?}"); + let visualizer = { + let visualizer = Visualizer::new( + Some(visualize_data_folder() + visualize_filename.as_str()), + code.get_positions(), + true, + ) + .unwrap(); + print_visualize_link(visualize_filename.clone()); + visualizer + }; + + // create model graph + let model_graph = code.get_model_graph(); + let initializer = &model_graph.initializer; + let mut partition_config = PartitionConfig::new(initializer.vertex_num); + partition_config.partitions = vec![ + VertexRange::new(0, 6), // unit 0 + VertexRange::new(12, 18), // unit 1 + VertexRange::new(24, 30), // unit 2 + VertexRange::new(36, 42), // unit 3 + ]; + partition_config.fusions = vec![ + (0, 1), // unit 4, by fusing 0 and 1 + (1, 2), // unit 5, + (2, 3), // unit 6 + ]; + let a = partition_config.dag_partition_units.add_node(()); + let b = partition_config.dag_partition_units.add_node(()); + let c = partition_config.dag_partition_units.add_node(()); + let d = partition_config.dag_partition_units.add_node(()); + partition_config.dag_partition_units.add_edge(a, b, false); + partition_config.dag_partition_units.add_edge(b, c, false); + partition_config.dag_partition_units.add_edge(c, d, false); + + partition_config.defect_vertices = BTreeSet::from_iter(defect_vertices.clone()); + + let partition_info = partition_config.info(); + + let mut dual_module_parallel_config = DualModuleParallelConfig::default(); + // dual_module_parallel_config.enable_parallel_execution = true; + let dual_module: DualModuleParallel>, FutureObstacleQueue> = + DualModuleParallel::new_config(&initializer, &partition_info, dual_module_parallel_config); + + // create primal module + let primal_config = PrimalModuleParallelConfig {..Default::default()}; + let primal_module = PrimalModuleParallel::new_config(&model_graph.initializer, &partition_info, primal_config.clone(), growing_strategy, Arc::new(plugins.clone())); + // primal_module.growing_strategy = growing_strategy; + // primal_module.plugins = Arc::new(plugins); + // primal_module.config = serde_json::from_value(json!({"timeout":1})).unwrap(); + + primal_module_parallel_basic_standard_syndrome_optional_viz( + code, + defect_vertices, + final_dual, + plugins, + growing_strategy, + dual_module, + primal_module, + model_graph, + Some(visualizer), + ) + } + + /// test a simple case, split into 4, a defect vertex in boundary-unit, clusters grow into other units + #[test] + fn primal_module_parallel_tentative_test_7() { + // RUST_BACKTRACE=1 cargo test primal_module_parallel_tentative_test_7 -- --nocapture + let weight = 1; // do not change, the data is hard-coded + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + let defect_vertices = vec![13, 20, 29, 32, 39]; + + let visualize_filename = "primal_module_parallel_tentative_test_7.json".to_string(); + primal_module_parallel_basic_standard_syndrome_split_into_4( + code, + visualize_filename, + defect_vertices, + 6, + vec![], + GrowingStrategy::ModeBased, + ); + } + + + /// test for time partition + #[allow(clippy::unnecessary_cast)] + pub fn graph_time_partition(initializer: &SolverInitializer, positions: &Vec, defect_vertices: &Vec, split_num: usize) -> PartitionConfig { + assert!(positions.len() > 0, "positive number of positions"); + let mut partition_config = PartitionConfig::new(initializer.vertex_num); + let mut last_t = positions[0].t; + let mut t_list: Vec = vec![]; + t_list.push(last_t); + for position in positions { + assert!(position.t >= last_t, "t not monotonically increasing, vertex reordering must be performed before calling this"); + if position.t != last_t { + t_list.push(position.t); + } + last_t = position.t; + } + + // pick the t value in the middle to split it + let mut t_split_vec: Vec = vec![0.0; split_num - 1]; + for i in 0..(split_num - 1) { + let index: usize = t_list.len()/split_num * (i + 1); + t_split_vec[i] = t_list[index]; + } + // find the vertices indices + let mut split_start_index_vec = vec![MAX; split_num - 1]; + let mut split_end_index_vec = vec![MAX; split_num - 1]; + let mut start_index = 0; + let mut end_index = 0; + for (vertex_index, position) in positions.iter().enumerate() { + if start_index < split_num - 1 { + if split_start_index_vec[start_index] == MAX && position.t == t_split_vec[start_index] { + split_start_index_vec[start_index] = vertex_index; + if start_index != 0 { + end_index += 1; + } + start_index += 1; + } + } + + if end_index < split_num - 1 { + if position.t == t_split_vec[end_index] { + split_end_index_vec[end_index] = vertex_index + 1; + // end_index += 1; + } + } + } + + assert!(split_start_index_vec.iter().all(|&x| x != MAX), "Some elements in split_start_index_vec are equal to MAX"); + + // partitions are found + let mut graph_nodes = vec![]; + let mut partitions_vec = vec![]; + for i in 0..split_num { + if i == 0 { + partitions_vec.push(VertexRange::new(0, split_start_index_vec[0])); + } else if i == split_num - 1 { + partitions_vec.push(VertexRange::new(split_end_index_vec[i - 1], positions.len())); + } else { + partitions_vec.push(VertexRange::new(split_end_index_vec[i - 1], split_start_index_vec[i])); + } + + if i < split_num - 1 { + partition_config.fusions.push((i, i+1)); + } + + let a = partition_config.dag_partition_units.add_node(()); + graph_nodes.push(a.clone()); + } + partition_config.partitions = partitions_vec; + + for i in 0..split_num { + if i < split_num - 1 { + partition_config.dag_partition_units.add_edge(graph_nodes[i], graph_nodes[i+1], false); + } + } + partition_config.defect_vertices = BTreeSet::from_iter(defect_vertices.clone()); + + partition_config + } + + pub fn primal_module_parallel_evaluation_qec_playground_helper( + code: impl ExampleCode, + visualize_filename: String, + defect_vertices: Vec, + final_dual: Weight, + plugins: PluginVec, + growing_strategy: GrowingStrategy, + split_num: usize, + ) -> ( + PrimalModuleParallel, + impl DualModuleImpl + MWPSVisualizer, + ) { + println!("{defect_vertices:?}"); + let visualizer = { + let visualizer = Visualizer::new( + Some(visualize_data_folder() + visualize_filename.as_str()), + code.get_positions(), + true, + ) + .unwrap(); + print_visualize_link(visualize_filename.clone()); + visualizer + }; + + // create dual module + let model_graph = code.get_model_graph(); + let initializer = &model_graph.initializer; + let partition_config = graph_time_partition(&initializer, &code.get_positions(), &defect_vertices, split_num); + let partition_info = partition_config.info(); + + + // create dual module + // let decoding_graph = DecodingHyperGraph::new_defects(model_graph.clone(), vec![3, 29, 30]); + let mut dual_module_parallel_config = DualModuleParallelConfig::default(); + dual_module_parallel_config.enable_parallel_execution = true; + let mut dual_module: DualModuleParallel>, FutureObstacleQueue> = + DualModuleParallel::new_config(&initializer, &partition_info, dual_module_parallel_config); + + // create primal module + let mut primal_config = PrimalModuleParallelConfig {..Default::default()}; + primal_config.thread_pool_size = 4; + let primal_module = PrimalModuleParallel::new_config(&model_graph.initializer, &partition_info, primal_config.clone(), growing_strategy, Arc::new(plugins.clone())); + + primal_module_parallel_basic_standard_syndrome_optional_viz( + code, + defect_vertices, + final_dual, + plugins, + growing_strategy, + dual_module, + primal_module, + model_graph, + Some(visualizer), + ) + } + + #[test] + fn primal_module_parallel_circuit_level_noise_qec_playground_1() { + // cargo test primal_module_parallel_circuit_level_noise_qec_playground_1 -- --nocapture + let config = json!({ + "code_type": qecp::code_builder::CodeType::RotatedPlanarCode + }); + + let code = QECPlaygroundCode::new(3, 0.1, config); + let defect_vertices = vec![12, 19, 20]; + + let visualize_filename = "primal_module_parallel_circuit_level_noise_qec_playground_1.json".to_string(); + primal_module_parallel_evaluation_qec_playground_helper( + code, + visualize_filename, + defect_vertices, + 1661019, + vec![], + GrowingStrategy::ModeBased, + 2, + ); + } + + /// test solver on circuit level noise with random errors, split into 2 + #[test] + fn primal_module_parallel_circuit_level_noise_qec_playground_2() { + // cargo test primal_module_parallel_circuit_level_noise_qec_playground_2 -- --nocapture + let config = json!({ + "code_type": qecp::code_builder::CodeType::RotatedPlanarCode + }); + + let mut code = QECPlaygroundCode::new(7, 0.005, config); + let defect_vertices = code.generate_random_errors(132).0.defect_vertices; + + let visualize_filename = "primal_module_parallel_circuit_level_noise_qec_playground_2.json".to_string(); + primal_module_parallel_evaluation_qec_playground_helper( + code, + visualize_filename, + defect_vertices.clone(), + 2424788, + vec![], + GrowingStrategy::ModeBased, + 2, + ); + } + + /// test solver on circuit level noise with random errors, split into 4 + #[test] + fn primal_module_parallel_circuit_level_noise_qec_playground_3() { + // cargo test primal_module_parallel_circuit_level_noise_qec_playground_3 -- --nocapture + let config = json!({ + "code_type": qecp::code_builder::CodeType::RotatedPlanarCode + }); + + let mut code = QECPlaygroundCode::new(7, 0.005, config); + let defect_vertices = code.generate_random_errors(132).0.defect_vertices; + + let visualize_filename = "primal_module_parallel_circuit_level_noise_qec_playground_3.json".to_string(); + primal_module_parallel_evaluation_qec_playground_helper( + code, + visualize_filename, + defect_vertices.clone(), + 2424788, + vec![], + GrowingStrategy::ModeBased, + 4, + ); + } + + /// test solver on circuit level noise with random errors, split into 8 + #[test] + fn primal_module_parallel_circuit_level_noise_qec_playground_4() { + // cargo test primal_module_parallel_circuit_level_noise_qec_playground_4 -- --nocapture + let config = json!({ + "code_type": qecp::code_builder::CodeType::RotatedPlanarCode, + "nm": 18, + }); + + let mut code = QECPlaygroundCode::new(3, 0.005, config); + + let defect_vertices = vec![16, 26, 29, 37, 39, 44, 46, 47, 51, 52, 54, 67, 122, 151]; + + let visualize_filename = "primal_module_parallel_circuit_level_noise_qec_playground_4.json".to_string(); + primal_module_parallel_evaluation_qec_playground_helper( + code, + visualize_filename, + defect_vertices.clone(), + 2424788, + vec![], + GrowingStrategy::ModeBased, + 8, + ); + + // for seed in 0..500 { + // let defect_vertices = code.clone().generate_random_errors(seed).0.defect_vertices; + + // let visualize_filename = "primal_module_parallel_circuit_level_noise_qec_playground_4.json".to_string(); + // primal_module_parallel_evaluation_qec_playground_helper( + // code.clone(), + // visualize_filename, + // defect_vertices.clone(), + // 2424788, + // vec![], + // GrowingStrategy::ModeBased, + // 8, + // ); + // } + + } +} \ No newline at end of file diff --git a/src/primal_module_serial.rs b/src/primal_module_serial.rs index 6274e6b7..208278dd 100644 --- a/src/primal_module_serial.rs +++ b/src/primal_module_serial.rs @@ -3,6 +3,7 @@ //! This implementation targets to be an exact MWPF solver, although it's not yet sure whether it is actually one. //! +use color_print::cprintln; use crate::decoding_hypergraph::*; use crate::dual_module::*; use crate::invalid_subgraph::*; @@ -14,13 +15,28 @@ use crate::primal_module::*; use crate::relaxer_optimizer::*; use crate::util::*; use crate::visualize::*; -use parking_lot::RwLock; -use serde::{Deserialize, Serialize}; + use std::collections::BTreeMap; use std::collections::{BTreeSet, VecDeque}; use std::fmt::Debug; use std::sync::Arc; use std::time::Instant; +use std::cmp::Ordering; + +use crate::itertools::Itertools; +#[cfg(feature = "incr_lp")] +use parking_lot::Mutex; +use parking_lot::RwLock; +use serde::{Deserialize, Serialize}; +use std::ops::DerefMut; + +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; + +use crate::dual_module_parallel::*; +use crate::dual_module_pq::*; pub struct PrimalModuleSerial { /// growing strategy, default to single-tree approach for easier debugging and better locality @@ -35,11 +51,45 @@ pub struct PrimalModuleSerial { pub plugins: Arc, /// how many plugins are actually executed for every cluster pub plugin_count: Arc>, - pub plugin_pending_clusters: Vec, + pub plugin_pending_clusters: Vec, /// configuration pub config: PrimalModuleSerialConfig, /// the time spent on resolving the obstacles pub time_resolve: f64, + /// sorted clusters by affinity, only exist when needed + pub sorted_clusters_aff: Option>, +} + +#[derive(Eq, Debug)] +pub struct ClusterAffinity { + pub cluster_ptr: PrimalClusterPtr, + pub affinity: Affinity, +} + +impl PartialEq for ClusterAffinity { + fn eq(&self, other: &Self) -> bool { + self.affinity == other.affinity && self.cluster_ptr.eq(&other.cluster_ptr) + } +} + +// first sort by affinity in descending order, then by cluster_index in ascending order +impl Ord for ClusterAffinity { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + // First, compare affinity in descending order + match other.affinity.cmp(&self.affinity) { + std::cmp::Ordering::Equal => { + // If affinities are equal, compare cluster_index in ascending order + self.cluster_ptr.read_recursive().cluster_index.cmp(&other.cluster_ptr.read_recursive().cluster_index) + } + other => other, + } + } +} + +impl PartialOrd for ClusterAffinity { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -60,9 +110,11 @@ pub mod primal_serial_default_configs { #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub enum GrowingStrategy { /// focus on a single cluster at a time, for easier debugging and better locality - SingleCluster, + SingleCluster, // Question: Should this be deprecated? /// all clusters grow at the same time at the same speed MultipleClusters, + /// utilizing the search/tune mode separation + ModeBased, } pub struct PrimalModuleSerialNode { @@ -75,15 +127,27 @@ pub struct PrimalModuleSerialNode { pub type PrimalModuleSerialNodePtr = ArcRwLock; pub type PrimalModuleSerialNodeWeak = WeakRwLock; +impl std::fmt::Debug for PrimalModuleSerialNodePtr { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let primal_node = self.read_recursive(); // reading index is consistent + write!( + f, + "dual_node_ptr: {:?}\ncluster_index: {:?}", + primal_node.dual_node_ptr, + primal_node.cluster_weak.upgrade_force().read_recursive().cluster_index, + ) + } +} + pub struct PrimalCluster { /// the index in the cluster pub cluster_index: NodeIndex, /// the nodes that belongs to this cluster pub nodes: Vec, /// all the edges ever exists in any hair - pub edges: BTreeSet, + pub edges: BTreeSet, /// all the vertices ever touched by any tight edge - pub vertices: BTreeSet, + pub vertices: BTreeSet, /// the parity matrix to determine whether it's a valid cluster and also find new ways to increase the dual pub matrix: EchelonMatrix, /// the parity subgraph result, only valid when it's solved @@ -92,11 +156,47 @@ pub struct PrimalCluster { pub plugin_manager: PluginManager, /// optimizing the direction of relaxers pub relaxer_optimizer: RelaxerOptimizer, + /// HIHGS solution stored for incrmental lp + #[cfg(feature = "incr_lp")] //note: really depends where we want the error to manifest + pub incr_solution: Option>>, } pub type PrimalClusterPtr = ArcRwLock; pub type PrimalClusterWeak = WeakRwLock; +impl std::fmt::Debug for PrimalClusterPtr { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let cluster = self.read_recursive(); // reading index is consistent + write!( + f, + "cluster_index: {:?}\tnodes: {:?}\tedges: {:?}\nvertices: {:?}\nsubgraph: {:?}", + cluster.cluster_index, + cluster.nodes, + cluster.edges, + cluster.vertices, + cluster.subgraph, + ) + } +} + + +impl Ord for PrimalClusterPtr { + fn cmp(&self, other: &Self) -> Ordering { + // compare the pointer address + let ptr1 = Arc::as_ptr(self.ptr()); + let ptr2 = Arc::as_ptr(other.ptr()); + // https://doc.rust-lang.org/reference/types/pointer.html + // "When comparing raw pointers they are compared by their address, rather than by what they point to." + ptr1.cmp(&ptr2) + } +} + +impl PartialOrd for PrimalClusterPtr { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + impl PrimalModuleImpl for PrimalModuleSerial { fn new_empty(_initializer: &SolverInitializer) -> Self { Self { @@ -109,6 +209,7 @@ impl PrimalModuleImpl for PrimalModuleSerial { plugin_pending_clusters: vec![], config: serde_json::from_value(json!({})).unwrap(), time_resolve: 0., + sorted_clusters_aff: None, } } @@ -124,6 +225,7 @@ impl PrimalModuleImpl for PrimalModuleSerial { #[allow(clippy::unnecessary_cast)] fn load(&mut self, interface_ptr: &DualModuleInterfacePtr, dual_module: &mut D) { let interface = interface_ptr.read_recursive(); + // println!("interface.nodes len: {:?}", interface.nodes.len()); for index in 0..interface.nodes.len() as NodeIndex { let dual_node_ptr = &interface.nodes[index as usize]; let node = dual_node_ptr.read_recursive(); @@ -146,17 +248,23 @@ impl PrimalModuleImpl for PrimalModuleSerial { nodes: vec![], edges: node.invalid_subgraph.hair.clone(), vertices: node.invalid_subgraph.vertices.clone(), - matrix: node.invalid_subgraph.generate_matrix(&interface.decoding_graph), + matrix: node.invalid_subgraph.generate_matrix(), subgraph: None, plugin_manager: PluginManager::new(self.plugins.clone(), self.plugin_count.clone()), relaxer_optimizer: RelaxerOptimizer::new(), + #[cfg(all(feature = "incr_lp", feature = "highs"))] + incr_solution: None, }); // create the primal node of this defect node and insert into cluster let primal_node_ptr = PrimalModuleSerialNodePtr::new_value(PrimalModuleSerialNode { dual_node_ptr: dual_node_ptr.clone(), cluster_weak: primal_cluster_ptr.downgrade(), }); + drop(node); primal_cluster_ptr.write().nodes.push(primal_node_ptr.clone()); + // fill in the primal_module_serial_node in the corresponding dual node + dual_node_ptr.write().primal_module_serial_node = Some(primal_node_ptr.clone().downgrade()); + // add to self self.nodes.push(primal_node_ptr); self.clusters.push(primal_cluster_ptr); @@ -168,6 +276,7 @@ impl PrimalModuleImpl for PrimalModuleSerial { self.pending_nodes.push_back(primal_node_ptr.downgrade()); } } + } fn resolve( @@ -175,39 +284,459 @@ impl PrimalModuleImpl for PrimalModuleSerial { group_max_update_length: GroupMaxUpdateLength, interface_ptr: &DualModuleInterfacePtr, dual_module: &mut impl DualModuleImpl, - ) { + ) -> bool { + let begin = Instant::now(); + let res = self.resolve_core(group_max_update_length, interface_ptr, dual_module); + self.time_resolve += begin.elapsed().as_secs_f64(); + res + } + + fn old_resolve( + &mut self, + group_max_update_length: GroupMaxUpdateLength, + interface_ptr: &DualModuleInterfacePtr, + dual_module: &mut impl DualModuleImpl, + ) -> bool { + let begin = Instant::now(); + let res = self.old_resolve_core(group_max_update_length, interface_ptr, dual_module); + self.time_resolve += begin.elapsed().as_secs_f64(); + res + } + + fn resolve_tune( + &mut self, + group_max_update_length: BTreeSet, + interface_ptr: &DualModuleInterfacePtr, + dual_module: &mut impl DualModuleImpl, + ) -> (BTreeSet, bool) { let begin = Instant::now(); - self.resolve_core(group_max_update_length, interface_ptr, dual_module); + let res = self.resolve_core_tune(group_max_update_length, interface_ptr, dual_module); self.time_resolve += begin.elapsed().as_secs_f64(); + res } - fn subgraph(&mut self, _interface: &DualModuleInterfacePtr, _dual_module: &mut impl DualModuleImpl) -> Subgraph { + fn subgraph( + &mut self, + _interface: &DualModuleInterfacePtr, + seed: u64, + ) -> Subgraph { + let mut subgraph = vec![]; for cluster_ptr in self.clusters.iter() { let cluster = cluster_ptr.read_recursive(); if cluster.nodes.is_empty() { continue; } + // println!("cluster.subgraph: {:?}", cluster.subgraph); + // println!("cluster: {:?}", cluster_ptr); + subgraph.extend( cluster .subgraph .clone() - .expect("bug occurs: cluster should be solved, but the subgraph is not yet generated") - .iter(), + .unwrap_or_else(|| panic!("bug occurs: cluster should be solved, but the subgraph is not yet generated || the seed is {seed:?}")), ); + + } + // println!("subgraph: {:?}", subgraph); subgraph } + + /// check if there are more plugins to be applied + /// will return false if timeout has been reached, else consume a plugin + fn has_more_plugins(&mut self) -> bool { + if self.time_resolve > self.config.timeout { + return false; + } + return if *self.plugin_count.read_recursive() < self.plugins.len() { + // increment the plugin count + *self.plugin_count.write() += 1; + // self.plugin_pending_clusters = (0..self.clusters.len()).collect(); + self.plugin_pending_clusters = self.clusters.iter().map(|c| c.downgrade()).collect(); + true + } else { + false + }; + } + + /// get the pending clusters + fn pending_clusters(&mut self) -> Vec { + self.plugin_pending_clusters.clone() + } + + // TODO: extract duplicate codes + + /// analyze a cluster and return whether there exists an optimal solution (depending on optimization levels) + #[allow(clippy::unnecessary_cast)] + fn resolve_cluster( + &mut self, + cluster_ptr: &PrimalClusterPtr, + interface_ptr: &DualModuleInterfacePtr, + dual_module: &mut impl DualModuleImpl, + ) -> bool { + // cprintln!("resolver cluster"); + // cprintln!("This a green and bold text."); + + // let cluster_ptr = self.clusters[cluster_index as usize].clone(); + let mut cluster = cluster_ptr.write(); + if cluster.nodes.is_empty() { + return true; // no longer a cluster, no need to handle + } + // set all nodes to stop growing in the cluster + for primal_node_ptr in cluster.nodes.iter() { + let dual_node_ptr = primal_node_ptr.read_recursive().dual_node_ptr.clone(); + dual_module.set_grow_rate(&dual_node_ptr, Rational::zero()); + } + // update the matrix with new tight edges + let cluster = &mut *cluster; + for edge_weak in cluster.edges.iter() { + // println!("{:?} cluster edge: {:?}", i, edge_weak.read_recursive().edge_index); + cluster + .matrix + .update_edge_tightness(edge_weak.downgrade(), dual_module.is_edge_tight(edge_weak.clone())); + } + + // find an executable relaxer from the plugin manager + let relaxer = { + let positive_dual_variables: Vec = cluster + .nodes + .iter() + .map(|p| p.read_recursive().dual_node_ptr.clone()) + .filter(|dual_node_ptr| !dual_node_ptr.read_recursive().get_dual_variable().is_zero()) + .collect(); + let cluster_mut = &mut *cluster; // must first get mutable reference + let plugin_manager = &mut cluster_mut.plugin_manager; + let matrix = &mut cluster_mut.matrix; + plugin_manager.find_relaxer( matrix, &positive_dual_variables) + }; + + // if a relaxer is found, execute it and return + if let Some(relaxer) = relaxer { + for (invalid_subgraph, grow_rate) in relaxer.get_direction() { + let (existing, dual_node_ptr) = interface_ptr.find_or_create_node(invalid_subgraph, dual_module); + if !existing { + // create the corresponding primal node and add it to cluster + let primal_node_ptr = PrimalModuleSerialNodePtr::new_value(PrimalModuleSerialNode { + dual_node_ptr: dual_node_ptr.clone(), + cluster_weak: cluster_ptr.downgrade(), + }); + cluster.nodes.push(primal_node_ptr.clone()); + self.nodes.push(primal_node_ptr.clone()); + dual_node_ptr.write().primal_module_serial_node = Some(primal_node_ptr.downgrade()); + } + + dual_module.set_grow_rate(&dual_node_ptr, grow_rate.clone()); + } + cluster.relaxer_optimizer.insert(relaxer); + return false; + } + + // TODO idea: plugins can suggest subgraph (ideally, a global maximum), if so, then it will adopt th + // subgraph with minimum weight from all plugins as the starting point to do local minimum + + // find a local minimum (hopefully a global minimum) + let weight_of = |edge_weak: EdgeWeak| edge_weak.upgrade_force().read_recursive().weight; + cluster.subgraph = Some(cluster.matrix.get_solution_local_minimum(weight_of).expect("satisfiable")); + true + } + + /// analyze a cluster and return whether there exists an optimal solution (depending on optimization levels) + #[allow(clippy::unnecessary_cast)] + fn resolve_cluster_tune( + &mut self, + cluster_ptr: &PrimalClusterPtr, + interface_ptr: &DualModuleInterfacePtr, + dual_module: &mut impl DualModuleImpl, + // dual_node_deltas: &mut BTreeMap, + dual_node_deltas: &mut BTreeMap, + ) -> (bool, OptimizerResult) { + let mut optimizer_result = OptimizerResult::default(); + // let cluster_ptr = self.clusters[cluster_index as usize].clone(); + let mut cluster = cluster_ptr.write(); + if cluster.nodes.is_empty() { + return (true, optimizer_result); // no longer a cluster, no need to handle + } + // update the matrix with new tight edges + let cluster = &mut *cluster; + for edge_index in cluster.edges.iter() { + cluster + .matrix + .update_edge_tightness(edge_index.downgrade(), dual_module.is_edge_tight_tune(edge_index.clone())); + } + + // find an executable relaxer from the plugin manager + let relaxer = { + let positive_dual_variables: Vec = cluster + .nodes + .iter() + .map(|p| p.read_recursive().dual_node_ptr.clone()) + .filter(|dual_node_ptr| !dual_node_ptr.read_recursive().dual_variable_at_last_updated_time.is_zero()) + .collect(); + let cluster_mut = &mut *cluster; // must first get mutable reference + let plugin_manager = &mut cluster_mut.plugin_manager; + let matrix = &mut cluster_mut.matrix; + plugin_manager.find_relaxer( matrix, &positive_dual_variables) + }; + + // if a relaxer is found, execute it and return + if let Some(mut relaxer) = relaxer { + #[cfg(feature = "float_lp")] + // float_lp is enabled, optimizer really plays a role + if cluster.relaxer_optimizer.should_optimize(&relaxer) { + #[cfg(not(feature = "incr_lp"))] + { + let dual_variables: BTreeMap, Rational> = cluster + .nodes + .iter() + .map(|primal_node_ptr| { + let primal_node = primal_node_ptr.read_recursive(); + let dual_node = primal_node.dual_node_ptr.read_recursive(); + ( + dual_node.invalid_subgraph.clone(), + dual_node.dual_variable_at_last_updated_time.clone(), + ) + }) + .collect(); + let edge_slacks: BTreeMap = dual_variables + .keys() + .flat_map(|invalid_subgraph: &Arc| invalid_subgraph.hair.iter()) + .chain( + relaxer + .get_direction() + .keys() + .flat_map(|invalid_subgraph| invalid_subgraph.hair.iter()), + ) + .unique() + .map(|edge_index| (edge_index.clone(), dual_module.get_edge_slack_tune(edge_index.clone()))) + .collect(); + let (new_relaxer, early_returned) = + cluster.relaxer_optimizer.optimize(relaxer, edge_slacks, dual_variables); + relaxer = new_relaxer; + if early_returned { + optimizer_result = OptimizerResult::EarlyReturned; + } else { + optimizer_result = OptimizerResult::Optimized; + } + } + + #[cfg(feature = "incr_lp")] + { + let mut dual_variables: BTreeMap, Rational)> = BTreeMap::new(); + let mut participating_dual_variable_indices = hashbrown::HashSet::new(); + for primal_node_ptr in cluster.nodes.iter() { + let primal_node = primal_node_ptr.read_recursive(); + let dual_node = primal_node.dual_node_ptr.read_recursive(); + dual_variables.insert( + dual_node.index, + ( + dual_node.invalid_subgraph.clone(), + dual_node.dual_variable_at_last_updated_time, + ), + ); + participating_dual_variable_indices.insert(dual_node.index); + } + + for (invalid_subgraph, _) in relaxer.get_direction().iter() { + let (existing, dual_node_ptr) = + interface_ptr.find_or_create_node_tune(invalid_subgraph, dual_module); + if !existing { + // create the corresponding primal node and add it to cluster + let primal_node_ptr = PrimalModuleSerialNodePtr::new_value(PrimalModuleSerialNode { + dual_node_ptr: dual_node_ptr.clone(), + cluster_weak: cluster_ptr.downgrade(), + }); + cluster.nodes.push(primal_node_ptr.clone()); + self.nodes.push(primal_node_ptr); + // participating_dual_variable_indices.insert(dual_node_ptr.read_recursive().index); + + // maybe optimize here + } + match dual_variables.get_mut(&dual_node_ptr.read_recursive().index) { + Some(_) => {} + None => { + dual_variables.insert( + dual_node_ptr.read_recursive().index, + ( + dual_node_ptr.read_recursive().invalid_subgraph.clone(), + dual_node_ptr.read_recursive().dual_variable_at_last_updated_time, + ), + ); + } + }; + } + let edge_free_weights: BTreeMap = dual_variables + .values() + .flat_map(|(invalid_subgraph, _)| invalid_subgraph.hair.iter().cloned()) + .chain( + relaxer + .get_direction() + .keys() + .flat_map(|invalid_subgraph| invalid_subgraph.hair.iter().cloned()), + ) + .unique() + .map(|edge_index| { + ( + edge_index, + // dual_module.get_edge_free_weight(edge_index, &participating_dual_variable_indices), + dual_module.get_edge_free_weight_cluster(edge_index, cluster_index), + ) + }) + .collect(); + + let (new_relaxer, early_returned) = cluster.relaxer_optimizer.optimize_incr( + relaxer, + edge_free_weights, + dual_variables, + &mut cluster.incr_solution, + ); + relaxer = new_relaxer; + if early_returned { + optimizer_result = OptimizerResult::EarlyReturned; + } else { + optimizer_result = OptimizerResult::Optimized; + } + } + } else { + optimizer_result = OptimizerResult::Skipped; + } + + #[cfg(not(feature = "float_lp"))] + // with rationals, it is actually usually better when always optimized + { + let dual_variables: BTreeMap, Rational> = cluster + .nodes + .iter() + .map(|primal_node_ptr| { + let primal_node = primal_node_ptr.read_recursive(); + let dual_node = primal_node.dual_node_ptr.read_recursive(); + ( + dual_node.invalid_subgraph.clone(), + dual_node.dual_variable_at_last_updated_time.clone(), + ) + }) + .collect(); + let edge_slacks: BTreeMap = dual_variables + .keys() + .flat_map(|invalid_subgraph: &Arc| invalid_subgraph.hair.iter().cloned()) + .chain( + relaxer + .get_direction() + .keys() + .flat_map(|invalid_subgraph| invalid_subgraph.hair.iter().cloned()), + ) + .unique() + .map(|edge_index| (edge_index, dual_module.get_edge_slack_tune(edge_index))) + .collect(); + + let (new_relaxer, early_returned) = cluster.relaxer_optimizer.optimize(relaxer, edge_slacks, dual_variables); + relaxer = new_relaxer; + if early_returned { + optimizer_result = OptimizerResult::EarlyReturned; + } else { + optimizer_result = OptimizerResult::Optimized; + } + } + + for (invalid_subgraph, grow_rate) in relaxer.get_direction() { + let (existing, dual_node_ptr) = interface_ptr.find_or_create_node_tune(invalid_subgraph, dual_module); + if !existing { + // create the corresponding primal node and add it to cluster + let primal_node_ptr = PrimalModuleSerialNodePtr::new_value(PrimalModuleSerialNode { + dual_node_ptr: dual_node_ptr.clone(), + cluster_weak: cluster_ptr.downgrade(), + }); + cluster.nodes.push(primal_node_ptr.clone()); + self.nodes.push(primal_node_ptr.clone()); + dual_node_ptr.write().primal_module_serial_node = Some(primal_node_ptr.downgrade()); + } + + // Document the desired deltas + let index = dual_node_ptr.read_recursive().index; + dual_node_deltas.insert( + OrderedDualNodePtr::new(index, dual_node_ptr), + (grow_rate.clone(), cluster_ptr.clone()), + ); + } + + cluster.relaxer_optimizer.insert(relaxer); + return (false, optimizer_result); + } + + // find a local minimum (hopefully a global minimum) + // let interface = interface_ptr.read_recursive(); + // let initializer = interface.decoding_graph.model_graph.initializer.as_ref(); + // let weight_of = |edge_index: EdgeIndex| initializer.weighted_edges[edge_index].weight; + let weight_of = |edge_weak: EdgeWeak| edge_weak.upgrade_force().read_recursive().weight; + cluster.subgraph = Some(cluster.matrix.get_solution_local_minimum(weight_of).expect("satisfiable")); + + (true, optimizer_result) + } + + /// update the sorted clusters_aff, should be None to start with + fn update_sorted_clusters_aff(&mut self, dual_module: &mut D) { + let pending_clusters = self.pending_clusters(); + let mut sorted_clusters_aff = BTreeSet::default(); + + for cluster_index in pending_clusters.iter() { + // let cluster_ptr = self.clusters[*cluster_index].clone(); + let cluster_ptr = cluster_index.upgrade_force(); + let affinity = dual_module.calculate_cluster_affinity(cluster_ptr.clone()); + if let Some(affinity) = affinity { + sorted_clusters_aff.insert(ClusterAffinity { + cluster_ptr: cluster_ptr.clone(), + affinity, + }); + } + } + self.sorted_clusters_aff = Some(sorted_clusters_aff); + } + + /// consume the sorted_clusters_aff + fn get_sorted_clusters_aff(&mut self) -> BTreeSet { + self.sorted_clusters_aff.take().unwrap() + } + + #[cfg(feature = "incr_lp")] + fn calculate_edges_free_weight_clusters(&mut self, dual_module: &mut impl DualModuleImpl) { + for cluster in self.clusters.iter() { + let cluster = cluster.read_recursive(); + for node in cluster.nodes.iter() { + let dual_node = node.read_recursive(); + let dual_node_read = dual_node.dual_node_ptr.read_recursive(); + for edge_index in dual_node_read.invalid_subgraph.hair.iter() { + dual_module.update_edge_cluster_weights( + *edge_index, + cluster.cluster_index, + dual_node_read.dual_variable_at_last_updated_time, + ); + } + } + } + } } impl PrimalModuleSerial { // union the cluster of two dual nodes #[allow(clippy::unnecessary_cast)] - pub fn union(&self, dual_node_ptr_1: &DualNodePtr, dual_node_ptr_2: &DualNodePtr, decoding_graph: &DecodingHyperGraph) { - let node_index_1 = dual_node_ptr_1.read_recursive().index; - let node_index_2 = dual_node_ptr_2.read_recursive().index; - let primal_node_1 = self.nodes[node_index_1 as usize].read_recursive(); - let primal_node_2 = self.nodes[node_index_2 as usize].read_recursive(); + pub fn union( + &self, + dual_node_ptr_1: &DualNodePtr, + dual_node_ptr_2: &DualNodePtr, + dual_module: &mut impl DualModuleImpl, // note: remove if not for cluster-based + ) { + // cluster_1 will become the union of cluster_1 and cluster_2 + // and cluster_2 will be outdated + // let node_index_1 = dual_node_ptr_1.read_recursive().index; + // let node_index_2 = dual_node_ptr_2.read_recursive().index; + // let primal_node_1 = self.nodes[node_index_1 as usize].read_recursive(); + // let primal_node_2 = self.nodes[node_index_2 as usize].read_recursive(); + let primal_node_1_weak = dual_node_ptr_1.read_recursive().primal_module_serial_node.clone().unwrap(); + let primal_node_2_weak = dual_node_ptr_2.read_recursive().primal_module_serial_node.clone().unwrap(); + let primal_node_1_ptr = primal_node_1_weak.upgrade_force(); + let primal_node_2_ptr = primal_node_2_weak.upgrade_force(); + let primal_node_1 = primal_node_1_ptr.read_recursive(); + let primal_node_2 = primal_node_2_ptr.read_recursive(); if primal_node_1.cluster_weak.ptr_eq(&primal_node_2.cluster_weak) { return; // already in the same cluster } @@ -217,18 +746,49 @@ impl PrimalModuleSerial { drop(primal_node_2); let mut cluster_1 = cluster_ptr_1.write(); let mut cluster_2 = cluster_ptr_2.write(); + let cluster_2_index = cluster_2.cluster_index; for primal_node_ptr in cluster_2.nodes.drain(..) { + #[cfg(feature = "incr_lp")] + { + let primal_node = primal_node_ptr.read_recursive(); + dual_module.update_edge_cluster_weights_union( + &primal_node.dual_node_ptr, + cluster_2_index, + cluster_1.cluster_index, + ); + } + primal_node_ptr.write().cluster_weak = cluster_ptr_1.downgrade(); cluster_1.nodes.push(primal_node_ptr); } - cluster_1.edges.append(&mut cluster_2.edges); + cluster_1.edges.extend(&mut cluster_2.edges.clone().into_iter()); cluster_1.subgraph = None; // mark as no subgraph - for &vertex_index in cluster_2.vertices.iter() { - if !cluster_1.vertices.contains(&vertex_index) { - cluster_1.vertices.insert(vertex_index); - let incident_edges = decoding_graph.get_vertex_neighbors(vertex_index); - let parity = decoding_graph.is_vertex_defect(vertex_index); - cluster_1.matrix.add_constraint(vertex_index, incident_edges, parity); + + #[cfg(all(feature = "incr_lp", feature = "highs"))] + match (&cluster_1.incr_solution, &cluster_2.incr_solution) { + (None, Some(_)) => { + cluster_1.incr_solution = cluster_2.incr_solution.take(); + } + (Some(c1), Some(c2)) => { + if c2.lock().constraints_len() > c1.lock().constraints_len() { + cluster_1.incr_solution = cluster_2.incr_solution.take(); + } + } + + // no need to changes + (None, None) => {} + (Some(_), None) => {} + } + + for vertex_ptr in cluster_2.vertices.iter() { + if !cluster_1.vertices.contains(&vertex_ptr.clone()) { + cluster_1.vertices.insert(vertex_ptr.clone()); + // let incident_edges = decoding_graph.get_vertex_neighbors(vertex_index); + // let parity = decoding_graph.is_vertex_defect(vertex_index); + // let incident_edges = &vertex_ptr.read_recursive().edges; + let incident_edges = &vertex_ptr.get_edge_neighbors(); + let parity = vertex_ptr.read_recursive().is_defect; + cluster_1.matrix.add_constraint(vertex_ptr.downgrade(), incident_edges, parity); } } cluster_1.relaxer_optimizer.append(&mut cluster_2.relaxer_optimizer); @@ -241,16 +801,17 @@ impl PrimalModuleSerial { mut group_max_update_length: GroupMaxUpdateLength, interface_ptr: &DualModuleInterfacePtr, dual_module: &mut impl DualModuleImpl, - ) { + ) -> bool { debug_assert!(!group_max_update_length.is_unbounded() && group_max_update_length.get_valid_growth().is_none()); - let mut active_clusters = BTreeSet::::new(); + let mut active_clusters = BTreeSet::::new(); let interface = interface_ptr.read_recursive(); - let decoding_graph = &interface.decoding_graph; + // println!("in resolve core"); while let Some(conflict) = group_max_update_length.pop() { match conflict { - MaxUpdateLength::Conflicting(edge_index) => { + MaxUpdateLength::Conflicting(edge_ptr) => { // union all the dual nodes in the edge index and create new dual node by adding this edge to `internal_edges` - let dual_nodes = dual_module.get_edge_nodes(edge_index); + // println!("conflict edge_ptr: {:?}", edge_ptr); + let dual_nodes = dual_module.get_edge_nodes(edge_ptr.clone()); debug_assert!( !dual_nodes.is_empty(), "should not conflict if no dual nodes are contributing" @@ -258,7 +819,96 @@ impl PrimalModuleSerial { let dual_node_ptr_0 = &dual_nodes[0]; // first union all the dual nodes for dual_node_ptr in dual_nodes.iter().skip(1) { - self.union(dual_node_ptr_0, dual_node_ptr, &interface.decoding_graph); + self.union(dual_node_ptr_0, dual_node_ptr, dual_module); + } + let primal_node_weak = dual_node_ptr_0.read_recursive().primal_module_serial_node.clone().unwrap(); + let cluster_ptr = primal_node_weak.upgrade_force().read_recursive().cluster_weak.upgrade_force(); + // let cluster_ptr = self.nodes[dual_node_ptr_0.read_recursive().index as usize] + // .read_recursive() + // .cluster_weak + // .upgrade_force(); + let mut cluster = cluster_ptr.write(); + // then add new constraints because these edges may touch new vertices + let incident_vertices = &edge_ptr.get_vertex_neighbors(); + // println!("incidenet_vertices: {:?}", incident_vertices); + // println!("cluster matrix before add constraint: {:?}", cluster.matrix.printstd()); + for vertex_weak in incident_vertices.iter() { + // println!("incident vertex: {:?}", vertex_weak.upgrade_force().read_recursive().vertex_index); + if !cluster.vertices.contains(&vertex_weak.upgrade_force()) { + cluster.vertices.insert(vertex_weak.upgrade_force()); + let vertex_ptr = vertex_weak.upgrade_force(); + let vertex = vertex_ptr.read_recursive(); + let incident_edges = &vertex_ptr.get_edge_neighbors(); + // println!("vertex {:?}, fusion_done: {:?}, is_mirror: {:?}, incident_edges: {:?}", vertex_ptr.read_recursive().vertex_index, + // vertex_ptr.read_recursive().fusion_done, vertex_ptr.read_recursive().is_mirror, incident_edges); + let parity = vertex.is_defect; + + cluster.matrix.add_constraint(vertex_weak.clone(), &incident_edges, parity); + } + } + // println!("cluster matrix after add constraint: {:?}", cluster.matrix.printstd()); + cluster.edges.insert(edge_ptr.clone()); + // add to active cluster so that it's processed later + active_clusters.insert(cluster_ptr.clone()); + } + MaxUpdateLength::ShrinkProhibited(dual_node_ptr) => { + let primal_node_weak = dual_node_ptr.ptr.read_recursive().primal_module_serial_node.clone().unwrap(); + let cluster_ptr = primal_node_weak.upgrade_force().read_recursive().cluster_weak.upgrade_force(); + // let cluster_ptr = self.nodes[dual_node_ptr.index as usize] + // .read_recursive() + // .cluster_weak + // .upgrade_force(); + // let cluster_index = cluster_ptr.read_recursive().cluster_index; + active_clusters.insert(cluster_ptr.clone()); + } + _ => { + unreachable!() + } + } + } + drop(interface); + if *self.plugin_count.read_recursive() != 0 && self.time_resolve > self.config.timeout { + *self.plugin_count.write() = 0; // force only the first plugin + } + let mut all_solved = true; + for cluster_ptr in active_clusters.iter() { + // println!("active cluster index: {:?}", cluster_ptr.read_recursive().cluster_index); + let solved = self.resolve_cluster(cluster_ptr, interface_ptr, dual_module); + all_solved &= solved; + } + if !all_solved { + return false; // already give dual module something to do + } + + true + } + + #[allow(clippy::unnecessary_cast)] + /// for backwards-compatibility + fn old_resolve_core( + &mut self, + mut group_max_update_length: GroupMaxUpdateLength, + interface_ptr: &DualModuleInterfacePtr, + dual_module: &mut impl DualModuleImpl, + ) -> bool { + debug_assert!(!group_max_update_length.is_unbounded() && group_max_update_length.get_valid_growth().is_none()); + let mut active_clusters = BTreeSet::::new(); + let interface = interface_ptr.read_recursive(); + while let Some(conflict) = group_max_update_length.pop() { + match conflict { + MaxUpdateLength::Conflicting(edge_ptr) => { + // union all the dual nodes in the edge index and create new dual node by adding this edge to `internal_edges` + let dual_nodes = dual_module.get_edge_nodes(edge_ptr.clone()); + debug_assert!( + !dual_nodes.is_empty(), + "should not conflict if no dual nodes are contributing" + ); + let dual_node_ptr_0 = &dual_nodes[0]; + // first union all the dual nodes + for dual_node_ptr in dual_nodes.iter().skip(1) { + println!("iiii"); + // self.union(dual_node_ptr_0, dual_node_ptr, &interface.decoding_graph); + self.union(dual_node_ptr_0, dual_node_ptr, dual_module); } let cluster_ptr = self.nodes[dual_node_ptr_0.read_recursive().index as usize] .read_recursive() @@ -266,26 +916,31 @@ impl PrimalModuleSerial { .upgrade_force(); let mut cluster = cluster_ptr.write(); // then add new constraints because these edges may touch new vertices - let incident_vertices = decoding_graph.get_edge_neighbors(edge_index); - for &vertex_index in incident_vertices.iter() { - if !cluster.vertices.contains(&vertex_index) { - cluster.vertices.insert(vertex_index); - let incident_edges = decoding_graph.get_vertex_neighbors(vertex_index); - let parity = decoding_graph.is_vertex_defect(vertex_index); - cluster.matrix.add_constraint(vertex_index, incident_edges, parity); + // let incident_vertices = decoding_graph.get_edge_neighbors(edge_index); + let incident_vertices = &edge_ptr.read_recursive().vertices; + for vertex_weak in incident_vertices.iter() { + if !cluster.vertices.contains(&vertex_weak.upgrade_force()) { + cluster.vertices.insert(vertex_weak.upgrade_force()); + // let incident_edges = decoding_graph.get_vertex_neighbors(vertex_index); + // let parity = decoding_graph.is_vertex_defect(vertex_index); + let vertex_ptr = vertex_weak.upgrade_force(); + let vertex = vertex_ptr.read_recursive(); + let incident_edges = &vertex.edges; + let parity = vertex.is_defect; + cluster.matrix.add_constraint(vertex_weak.clone(), incident_edges, parity); } } - cluster.edges.insert(edge_index); + cluster.edges.insert(edge_ptr.clone()); // add to active cluster so that it's processed later - active_clusters.insert(cluster.cluster_index); + active_clusters.insert(cluster_ptr.clone()); } MaxUpdateLength::ShrinkProhibited(dual_node_ptr) => { - let cluster_ptr = self.nodes[dual_node_ptr.read_recursive().index as usize] + let cluster_ptr = self.nodes[dual_node_ptr.index as usize] .read_recursive() .cluster_weak .upgrade_force(); - let cluster_index = cluster_ptr.read_recursive().cluster_index; - active_clusters.insert(cluster_index); + // let cluster_index = cluster_ptr.read_recursive().cluster_index; + active_clusters.insert(cluster_ptr.clone()); } _ => { unreachable!() @@ -297,12 +952,12 @@ impl PrimalModuleSerial { *self.plugin_count.write() = 0; // force only the first plugin } let mut all_solved = true; - for &cluster_index in active_clusters.iter() { + for cluster_index in active_clusters.iter() { let solved = self.resolve_cluster(cluster_index, interface_ptr, dual_module); all_solved &= solved; } if !all_solved { - return; // already give dual module something to do + return false; // already give dual module something to do } while !self.pending_nodes.is_empty() { let primal_node_weak = self.pending_nodes.pop_front().unwrap(); @@ -311,122 +966,269 @@ impl PrimalModuleSerial { let cluster_ptr = primal_node.cluster_weak.upgrade_force(); if cluster_ptr.read_recursive().subgraph.is_none() { dual_module.set_grow_rate(&primal_node.dual_node_ptr, Rational::one()); - return; // let the dual module to find more obstacles + return false; // let the dual module to find more obstacles } } if *self.plugin_count.read_recursive() == 0 { - return; + return true; } // check that all clusters have passed the plugins loop { while let Some(cluster_index) = self.plugin_pending_clusters.pop() { - let solved = self.resolve_cluster(cluster_index, interface_ptr, dual_module); + let solved = self.resolve_cluster(&cluster_index.upgrade_force(), interface_ptr, dual_module); if !solved { - return; // let the dual module to handle one + return false; // let the dual module to handle one } } if *self.plugin_count.read_recursive() < self.plugins.len() { // increment the plugin count *self.plugin_count.write() += 1; - self.plugin_pending_clusters = (0..self.clusters.len()).collect(); + // self.plugin_pending_clusters = (0..self.clusters.len()).collect(); + self.plugin_pending_clusters = self.clusters.iter().map(|c| c.downgrade()).collect(); } else { break; // nothing more to check } } + true } - /// analyze a cluster and return whether there exists an optimal solution (depending on optimization levels) #[allow(clippy::unnecessary_cast)] - fn resolve_cluster( + // returns (conflicts_needing_to_be_resolved, should_grow) + fn resolve_core_tune( &mut self, - cluster_index: NodeIndex, + group_max_update_length: BTreeSet, interface_ptr: &DualModuleInterfacePtr, dual_module: &mut impl DualModuleImpl, - ) -> bool { - let cluster_ptr = self.clusters[cluster_index as usize].clone(); - let mut cluster = cluster_ptr.write(); - if cluster.nodes.is_empty() { - return true; // no longer a cluster, no need to handle + ) -> (BTreeSet, bool) { + let mut active_clusters = BTreeSet::::new(); + let interface = interface_ptr.read_recursive(); + for conflict in group_max_update_length.into_iter() { + match conflict { + MaxUpdateLength::Conflicting(edge_ptr) => { + // union all the dual nodes in the edge index and create new dual node by adding this edge to `internal_edges` + let dual_nodes = dual_module.get_edge_nodes(edge_ptr.clone()); + debug_assert!( + !dual_nodes.is_empty(), + "should not conflict if no dual nodes are contributing" + ); + let dual_node_ptr_0 = &dual_nodes[0]; + // first union all the dual nodes + for dual_node_ptr in dual_nodes.iter().skip(1) { + // self.union(dual_node_ptr_0, dual_node_ptr, &interface.decoding_graph); + self.union(dual_node_ptr_0, dual_node_ptr, dual_module); + } + let primal_node_weak = dual_node_ptr_0.read_recursive().primal_module_serial_node.clone().unwrap(); + let cluster_ptr = primal_node_weak.upgrade_force().read_recursive().cluster_weak.upgrade_force(); + // let cluster_ptr = self.nodes[dual_node_ptr_0.read_recursive().index as usize] + // .read_recursive() + // .cluster_weak + // .upgrade_force(); + let mut cluster = cluster_ptr.write(); + // then add new constraints because these edges may touch new vertices + // let incident_vertices = &edge_ptr.read_recursive().vertices; + let incident_vertices = &edge_ptr.get_vertex_neighbors(); + for vertex_weak in incident_vertices.iter() { + if !cluster.vertices.contains(&vertex_weak.upgrade_force()) { + cluster.vertices.insert(vertex_weak.upgrade_force()); + // let incident_edges = decoding_graph.get_vertex_neighbors(vertex_index); + // let parity = decoding_graph.is_vertex_defect(vertex_index); + let vertex_ptr = vertex_weak.upgrade_force(); + let vertex = vertex_ptr.read_recursive(); + // let incident_edges = &vertex.edges; + let incident_edges = &vertex_ptr.get_edge_neighbors(); + let parity = vertex.is_defect; + cluster.matrix.add_constraint(vertex_weak.clone(), incident_edges, parity); + } + } + cluster.edges.insert(edge_ptr.clone()); + // add to active cluster so that it's processed later + active_clusters.insert(cluster_ptr.clone()); + } + MaxUpdateLength::ShrinkProhibited(dual_node_ptr) => { + let primal_node_weak = dual_node_ptr.ptr.read_recursive().primal_module_serial_node.clone().unwrap(); + let cluster_ptr = primal_node_weak.upgrade_force().read_recursive().cluster_weak.upgrade_force(); + // let cluster_ptr = self.nodes[dual_node_ptr.index as usize] + // .read_recursive() + // .cluster_weak + // .upgrade_force(); + // let cluster_index = cluster_ptr.read_recursive().cluster_index; + active_clusters.insert(cluster_ptr.clone()); + } + _ => { + unreachable!() + } + } } - // set all nodes to stop growing in the cluster - for primal_node_ptr in cluster.nodes.iter() { - let dual_node_ptr = primal_node_ptr.read_recursive().dual_node_ptr.clone(); - dual_module.set_grow_rate(&dual_node_ptr, Rational::zero()); + drop(interface); + if *self.plugin_count.read_recursive() != 0 && self.time_resolve > self.config.timeout { + *self.plugin_count.write() = 0; // force only the first plugin } - // update the matrix with new tight edges - let cluster = &mut *cluster; - for &edge_index in cluster.edges.iter() { - cluster - .matrix - .update_edge_tightness(edge_index, dual_module.is_edge_tight(edge_index)); + let mut all_solved = true; + let mut dual_node_deltas = BTreeMap::new(); + let mut optimizer_result = OptimizerResult::default(); + for cluster_ptr in active_clusters.iter() { + let (solved, other) = + self.resolve_cluster_tune(cluster_ptr, interface_ptr, dual_module, &mut dual_node_deltas); + if !solved { + // todo: investigate more + return (dual_module.get_conflicts_tune(other, dual_node_deltas), false); + } + all_solved &= solved; + optimizer_result.or(other); } - // find an executable relaxer from the plugin manager - let relaxer = { - let positive_dual_variables: Vec = cluster - .nodes - .iter() - .map(|p| p.read_recursive().dual_node_ptr.clone()) - .filter(|dual_node_ptr| !dual_node_ptr.read_recursive().get_dual_variable().is_zero()) - .collect(); - let decoding_graph = &interface_ptr.read_recursive().decoding_graph; - let cluster_mut = &mut *cluster; // must first get mutable reference - let plugin_manager = &mut cluster_mut.plugin_manager; - let matrix = &mut cluster_mut.matrix; - plugin_manager.find_relaxer(decoding_graph, matrix, &positive_dual_variables) - }; + let all_conflicts = dual_module.get_conflicts_tune(optimizer_result, dual_node_deltas); - // if a relaxer is found, execute it and return - if let Some(mut relaxer) = relaxer { - if !cluster.plugin_manager.is_empty() && cluster.relaxer_optimizer.should_optimize(&relaxer) { - let dual_variables: BTreeMap, Rational> = cluster - .nodes - .iter() - .map(|primal_node_ptr| { - let primal_node = primal_node_ptr.read_recursive(); - let dual_node = primal_node.dual_node_ptr.read_recursive(); - (dual_node.invalid_subgraph.clone(), dual_node.get_dual_variable().clone()) - }) - .collect(); - let edge_slacks: BTreeMap = dual_variables - .keys() - .flat_map(|invalid_subgraph: &Arc| invalid_subgraph.hair.iter().cloned()) - .chain( - relaxer - .get_direction() - .keys() - .flat_map(|invalid_subgraph| invalid_subgraph.hair.iter().cloned()), - ) - .map(|edge_index| (edge_index, dual_module.get_edge_slack(edge_index))) - .collect(); - relaxer = cluster.relaxer_optimizer.optimize(relaxer, edge_slacks, dual_variables); - } - for (invalid_subgraph, grow_rate) in relaxer.get_direction() { - let (existing, dual_node_ptr) = interface_ptr.find_or_create_node(invalid_subgraph, dual_module); - if !existing { - // create the corresponding primal node and add it to cluster - let primal_node_ptr = PrimalModuleSerialNodePtr::new_value(PrimalModuleSerialNode { - dual_node_ptr: dual_node_ptr.clone(), - cluster_weak: cluster_ptr.downgrade(), - }); - cluster.nodes.push(primal_node_ptr.clone()); - self.nodes.push(primal_node_ptr); + (all_conflicts, all_solved) + } +} + + +impl PrimalModuleSerial { + // // for parallel + // #[allow(clippy::unnecessary_cast)] + // fn load_ptr( + // &mut self, + // interface_ptr: &DualModuleInterfacePtr, + // dual_module_ptr: &mut DualModuleParallelUnitPtr, + // ) where Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, + // { + // let interface = interface_ptr.read_recursive(); + // println!("interface.nodes len: {:?}", interface.nodes.len()); + // for index in 0..interface.nodes.len() as NodeIndex { + // let dual_node_ptr = &interface.nodes[index as usize]; + // let node = dual_node_ptr.read_recursive(); + // debug_assert!( + // node.invalid_subgraph.edges.is_empty(), + // "must load a fresh dual module interface, found a complex node" + // ); + // debug_assert!( + // node.invalid_subgraph.vertices.len() == 1, + // "must load a fresh dual module interface, found invalid defect node" + // ); + // debug_assert_eq!( + // node.index, index, + // "must load a fresh dual module interface, found index out of order" + // ); + // assert_eq!(node.index as usize, self.nodes.len(), "must load defect nodes in order"); + // // construct cluster and its parity matrix (will be reused over all iterations) + // let primal_cluster_ptr = PrimalClusterPtr::new_value(PrimalCluster { + // cluster_index: self.clusters.len() as NodeIndex, + // nodes: vec![], + // edges: node.invalid_subgraph.hair.clone(), + // vertices: node.invalid_subgraph.vertices.clone(), + // matrix: node.invalid_subgraph.generate_matrix(), + // subgraph: None, + // plugin_manager: PluginManager::new(self.plugins.clone(), self.plugin_count.clone()), + // relaxer_optimizer: RelaxerOptimizer::new(), + // #[cfg(all(feature = "incr_lp", feature = "highs"))] + // incr_solution: None, + // }); + // // create the primal node of this defect node and insert into cluster + // let primal_node_ptr = PrimalModuleSerialNodePtr::new_value(PrimalModuleSerialNode { + // dual_node_ptr: dual_node_ptr.clone(), + // cluster_weak: primal_cluster_ptr.downgrade(), + // }); + // drop(node); + // primal_cluster_ptr.write().nodes.push(primal_node_ptr.clone()); + // // fill in the primal_module_serial_node in the corresponding dual node + // dual_node_ptr.write().primal_module_serial_node = Some(primal_node_ptr.clone().downgrade()); + + // // add to self + // self.nodes.push(primal_node_ptr); + // self.clusters.push(primal_cluster_ptr); + // } + // if matches!(self.growing_strategy, GrowingStrategy::SingleCluster) { + // for primal_node_ptr in self.nodes.iter().skip(1) { + // let dual_node_ptr = primal_node_ptr.read_recursive().dual_node_ptr.clone(); + // dual_module_ptr.write().set_grow_rate(&dual_node_ptr, Rational::zero()); + // self.pending_nodes.push_back(primal_node_ptr.downgrade()); + // } + // } + // } + + // for parallel + pub fn solve_step_callback_ptr( + &mut self, + interface: &DualModuleInterfacePtr, + syndrome_pattern: Arc, + dual_module_ptr: &mut DualModuleParallelUnitPtr, + callback: F, + ) where + F: FnMut(&DualModuleInterfacePtr, &DualModuleParallelUnit, &mut Self, &GroupMaxUpdateLength), + Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, + { + // let mut dual_module = dual_module_ptr.write(); + // interface.load_ptr(syndrome_pattern, dual_module_ptr); + interface.load(syndrome_pattern, dual_module_ptr.write().deref_mut()); + self.load(interface, dual_module_ptr.write().deref_mut()); + // drop(dual_module); + self.solve_step_callback_interface_loaded_ptr(interface, dual_module_ptr, callback); + } + + + pub fn solve_step_callback_interface_loaded_ptr( + &mut self, + interface: &DualModuleInterfacePtr, + dual_module_ptr: &mut DualModuleParallelUnitPtr, + mut callback: F, + ) where + F: FnMut(&DualModuleInterfacePtr, &DualModuleParallelUnit, &mut Self, &GroupMaxUpdateLength), + Queue: FutureQueueMethods + Default + std::fmt::Debug + Send + Sync + Clone, + { + // println!(" in solve step callback interface loaded ptr"); + // Search, this part is unchanged + let mut group_max_update_length = dual_module_ptr.compute_maximum_update_length(); + // println!("first group max update length: {:?}", group_max_update_length); + + while !group_max_update_length.is_unbounded() { + callback(interface, &dual_module_ptr.read_recursive(), self, &group_max_update_length); + match group_max_update_length.get_valid_growth() { + Some(length) => dual_module_ptr.grow(length), + None => { + self.resolve(group_max_update_length, interface, dual_module_ptr.write().deref_mut()); } - dual_module.set_grow_rate(&dual_node_ptr, grow_rate.clone()); } - cluster.relaxer_optimizer.insert(relaxer); - return false; + group_max_update_length = dual_module_ptr.compute_maximum_update_length(); + // println!("group max update length: {:?}", group_max_update_length); } - // TODO idea: plugins can suggest subgraph (ideally, a global maximum), if so, then it will adopt th - // subgraph with minimum weight from all plugins as the starting point to do local minimum + // from here, all states should be syncronized + let mut start = true; - // find a local minimum (hopefully a global minimum) - let interface = interface_ptr.read_recursive(); - let initializer = interface.decoding_graph.model_graph.initializer.as_ref(); - let weight_of = |edge_index: EdgeIndex| initializer.weighted_edges[edge_index].weight; - cluster.subgraph = Some(cluster.matrix.get_solution_local_minimum(weight_of).expect("satisfiable")); - true + // starting with unbounded state here: All edges and nodes are not growing as of now + // Tune + let mut dual_module = dual_module_ptr.write(); + while self.has_more_plugins() { + // println!("self.has more plugins"); + // Note: intersting, seems these aren't needed... But just kept here in case of future need, as well as correctness related failures + if start { + start = false; + dual_module.advance_mode(); + #[cfg(feature = "incr_lp")] + self.calculate_edges_free_weight_clusters(dual_module); + } + self.update_sorted_clusters_aff(dual_module.deref_mut()); + let cluster_affs = self.get_sorted_clusters_aff(); + + for cluster_affinity in cluster_affs.into_iter() { + let cluster_ptr = cluster_affinity.cluster_ptr; + let mut dual_node_deltas = BTreeMap::new(); + let (mut resolved, optimizer_result) = + self.resolve_cluster_tune(&cluster_ptr, interface, dual_module.deref_mut(), &mut dual_node_deltas); + + let mut conflicts = dual_module.get_conflicts_tune(optimizer_result, dual_node_deltas); + while !resolved { + let (_conflicts, _resolved) = self.resolve_tune(conflicts, interface, dual_module.deref_mut()); + if _resolved { + break; + } + conflicts = _conflicts; + resolved = _resolved; + } + } + } + drop(dual_module); } } @@ -442,6 +1244,7 @@ pub mod tests { use super::super::dual_module_serial::*; use super::super::example_codes::*; use super::*; + use crate::dual_module; use crate::num_traits::FromPrimitive; use crate::plugin_single_hair::PluginSingleHair; use crate::plugin_union_find::PluginUnionFind; @@ -453,7 +1256,7 @@ pub mod tests { final_dual: Weight, plugins: PluginVec, growing_strategy: GrowingStrategy, - mut dual_module: impl DualModuleImpl + MWPSVisualizer, + mut dual_module: impl DualModuleImpl + MWPSVisualizer + Send + Sync, model_graph: Arc, mut visualizer: Option, ) -> ( @@ -468,7 +1271,7 @@ pub mod tests { // primal_module.config = serde_json::from_value(json!({"timeout":1})).unwrap(); // try to work on a simple syndrome let decoding_graph = DecodingHyperGraph::new_defects(model_graph, defect_vertices.clone()); - let interface_ptr = DualModuleInterfacePtr::new(decoding_graph.model_graph.clone()); + let interface_ptr = DualModuleInterfacePtr::new(); primal_module.solve_visualizer( &interface_ptr, decoding_graph.syndrome_pattern.clone(), @@ -476,10 +1279,7 @@ pub mod tests { visualizer.as_mut(), ); - // Question: should this be called here - // dual_module.update_dual_nodes(&interface_ptr.read_recursive().nodes); - - let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module); + let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, 0); if let Some(visualizer) = visualizer.as_mut() { visualizer .snapshot_combined( @@ -532,13 +1332,14 @@ pub mod tests { }; // create dual module let model_graph = code.get_model_graph(); + let mut dual_module: DualModulePQ> = DualModulePQ::new_empty(&model_graph.initializer); primal_module_serial_basic_standard_syndrome_optional_viz( code, defect_vertices, final_dual, plugins, growing_strategy, - DualModuleSerial::new_empty(&model_graph.initializer), + dual_module, model_graph, Some(visualizer), ) @@ -583,25 +1384,28 @@ pub mod tests { /// test a simple case #[test] - fn primal_module_serial_basic_1() { - // cargo test primal_module_serial_basic_1 -- --nocapture - let visualize_filename = "primal_module_serial_basic_1.json".to_string(); - let defect_vertices = vec![23, 24, 29, 30]; - let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); + fn primal_module_serial_basic_1_m() { + // cargo test primal_module_serial_basic_1_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_1_m.json".to_string(); + // let defect_vertices = vec![23, 24, 29, 30]; + // let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); + let weight = 1; + let code = CodeCapacityPlanarCode::new(7, 0.1, weight); + let defect_vertices = vec![13, 20, 29, 32, 39]; primal_module_serial_basic_standard_syndrome( code, visualize_filename, defect_vertices, - 1, + 4, vec![], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } #[test] - fn primal_module_serial_basic_1_with_dual_pq_impl() { - // cargo test primal_module_serial_basic_1_with_dual_pq_impl -- --nocapture - let visualize_filename = "primal_module_serial_basic_1_with_dual_pq_impl.json".to_string(); + fn primal_module_serial_basic_1_with_dual_pq_impl_m() { + // cargo test primal_module_serial_basic_1_with_dual_pq_impl_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_1_with_dual_pq_impl_m.json".to_string(); let defect_vertices = vec![23, 24, 29, 30]; let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( @@ -610,14 +1414,14 @@ pub mod tests { defect_vertices, 1, vec![], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } #[test] - fn primal_module_serial_basic_2() { - // cargo test primal_module_serial_basic_2 -- --nocapture - let visualize_filename = "primal_module_serial_basic_2.json".to_string(); + fn primal_module_serial_basic_2_m() { + // cargo test primal_module_serial_basic_2_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_2_m.json".to_string(); let defect_vertices = vec![16, 17, 23, 25, 29, 30]; let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome( @@ -626,14 +1430,14 @@ pub mod tests { defect_vertices, 2, vec![], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } #[test] - fn primal_module_serial_basic_2_with_dual_pq_impl() { - // cargo test primal_module_serial_basic_2_with_dual_pq_impl -- --nocapture - let visualize_filename = "primal_module_serial_basic_2_with_dual_pq_impl.json".to_string(); + fn primal_module_serial_basic_2_with_dual_pq_impl_m() { + // cargo test primal_module_serial_basic_2_with_dual_pq_impl_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_2_with_dual_pq_impl_m.json".to_string(); let defect_vertices = vec![16, 17, 23, 25, 29, 30]; let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( @@ -642,17 +1446,16 @@ pub mod tests { defect_vertices, 2, vec![], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } // should fail because single growing will have sum y_S = 3 instead of 5 - #[test] - #[should_panic] - fn primal_module_serial_basic_3_single() { - // cargo test primal_module_serial_basic_3_single -- --nocapture - let visualize_filename = "primal_module_serial_basic_3_single.json".to_string(); + // #[should_panic] no more panics, as we are not using the single growing strategy + fn primal_module_serial_basic_3_single_m() { + // cargo test primal_module_serial_basic_3_single_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_3_single_m.json".to_string(); let defect_vertices = vec![14, 15, 16, 17, 22, 25, 28, 31, 36, 37, 38, 39]; let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome( @@ -661,15 +1464,15 @@ pub mod tests { defect_vertices, 5, vec![], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } #[test] - #[should_panic] - fn primal_module_serial_basic_3_single_with_dual_pq_impl() { - // cargo test primal_module_serial_basic_3_single_with_dual_pq_impl -- --nocapture - let visualize_filename = "primal_module_serial_basic_3_single_with_dual_pq_impl.json".to_string(); + // #[should_panic] no more panics, as we are not using the single growing strategy + fn primal_module_serial_basic_3_single_with_dual_pq_impl_m() { + // cargo test primal_module_serial_basic_3_single_with_dual_pq_impl_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_3_single_with_dual_pq_impl_m.json".to_string(); let defect_vertices = vec![14, 15, 16, 17, 22, 25, 28, 31, 36, 37, 38, 39]; let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( @@ -678,14 +1481,14 @@ pub mod tests { defect_vertices, 5, vec![], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } #[test] - fn primal_module_serial_basic_3_improved() { - // cargo test primal_module_serial_basic_3_improved -- --nocapture - let visualize_filename = "primal_module_serial_basic_3_improved.json".to_string(); + fn primal_module_serial_basic_3_improved_m() { + // cargo test primal_module_serial_basic_3_improved_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_3_improved_m.json".to_string(); let defect_vertices = vec![14, 15, 16, 17, 22, 25, 28, 31, 36, 37, 38, 39]; let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome( @@ -697,14 +1500,14 @@ pub mod tests { PluginUnionFind::entry(), PluginSingleHair::entry_with_strategy(RepeatStrategy::Once), ], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } #[test] - fn primal_module_serial_basic_3_improved_with_dual_pq_impl() { - // cargo test primal_module_serial_basic_3_improved_with_dual_pq_impl -- --nocapture - let visualize_filename = "primal_module_serial_basic_3_improved_with_dual_pq_impl.json".to_string(); + fn primal_module_serial_basic_3_improved_with_dual_pq_impl_m() { + // cargo test primal_module_serial_basic_3_improved_with_dual_pq_impl_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_3_improved_with_dual_pq_impl_m.json".to_string(); let defect_vertices = vec![14, 15, 16, 17, 22, 25, 28, 31, 36, 37, 38, 39]; let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( @@ -716,14 +1519,14 @@ pub mod tests { PluginUnionFind::entry(), PluginSingleHair::entry_with_strategy(RepeatStrategy::Once), ], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } #[test] - fn primal_module_serial_basic_3_multi() { - // cargo test primal_module_serial_basic_3_multi -- --nocapture - let visualize_filename = "primal_module_serial_basic_3_multi.json".to_string(); + fn primal_module_serial_basic_3_multi_m() { + // cargo test primal_module_serial_basic_3_multi_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_3_multi_m.json".to_string(); let defect_vertices = vec![14, 15, 16, 17, 22, 25, 28, 31, 36, 37, 38, 39]; let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome( @@ -732,14 +1535,14 @@ pub mod tests { defect_vertices, 5, vec![], - GrowingStrategy::MultipleClusters, + GrowingStrategy::ModeBased, ); } #[test] - fn primal_module_serial_basic_3_multi_with_dual_pq_impl() { - // cargo test primal_module_serial_basic_3_multi_with_dual_pq_impl -- --nocapture - let visualize_filename = "primal_module_serial_basic_3_multi_with_dual_pq_impl.json".to_string(); + fn primal_module_serial_basic_3_multi_with_dual_pq_impl_m() { + // cargo test primal_module_serial_basic_3_multi_with_dual_pq_impl_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_3_multi_with_dual_pq_impl_m.json".to_string(); let defect_vertices = vec![14, 15, 16, 17, 22, 25, 28, 31, 36, 37, 38, 39]; let code = CodeCapacityTailoredCode::new(7, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( @@ -748,15 +1551,15 @@ pub mod tests { defect_vertices, 5, vec![], - GrowingStrategy::MultipleClusters, + GrowingStrategy::ModeBased, ); } #[test] #[should_panic] - fn primal_module_serial_basic_4_single() { - // cargo test primal_module_serial_basic_4_single -- --nocapture - let visualize_filename = "primal_module_serial_basic_4_single.json".to_string(); + fn primal_module_serial_basic_4_single_m() { + // cargo test primal_module_serial_basic_4_single_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_4_single_m.json".to_string(); let defect_vertices = vec![10, 11, 12, 15, 16, 17, 18]; let code = CodeCapacityTailoredCode::new(5, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome( @@ -765,15 +1568,15 @@ pub mod tests { defect_vertices, 4, vec![], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } #[test] #[should_panic] - fn primal_module_serial_basic_4_single_with_dual_pq_impl() { - // cargo test primal_module_serial_basic_4_single_with_dual_pq_impl -- --nocapture - let visualize_filename = "primal_module_serial_basic_4_single_with_dual_pq_impl.json".to_string(); + fn primal_module_serial_basic_4_single_with_dual_pq_impl_m() { + // cargo test primal_module_serial_basic_4_single_with_dual_pq_impl_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_4_single_with_dual_pq_impl_m.json".to_string(); let defect_vertices = vec![10, 11, 12, 15, 16, 17, 18]; let code = CodeCapacityTailoredCode::new(5, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( @@ -782,14 +1585,14 @@ pub mod tests { defect_vertices, 4, vec![], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } #[test] - fn primal_module_serial_basic_4_single_improved() { - // cargo test primal_module_serial_basic_4_single_improved -- --nocapture - let visualize_filename = "primal_module_serial_basic_4_single_improved.json".to_string(); + fn primal_module_serial_basic_4_single_improved_m() { + // cargo test primal_module_serial_basic_4_single_improved_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_4_single_improved_m.json".to_string(); let defect_vertices = vec![10, 11, 12, 15, 16, 17, 18]; let code = CodeCapacityTailoredCode::new(5, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome( @@ -801,14 +1604,14 @@ pub mod tests { PluginUnionFind::entry(), PluginSingleHair::entry_with_strategy(RepeatStrategy::Once), ], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } #[test] - fn primal_module_serial_basic_4_single_improved_with_dual_pq_impl() { - // cargo test primal_module_serial_basic_4_single_improved_with_dual_pq_impl -- --nocapture - let visualize_filename = "primal_module_serial_basic_4_single_improved_with_dual_pq_impl.json".to_string(); + fn primal_module_serial_basic_4_single_improved_with_dual_pq_impl_m() { + // cargo test primal_module_serial_basic_4_single_improved_with_dual_pq_impl_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_4_single_improved_with_dual_pq_impl_m.json".to_string(); let defect_vertices = vec![10, 11, 12, 15, 16, 17, 18]; let code = CodeCapacityTailoredCode::new(5, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( @@ -820,7 +1623,7 @@ pub mod tests { PluginUnionFind::entry(), PluginSingleHair::entry_with_strategy(RepeatStrategy::Once), ], - GrowingStrategy::SingleCluster, + GrowingStrategy::MultipleClusters, ); } @@ -828,9 +1631,9 @@ pub mod tests { /// because not all edges are fully grown and those fully grown edges lead to suboptimal result #[test] #[should_panic] - fn primal_module_serial_basic_4_multi() { - // cargo test primal_module_serial_basic_4_multi -- --nocapture - let visualize_filename = "primal_module_serial_basic_4_multi.json".to_string(); + fn primal_module_serial_basic_4_multi_m() { + // cargo test primal_module_serial_basic_4_multi_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_4_multi_m.json".to_string(); let defect_vertices = vec![10, 11, 12, 15, 16, 17, 18]; let code = CodeCapacityTailoredCode::new(5, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome( @@ -839,15 +1642,15 @@ pub mod tests { defect_vertices, 4, vec![], - GrowingStrategy::MultipleClusters, + GrowingStrategy::ModeBased, ); } #[test] #[should_panic] - fn primal_module_serial_basic_4_multi_with_dual_pq_impl() { - // cargo test primal_module_serial_basic_4_multi_with_dual_pq_impl -- --nocapture - let visualize_filename = "primal_module_serial_basic_4_multi_with_dual_pq_impl.json".to_string(); + fn primal_module_serial_basic_4_multi_with_dual_pq_impl_m() { + // cargo test primal_module_serial_basic_4_multi_with_dual_pq_impl_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_4_multi_with_dual_pq_impl_m.json".to_string(); let defect_vertices = vec![10, 11, 12, 15, 16, 17, 18]; let code = CodeCapacityTailoredCode::new(5, 0., 0.01, 1); primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( @@ -856,15 +1659,15 @@ pub mod tests { defect_vertices, 4, vec![], - GrowingStrategy::MultipleClusters, + GrowingStrategy::ModeBased, ); } /// verify that each cluster is indeed growing one by one #[test] - fn primal_module_serial_basic_4_cluster_single_growth() { - // cargo test primal_module_serial_basic_4_cluster_single_growth -- --nocapture - let visualize_filename = "primal_module_serial_basic_4_cluster_single_growth.json".to_string(); + fn primal_module_serial_basic_4_cluster_single_growth_m() { + // cargo test primal_module_serial_basic_4_cluster_single_growth_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_4_cluster_single_growth_m.json".to_string(); let defect_vertices = vec![32, 33, 37, 47, 86, 87, 72, 82]; let code = CodeCapacityPlanarCode::new(11, 0.01, 1); primal_module_serial_basic_standard_syndrome( @@ -873,14 +1676,14 @@ pub mod tests { defect_vertices, 4, vec![], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } #[test] - fn primal_module_serial_basic_4_cluster_single_growth_with_dual_pq_impl() { - // cargo test primal_module_serial_basic_4_cluster_single_growth_with_dual_pq_impl -- --nocapture - let visualize_filename = "primal_module_serial_basic_4_cluster_single_growth_with_dual_pq_impl.json".to_string(); + fn primal_module_serial_basic_4_cluster_single_growth_with_dual_pq_impl_m() { + // cargo test primal_module_serial_basic_4_cluster_single_growth_with_dual_pq_impl_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_4_cluster_single_growth_with_dual_pq_impl_m.json".to_string(); let defect_vertices = vec![32, 33, 37, 47, 86, 87, 72, 82]; let code = CodeCapacityPlanarCode::new(11, 0.01, 1); primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( @@ -889,15 +1692,15 @@ pub mod tests { defect_vertices, 4, vec![], - GrowingStrategy::SingleCluster, + GrowingStrategy::ModeBased, ); } /// verify that the plugins are applied one by one #[test] - fn primal_module_serial_basic_4_plugin_one_by_one() { - // cargo test primal_module_serial_basic_4_plugin_one_by_one -- --nocapture - let visualize_filename = "primal_module_serial_basic_4_plugin_one_by_one.json".to_string(); + fn primal_module_serial_basic_4_plugin_one_by_one_m() { + // cargo test primal_module_serial_basic_4_plugin_one_by_one_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_4_plugin_one_by_one_m.json".to_string(); let defect_vertices = vec![12, 22, 23, 32, 17, 26, 27, 37, 62, 72, 73, 82, 67, 76, 77, 87]; let code = CodeCapacityPlanarCode::new(11, 0.01, 1); primal_module_serial_basic_standard_syndrome( @@ -909,14 +1712,14 @@ pub mod tests { PluginUnionFind::entry(), PluginSingleHair::entry_with_strategy(RepeatStrategy::Once), ], - GrowingStrategy::MultipleClusters, + GrowingStrategy::ModeBased, ); } #[test] - fn primal_module_serial_basic_4_plugin_one_by_one_with_dual_pq_impl() { - // cargo test primal_module_serial_basic_4_plugin_one_by_one_with_dual_pq_impl -- --nocapture - let visualize_filename = "primal_module_serial_basic_4_plugin_one_by_one_with_dual_pq_impl.json".to_string(); + fn primal_module_serial_basic_4_plugin_one_by_one_with_dual_pq_impl_m() { + // cargo test primal_module_serial_basic_4_plugin_one_by_one_with_dual_pq_impl_m -- --nocapture + let visualize_filename = "primal_module_serial_basic_4_plugin_one_by_one_with_dual_pq_impl_m.json".to_string(); let defect_vertices = vec![12, 22, 23, 32, 17, 26, 27, 37, 62, 72, 73, 82, 67, 76, 77, 87]; let code = CodeCapacityPlanarCode::new(11, 0.01, 1); primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( @@ -928,100 +1731,51 @@ pub mod tests { PluginUnionFind::entry(), PluginSingleHair::entry_with_strategy(RepeatStrategy::Once), ], - GrowingStrategy::MultipleClusters, + GrowingStrategy::ModeBased, ); } - #[allow(dead_code)] - /// timeout functionality does not work, panic with - /// bug occurs: cluster should be solved, but the subgraph is not yet generated - /// {"[0][6][8]":"Z","[0][6][10]":"X","[0][7][1]":"Y","[0][8][6]":"Y","[0][8][8]":"Z","[0][9][5]":"X"} - // #[test] - fn primal_module_serial_debug_1() { - // cargo test primal_module_serial_debug_1 -- --nocapture - let visualize_filename = "primal_module_serial_debug_1.json".to_string(); - let defect_vertices = vec![10, 23, 16, 41, 29, 17, 3, 37, 25, 43]; - let code = CodeCapacityTailoredCode::new(7, 0.1, 0.1, 1); - primal_module_serial_basic_standard_syndrome( - code, - visualize_filename, - defect_vertices, - 6, - vec![ - PluginUnionFind::entry(), - PluginSingleHair::entry_with_strategy(RepeatStrategy::Multiple { - max_repetition: usize::MAX, - }), - ], - GrowingStrategy::MultipleClusters, - ); - } + #[test] + fn primal_module_serial_circuit_level_noise_1() { + // cargo test primal_module_serial_circuit_level_noise_1 -- --nocapture + let config = json!({ + "code_type": qecp::code_builder::CodeType::RotatedPlanarCode, + }); + + let mut code = QECPlaygroundCode::new(3, 0.005, config); - #[allow(dead_code)] - // #[test] - fn primal_module_serial_debug_1_with_dual_pq_impl() { - // cargo test primal_module_serial_debug_1_with_dual_pq_impl -- --nocapture - let visualize_filename = "primal_module_serial_debug_1_with_dual_pq_impl.json".to_string(); - let defect_vertices = vec![10, 23, 16, 41, 29, 17, 3, 37, 25, 43]; - let code = CodeCapacityTailoredCode::new(7, 0.1, 0.1, 1); - primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( - code, - visualize_filename, - defect_vertices, - 6, - vec![ - PluginUnionFind::entry(), - PluginSingleHair::entry_with_strategy(RepeatStrategy::Multiple { - max_repetition: usize::MAX, - }), - ], - GrowingStrategy::MultipleClusters, - ); - } + + // let defect_vertices = code.clone().generate_random_errors(seed).0.defect_vertices; + let defect_vertices = vec![11, 12, 19]; - #[allow(dead_code)] - /// runs too slow - /// the issue is that the relaxer optimizer runs too slowly... - // #[test] - fn primal_module_serial_debug_2() { - // cargo test primal_module_serial_debug_2 -- --nocapture - let visualize_filename = "primal_module_serial_debug_2.json".to_string(); - let defect_vertices = vec![2, 4, 5, 8, 13, 14, 15, 16, 18, 24, 25, 26, 28, 29]; - let code = CodeCapacityColorCode::new(9, 0.05, 1); - primal_module_serial_basic_standard_syndrome( + let visualize_filename = "primal_module_serial_circuit_level_noise_1.json".to_string(); + primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( code, visualize_filename, - defect_vertices, - 6, - vec![ - PluginUnionFind::entry(), - PluginSingleHair::entry_with_strategy(RepeatStrategy::Multiple { - max_repetition: usize::MAX, - }), - ], - GrowingStrategy::MultipleClusters, + defect_vertices.clone(), + 5914274, + vec![], + GrowingStrategy::ModeBased, ); } - #[allow(dead_code)] + // /// feasible but non-optimal solution // #[test] - fn primal_module_serial_debug_2_with_dual_pq_impl() { - // cargo test primal_module_serial_debug_2_with_dual_pq_impl -- --nocapture - let visualize_filename = "primal_module_serial_debug_2_with_dual_pq_impl.json".to_string(); - let defect_vertices = vec![2, 4, 5, 8, 13, 14, 15, 16, 18, 24, 25, 26, 28, 29]; - let code = CodeCapacityColorCode::new(9, 0.05, 1); - primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( - code, - visualize_filename, - defect_vertices, - 6, - vec![ - PluginUnionFind::entry(), - PluginSingleHair::entry_with_strategy(RepeatStrategy::Multiple { - max_repetition: usize::MAX, - }), - ], - GrowingStrategy::MultipleClusters, - ); - } + // fn primal_module_serial_test_for_seed_131() { + // // cargo test primal_module_serial_test_for_seed_131 -- --nocapture + // let visualize_filename = "primal_module_serial_test_for_seed_131.json".to_string(); + // let defect_vertices = vec![24, 42, 50, 51, 53, 56, 57, 60, 62, 68, 75, 80, 86, 88, 93, 94, 96, 98, 104, 106, 115, 127, 128, 129, 133, 134, 136, 141, 142, 146, 150, 151, 152, 154, 164, 172, 173, 182, 183, 191, 192, 199, 207, 218, 225, 226, 229, 230, 231, 232, 235, 243, 245, 246, 247, 259, 260, 281, 282, 292, 293, 309, 326]; + // let code = CodeCapacityPlanarCode::new(19, 0.05, 1000); + // primal_module_serial_basic_standard_syndrome_with_dual_pq_impl( + // code, + // visualize_filename, + // defect_vertices, + // 39000, + // vec![ + // // PluginUnionFind::entry(), + // // PluginSingleHair::entry_with_strategy(RepeatStrategy::Once), + // ], + // GrowingStrategy::ModeBased, + // ); + // } } diff --git a/src/primal_module_union_find.rs b/src/primal_module_union_find.rs index 6f4f9fa6..4568a1bf 100644 --- a/src/primal_module_union_find.rs +++ b/src/primal_module_union_find.rs @@ -6,6 +6,8 @@ //! there might be some minor difference with Delfosse's paper, but the idea is the same //! +use weak_table::PtrWeakHashSet; + use crate::derivative::Derivative; use crate::dual_module::*; use crate::invalid_subgraph::*; @@ -18,6 +20,11 @@ use crate::util::*; use crate::visualize::*; use std::collections::BTreeSet; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; + #[derive(Derivative)] #[derivative(Debug)] pub struct PrimalModuleUnionFind { @@ -28,10 +35,10 @@ pub struct PrimalModuleUnionFind { type UnionFind = UnionFindGeneric; /// define your own union-find node data structure like this -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Clone)] pub struct PrimalModuleUnionFindNode { /// all the internal edges - pub internal_edges: BTreeSet, + pub internal_edges: BTreeSet, /// the corresponding node index with these internal edges pub node_index: NodeIndex, } @@ -40,7 +47,7 @@ pub struct PrimalModuleUnionFindNode { impl UnionNodeTrait for PrimalModuleUnionFindNode { #[inline] fn union(left: &Self, right: &Self) -> (bool, Self) { - let mut internal_edges = BTreeSet::new(); + let mut internal_edges: BTreeSet = BTreeSet::new(); internal_edges.extend(left.internal_edges.iter().cloned()); internal_edges.extend(right.internal_edges.iter().cloned()); let result = Self { @@ -106,15 +113,14 @@ impl PrimalModuleImpl for PrimalModuleUnionFind { mut group_max_update_length: GroupMaxUpdateLength, interface_ptr: &DualModuleInterfacePtr, dual_module: &mut impl DualModuleImpl, - ) { + ) -> bool { debug_assert!(!group_max_update_length.is_unbounded() && group_max_update_length.get_valid_growth().is_none()); let mut active_clusters = BTreeSet::::new(); while let Some(conflict) = group_max_update_length.pop() { - // println!("conflict: {conflict:?}"); match conflict { MaxUpdateLength::Conflicting(edge_index) => { // union all the dual nodes in the edge index and create new dual node by adding this edge to `internal_edges` - let dual_nodes = dual_module.get_edge_nodes(edge_index); + let dual_nodes = dual_module.get_edge_nodes(edge_index.clone()); debug_assert!( !dual_nodes.is_empty(), "should not conflict if no dual nodes are contributing" @@ -129,7 +135,7 @@ impl PrimalModuleImpl for PrimalModuleUnionFind { self.union_find .get_mut(cluster_index as usize) .internal_edges - .insert(edge_index); + .insert(edge_index.clone()); active_clusters.insert(self.union_find.find(cluster_index as usize) as NodeIndex); } _ => { @@ -139,8 +145,6 @@ impl PrimalModuleImpl for PrimalModuleUnionFind { } for &cluster_index in active_clusters.iter() { if interface_ptr - .read_recursive() - .decoding_graph .is_valid_cluster_auto_vertices(&self.union_find.get(cluster_index as usize).internal_edges) { // do nothing @@ -152,15 +156,19 @@ impl PrimalModuleImpl for PrimalModuleUnionFind { }); self.union_find.union(cluster_index as usize, new_cluster_node_index as usize); let invalid_subgraph = InvalidSubgraph::new_ptr( - self.union_find.get(cluster_index as usize).internal_edges.clone(), - &interface_ptr.read_recursive().decoding_graph, + &self.union_find.get(cluster_index as usize).internal_edges.clone(), ); interface_ptr.create_node(invalid_subgraph, dual_module); } } + false } - fn subgraph(&mut self, interface_ptr: &DualModuleInterfacePtr, _dual_module: &mut impl DualModuleImpl) -> Subgraph { + fn subgraph( + &mut self, + interface_ptr: &DualModuleInterfacePtr, + _seed: u64, + ) -> Subgraph { let mut valid_clusters = BTreeSet::new(); let mut subgraph = vec![]; for i in 0..self.union_find.size() { @@ -168,11 +176,9 @@ impl PrimalModuleImpl for PrimalModuleUnionFind { if !valid_clusters.contains(&root_index) { valid_clusters.insert(root_index); let cluster_subgraph = interface_ptr - .read_recursive() - .decoding_graph .find_valid_subgraph_auto_vertices(&self.union_find.get(root_index).internal_edges) .expect("must be valid cluster"); - subgraph.extend(cluster_subgraph.iter()); + subgraph.extend(cluster_subgraph); } } subgraph @@ -194,8 +200,12 @@ pub mod tests { use super::super::dual_module_serial::*; use super::super::example_codes::*; use super::*; + use crate::dual_module; use crate::dual_module_pq::DualModulePQ; use crate::dual_module_pq::FutureObstacleQueue; + // use crate::dual_module_pq::_FutureObstacleQueue; + // use crate::dual_module_pq::PairingPQ; + // use crate::dual_module_pq::RankPairingPQ; use crate::more_asserts::*; use crate::num_traits::{FromPrimitive, ToPrimitive}; use std::sync::Arc; @@ -204,7 +214,7 @@ pub mod tests { mut code: impl ExampleCode, defect_vertices: Vec, final_dual: Weight, - mut dual_module: impl DualModuleImpl + MWPSVisualizer, + mut dual_module: impl DualModuleImpl + MWPSVisualizer + Send + Sync, model_graph: Arc, mut visualizer: Option, ) -> ( @@ -216,14 +226,14 @@ pub mod tests { let mut primal_module = PrimalModuleUnionFind::new_empty(&model_graph.initializer); // try to work on a simple syndrome code.set_defect_vertices(&defect_vertices); - let interface_ptr = DualModuleInterfacePtr::new(model_graph.clone()); + let interface_ptr = DualModuleInterfacePtr::new(); primal_module.solve_visualizer( &interface_ptr, Arc::new(code.get_syndrome()), &mut dual_module, visualizer.as_mut(), ); - let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, &mut dual_module); + let (subgraph, weight_range) = primal_module.subgraph_range(&interface_ptr, 0); if let Some(visualizer) = visualizer.as_mut() { visualizer .snapshot_combined( @@ -278,12 +288,13 @@ pub mod tests { // create dual module let model_graph = code.get_model_graph(); + let dual_module: DualModulePQ> = DualModulePQ::new_empty(&model_graph.initializer); primal_module_union_find_basic_standard_syndrome_optional_viz( code, defect_vertices, final_dual, - DualModuleSerial::new_empty(&model_graph.initializer), + dual_module, model_graph, Some(visualizer), ) diff --git a/src/relaxer.rs b/src/relaxer.rs index aa09f6c5..85743319 100644 --- a/src/relaxer.rs +++ b/src/relaxer.rs @@ -2,11 +2,16 @@ use crate::derivative::Derivative; use crate::invalid_subgraph::*; use crate::util::*; use num_traits::{Signed, Zero}; +use weak_table::PtrWeakKeyHashMap; use std::cmp::Ordering; use std::collections::hash_map::DefaultHasher; use std::collections::BTreeMap; use std::hash::{Hash, Hasher}; use std::sync::Arc; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; #[derive(Clone, PartialEq, Eq, Derivative)] #[derivative(Debug)] @@ -18,9 +23,9 @@ pub struct Relaxer { direction: BTreeMap, Rational>, /// the edges that will be untightened after growing along `direction`; /// basically all the edges that have negative `overall_growing_rate` - untighten_edges: BTreeMap, + untighten_edges: BTreeMap, /// the edges that will grow - growing_edges: BTreeMap, + growing_edges: BTreeMap, } impl Hash for Relaxer { @@ -58,24 +63,28 @@ impl Relaxer { relaxer } + pub fn clear(&mut self) { + self.direction.clear(); + } + pub fn new_raw(direction: BTreeMap, Rational>) -> Self { let mut edges = BTreeMap::new(); for (invalid_subgraph, speed) in direction.iter() { - for &edge_index in invalid_subgraph.hair.iter() { - if let Some(edge) = edges.get_mut(&edge_index) { + for edge_ptr in invalid_subgraph.hair.iter() { + if let Some(edge) = edges.get_mut(&edge_ptr) { *edge += speed; } else { - edges.insert(edge_index, speed.clone()); + edges.insert(edge_ptr, speed.clone()); } } } let mut untighten_edges = BTreeMap::new(); let mut growing_edges = BTreeMap::new(); - for (edge_index, speed) in edges { + for (edge_ptr, speed) in edges { if speed.is_negative() { - untighten_edges.insert(edge_index, speed); + untighten_edges.insert(edge_ptr.clone(), speed); } else if speed.is_positive() { - growing_edges.insert(edge_index, speed); + growing_edges.insert(edge_ptr.clone(), speed); } } let mut relaxer = Self { @@ -119,74 +128,74 @@ impl Relaxer { &self.direction } - pub fn get_growing_edges(&self) -> &BTreeMap { + pub fn get_growing_edges(&self) -> &BTreeMap { &self.growing_edges } - pub fn get_untighten_edges(&self) -> &BTreeMap { + pub fn get_untighten_edges(&self) -> &BTreeMap { &self.untighten_edges } } -#[cfg(test)] -mod tests { - use super::*; - use crate::decoding_hypergraph::tests::*; - use crate::invalid_subgraph::tests::*; - use num_traits::One; - use std::collections::BTreeSet; - - #[test] - fn relaxer_good() { - // cargo test relaxer_good -- --nocapture - let visualize_filename = "relaxer_good.json".to_string(); - let (decoding_graph, ..) = color_code_5_decoding_graph(vec![7, 1], visualize_filename); - let invalid_subgraph = Arc::new(InvalidSubgraph::new_complete( - vec![7].into_iter().collect(), - BTreeSet::new(), - decoding_graph.as_ref(), - )); - use num_traits::One; - let relaxer = Relaxer::new([(invalid_subgraph, Rational::one())].into()); - println!("relaxer: {relaxer:?}"); - assert!(relaxer.untighten_edges.is_empty()); - } - - #[test] - #[should_panic] - fn relaxer_bad() { - // cargo test relaxer_bad -- --nocapture - let visualize_filename = "relaxer_bad.json".to_string(); - let (decoding_graph, ..) = color_code_5_decoding_graph(vec![7, 1], visualize_filename); - let invalid_subgraph = Arc::new(InvalidSubgraph::new_complete( - vec![7].into_iter().collect(), - BTreeSet::new(), - decoding_graph.as_ref(), - )); - let relaxer: Relaxer = Relaxer::new([(invalid_subgraph, Rational::zero())].into()); - println!("relaxer: {relaxer:?}"); // should not print because it panics - } - - #[test] - fn relaxer_hash() { - // cargo test relaxer_hash -- --nocapture - let vertices: BTreeSet = [1, 2, 3].into(); - let edges: BTreeSet = [4, 5].into(); - let hair: BTreeSet = [6, 7, 8].into(); - let invalid_subgraph = InvalidSubgraph::new_raw(vertices.clone(), edges.clone(), hair.clone()); - let relaxer_1 = Relaxer::new([(Arc::new(invalid_subgraph.clone()), Rational::one())].into()); - let relaxer_2 = Relaxer::new([(Arc::new(invalid_subgraph), Rational::one())].into()); - assert_eq!(relaxer_1, relaxer_2); - // they should have the same hash value - assert_eq!( - get_default_hash_value(&relaxer_1), - get_default_hash_value(&relaxer_1.hash_value) - ); - assert_eq!(get_default_hash_value(&relaxer_1), get_default_hash_value(&relaxer_2)); - // the pointer should also have the same hash value - let ptr_1 = Arc::new(relaxer_1); - let ptr_2 = Arc::new(relaxer_2); - assert_eq!(get_default_hash_value(&ptr_1), get_default_hash_value(&ptr_1.hash_value)); - assert_eq!(get_default_hash_value(&ptr_1), get_default_hash_value(&ptr_2)); - } -} +// #[cfg(test)] +// mod tests { +// use super::*; +// use crate::decoding_hypergraph::tests::*; +// use crate::invalid_subgraph::tests::*; +// use num_traits::One; +// use std::collections::BTreeSet; + +// #[test] +// fn relaxer_good() { +// // cargo test relaxer_good -- --nocapture +// let visualize_filename = "relaxer_good.json".to_string(); +// let (decoding_graph, ..) = color_code_5_decoding_graph(vec![7, 1], visualize_filename); +// let invalid_subgraph = Arc::new(InvalidSubgraph::new_complete( +// vec![7].into_iter().collect(), +// BTreeSet::new(), +// decoding_graph.as_ref(), +// )); +// use num_traits::One; +// let relaxer = Relaxer::new([(invalid_subgraph, Rational::one())].into()); +// println!("relaxer: {relaxer:?}"); +// assert!(relaxer.untighten_edges.is_empty()); +// } + +// #[test] +// #[should_panic] +// fn relaxer_bad() { +// // cargo test relaxer_bad -- --nocapture +// let visualize_filename = "relaxer_bad.json".to_string(); +// let (decoding_graph, ..) = color_code_5_decoding_graph(vec![7, 1], visualize_filename); +// let invalid_subgraph = Arc::new(InvalidSubgraph::new_complete( +// vec![7].into_iter().collect(), +// BTreeSet::new(), +// decoding_graph.as_ref(), +// )); +// let relaxer: Relaxer = Relaxer::new([(invalid_subgraph, Rational::zero())].into()); +// println!("relaxer: {relaxer:?}"); // should not print because it panics +// } + +// #[test] +// fn relaxer_hash() { +// // cargo test relaxer_hash -- --nocapture +// let vertices: BTreeSet = [1, 2, 3].into(); +// let edges: BTreeSet = [4, 5].into(); +// let hair: BTreeSet = [6, 7, 8].into(); +// let invalid_subgraph = InvalidSubgraph::new_raw(vertices.clone(), edges.clone(), hair.clone()); +// let relaxer_1 = Relaxer::new([(Arc::new(invalid_subgraph.clone()), Rational::one())].into()); +// let relaxer_2 = Relaxer::new([(Arc::new(invalid_subgraph), Rational::one())].into()); +// assert_eq!(relaxer_1, relaxer_2); +// // they should have the same hash value +// assert_eq!( +// get_default_hash_value(&relaxer_1), +// get_default_hash_value(&relaxer_1.hash_value) +// ); +// assert_eq!(get_default_hash_value(&relaxer_1), get_default_hash_value(&relaxer_2)); +// // the pointer should also have the same hash value +// let ptr_1 = Arc::new(relaxer_1); +// let ptr_2 = Arc::new(relaxer_2); +// assert_eq!(get_default_hash_value(&ptr_1), get_default_hash_value(&ptr_1.hash_value)); +// assert_eq!(get_default_hash_value(&ptr_1), get_default_hash_value(&ptr_2)); +// } +// } diff --git a/src/relaxer_forest.rs b/src/relaxer_forest.rs index b990815f..1b9ad357 100644 --- a/src/relaxer_forest.rs +++ b/src/relaxer_forest.rs @@ -8,22 +8,29 @@ use crate::num_traits::Zero; use crate::relaxer::*; use crate::util::*; use num_traits::Signed; +use weak_table::PtrWeakHashSet; +use weak_table::PtrWeakKeyHashMap; use std::collections::{BTreeMap, BTreeSet}; use std::sync::Arc; +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; + pub type RelaxerVec = Vec; /// a forest of relaxers that possibly depend on each other pub struct RelaxerForest { /// keep track of the remaining tight edges for quick validation: /// these edges cannot grow unless untightened by some relaxers - tight_edges: BTreeSet, + tight_edges: BTreeSet, /// keep track of the subgraphs that are allowed to shrink: /// these should be all positive dual variables, all others are yS = 0 shrinkable_subgraphs: BTreeSet>, /// each untightened edge corresponds to a relaxer with speed: /// to untighten the edge for a unit length, how much should a relaxer be executed - edge_untightener: BTreeMap, Rational)>, + edge_untightener: BTreeMap, Rational)>, /// expanded relaxer results, as part of the dynamic programming: /// the expanded relaxer is a valid relaxer only growing of initial un-tight edges, /// not any edges untightened by other relaxers @@ -36,11 +43,11 @@ pub const FOREST_ERR_MSG_UNSHRINKABLE: &str = "invalid relaxer: try to shrink a impl RelaxerForest { pub fn new(tight_edges: IterEdge, shrinkable_subgraphs: IterSubgraph) -> Self where - IterEdge: Iterator, + IterEdge: Iterator, IterSubgraph: Iterator>, { Self { - tight_edges: BTreeSet::from_iter(tight_edges), + tight_edges: tight_edges.map(|e| e.upgrade_force()).collect(), shrinkable_subgraphs: BTreeSet::from_iter(shrinkable_subgraphs), edge_untightener: BTreeMap::new(), expanded_relaxers: BTreeMap::new(), @@ -53,9 +60,9 @@ impl RelaxerForest { // non-negative overall speed and effectiveness check relaxer.sanity_check()?; // a relaxer cannot grow any tight edge - for (edge_index, _) in relaxer.get_growing_edges().iter() { - if self.tight_edges.contains(edge_index) && !self.edge_untightener.contains_key(edge_index) { - return Err(format!("{FOREST_ERR_MSG_GROW_TIGHT_EDGE}: {edge_index}")); + for (edge_ptr, _) in relaxer.get_growing_edges().iter() { + if self.tight_edges.contains(&edge_ptr) && !self.edge_untightener.contains_key(&edge_ptr) { + return Err(format!("{FOREST_ERR_MSG_GROW_TIGHT_EDGE}: {:?}", edge_ptr.read_recursive().edge_index)); } } // a relaxer cannot shrink any zero dual variable @@ -72,10 +79,10 @@ impl RelaxerForest { // validate only at debug mode to improve speed debug_assert_eq!(self.validate(&relaxer), Ok(())); // add this relaxer to the forest - for (edge_index, speed) in relaxer.get_untighten_edges() { + for (edge_ptr, speed) in relaxer.get_untighten_edges() { debug_assert!(speed.is_negative()); - if !self.edge_untightener.contains_key(edge_index) { - self.edge_untightener.insert(*edge_index, (relaxer.clone(), -speed.recip())); + if !self.edge_untightener.contains_key(&edge_ptr) { + self.edge_untightener.insert(edge_ptr.clone(), (relaxer.clone(), -speed.recip())); } } } @@ -84,13 +91,18 @@ impl RelaxerForest { if self.expanded_relaxers.contains_key(relaxer) { return; } - let mut untightened_edges: BTreeMap = BTreeMap::new(); + let mut untightened_edges: BTreeMap = BTreeMap::new(); let mut directions: BTreeMap, Rational> = relaxer.get_direction().clone(); - for (edge_index, speed) in relaxer.get_growing_edges() { + // println!("relaxer.growing_edges: {:?}", relaxer.get_growing_edges()); + for (edge_ptr, speed) in relaxer.get_growing_edges().iter() { + // println!("edge_ptr index: {:?}", edge_ptr.read_recursive().edge_index); + // println!("speed: {:?}", speed); debug_assert!(speed.is_positive()); - if self.tight_edges.contains(edge_index) { - debug_assert!(self.edge_untightener.contains_key(edge_index)); - let require_speed = if let Some(existing_speed) = untightened_edges.get_mut(edge_index) { + if self.tight_edges.contains(&edge_ptr) { + debug_assert!(self.edge_untightener.contains_key(&edge_ptr)); + // println!("untightened_edges: {:?}", untightened_edges); + let require_speed = if let Some(existing_speed) = untightened_edges.get_mut(&edge_ptr) { + // println!("existing speed: {:?}", existing_speed); if &*existing_speed >= speed { *existing_speed -= speed; Rational::zero() @@ -102,12 +114,19 @@ impl RelaxerForest { } else { speed.clone() }; + // println!("require_speed: {:?}", require_speed); if require_speed.is_positive() { // we need to invoke another relaxer to untighten this edge - let edge_relaxer = self.edge_untightener.get(edge_index).unwrap().0.clone(); + let edge_relaxer = self.edge_untightener.get(&edge_ptr).unwrap().0.clone(); self.compute_expanded(&edge_relaxer); - let (edge_relaxer, speed_ratio) = self.edge_untightener.get(edge_index).unwrap(); + // println!("edge_ptr need to find is {:?}", edge_ptr); + // println!("self.edge_untightener: {:?}", self.edge_untightener); + let (edge_relaxer, speed_ratio) = self.edge_untightener.get(&edge_ptr).unwrap(); + // println!("edge_relaxer found: {:?}", edge_relaxer); + // println!("speed_ratio: {:?}", speed_ratio); debug_assert!(speed_ratio.is_positive()); + // println!("edge_relaxer: {:?}", edge_relaxer); + // println!("self.expanded_relaxers: {:?}", self.expanded_relaxers); let expanded_edge_relaxer = self.expanded_relaxers.get(edge_relaxer).unwrap(); for (subgraph, original_speed) in expanded_edge_relaxer.get_direction() { let new_speed = original_speed * speed_ratio; @@ -120,14 +139,22 @@ impl RelaxerForest { for (edge_index, original_speed) in expanded_edge_relaxer.get_untighten_edges() { debug_assert!(original_speed.is_negative()); let new_speed = -original_speed * speed_ratio; - if let Some(speed) = untightened_edges.get_mut(edge_index) { + // println!("untightened_edges: {:?}", untightened_edges); + // println!("edge_index: {:?}", edge_index); + // println!("new_speed: {:?}", new_speed); + // println!("original_speed: {:?}", original_speed); + // println!("speed ratio: {:?}", speed_ratio); + if let Some(speed) = untightened_edges.get_mut(&edge_index) { *speed += new_speed; } else { - untightened_edges.insert(*edge_index, new_speed); + untightened_edges.insert(edge_index.clone(), new_speed); } } - debug_assert_eq!(untightened_edges.get(edge_index), Some(&require_speed)); - *untightened_edges.get_mut(edge_index).unwrap() -= require_speed; + // println!("ungithtended_edges final: {:?}", untightened_edges); + // println!("left assert: edge ptr: {:?}", edge_ptr); + // println!("right assert: require speed: {:?}", require_speed); + debug_assert_eq!(untightened_edges.get(&edge_ptr), Some(&require_speed)); + *untightened_edges.get_mut(&edge_ptr).unwrap() -= require_speed; } } } @@ -136,7 +163,7 @@ impl RelaxerForest { debug_assert!(expanded .get_growing_edges() .iter() - .all(|(edge_index, _)| !self.tight_edges.contains(edge_index))); + .all(|(edge_index, _)| !self.tight_edges.contains(&edge_index))); self.expanded_relaxers.insert(relaxer.clone(), expanded); } @@ -152,17 +179,76 @@ impl RelaxerForest { pub mod tests { use super::*; use num_traits::{FromPrimitive, One}; + use crate::{pointers::*, relaxer}; + #[cfg(feature = "pq")] + use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr, Edge, Vertex}; + #[cfg(feature = "non-pq")] + use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr, Edge, Vertex}; + #[test] fn relaxer_forest_example() { // cargo test relaxer_forest_example -- --nocapture - let tight_edges = [0, 1, 2, 3, 4, 5, 6]; + // // create vertices + // let vertices: Vec = (0..parity_checks.len()) + // .map(|vertex_index| { + // VertexPtr::new_value(Vertex { + // vertex_index, + // is_defect: false, + // edges: vec![], + // }) + // }) + // .collect(); + + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = (0..11) + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + let mut tight_edges = vec![]; + for edge_index in [0, 1, 2, 3, 4, 5, 6] { + tight_edges.push(edges[edge_index].downgrade()); + } + + let mut local_hair_1 = BTreeSet::new(); + local_hair_1.insert(edges[1].clone()); + local_hair_1.insert(edges[2].clone()); + local_hair_1.insert(edges[3].clone()); + let mut local_hair_2 = BTreeSet::new(); + local_hair_2.insert(edges[4].clone()); + local_hair_2.insert(edges[5].clone()); + let local_vertice_1 = BTreeSet::new(); + let local_edge_1 = BTreeSet::new(); + let local_vertice_2 = BTreeSet::new(); + let local_edge_2 = BTreeSet::new(); let shrinkable_subgraphs = [ - Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [1, 2, 3].into())), - Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [4, 5].into())), + Arc::new(InvalidSubgraph::new_raw(&local_vertice_1, &local_edge_1, &local_hair_1)), + Arc::new(InvalidSubgraph::new_raw(&local_vertice_2, &local_edge_2, &local_hair_2)), ]; let mut relaxer_forest = RelaxerForest::new(tight_edges.into_iter(), shrinkable_subgraphs.iter().cloned()); - let invalid_subgraph_1 = Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [7, 8, 9].into())); + + let mut local_hair_3 = BTreeSet::new(); + local_hair_3.insert(edges[7].clone()); + local_hair_3.insert(edges[8].clone()); + local_hair_3.insert(edges[9].clone()); + let local_vertice_3 = BTreeSet::new(); + let local_edge_3 = BTreeSet::new(); + let invalid_subgraph_1 = Arc::new(InvalidSubgraph::new_raw(&local_vertice_3, &local_edge_3, &local_hair_3)); let relaxer_1 = Arc::new(Relaxer::new_raw( [ (invalid_subgraph_1.clone(), Rational::one()), @@ -174,7 +260,13 @@ pub mod tests { assert_eq!(expanded_1, *relaxer_1); relaxer_forest.add(relaxer_1); // now add a relaxer that is relying on relaxer_1 - let invalid_subgraph_2 = Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [1, 2, 7].into())); + let mut local_hair_4 = BTreeSet::new(); + local_hair_4.insert(edges[1].clone()); + local_hair_4.insert(edges[2].clone()); + local_hair_4.insert(edges[7].clone()); + let local_vertice_4 = BTreeSet::new(); + let local_edge_4 = BTreeSet::new(); + let invalid_subgraph_2 = Arc::new(InvalidSubgraph::new_raw(&local_vertice_4, &local_edge_4, &local_hair_4)); let relaxer_2 = Arc::new(Relaxer::new_raw([(invalid_subgraph_2.clone(), Rational::one())].into())); let expanded_2 = relaxer_forest.expand(&relaxer_2); assert_eq!( @@ -194,13 +286,56 @@ pub mod tests { #[test] fn relaxer_forest_require_multiple() { // cargo test relaxer_forest_require_multiple -- --nocapture - let tight_edges = [0, 1, 2, 3, 4, 5, 6]; + let global_time = ArcRwLock::new_value(Rational::zero()); + + // create edges + let edges: Vec = (0..11) + .map(|edge_index| { + EdgePtr::new_value(Edge { + edge_index: edge_index, + weight: Rational::zero(), + dual_nodes: vec![], + vertices: vec![], + last_updated_time: Rational::zero(), + growth_at_last_updated_time: Rational::zero(), + grow_rate: Rational::zero(), + unit_index: None, + connected_to_boundary_vertex: false, + #[cfg(feature = "incr_lp")] + cluster_weights: hashbrown::HashMap::new(), + }) + }).collect(); + + let mut tight_edges = vec![]; + for edge_index in [0, 1, 2, 3, 4, 5, 6] { + tight_edges.push(edges[edge_index].downgrade()); + } + + let mut local_hair_1 = BTreeSet::new(); + local_hair_1.insert(edges[1].clone()); + local_hair_1.insert(edges[2].clone()); + let mut local_hair_2 = BTreeSet::new(); + local_hair_2.insert(edges[3].clone()); + let local_vertice_1 = BTreeSet::new(); + let local_edge_1 = BTreeSet::new(); + let local_vertice_2 = BTreeSet::new(); + let local_edge_2 = BTreeSet::new(); + let shrinkable_subgraphs = [ - Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [1, 2].into())), - Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [3].into())), + Arc::new(InvalidSubgraph::new_raw(&local_vertice_1, &local_edge_1, &local_hair_1)), + Arc::new(InvalidSubgraph::new_raw(&local_vertice_2, &local_edge_2, &local_hair_2)), ]; + + // println!("shrinkable_subgraphs: {:?}", shrinkable_subgraphs); let mut relaxer_forest = RelaxerForest::new(tight_edges.into_iter(), shrinkable_subgraphs.iter().cloned()); - let invalid_subgraph_1 = Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [7, 8, 9].into())); + + let mut local_hair_3 = BTreeSet::new(); + local_hair_3.insert(edges[7].clone()); + local_hair_3.insert(edges[8].clone()); + local_hair_3.insert(edges[9].clone()); + let local_vertice_3 = BTreeSet::new(); + let local_edge_3 = BTreeSet::new(); + let invalid_subgraph_1 = Arc::new(InvalidSubgraph::new_raw(&local_vertice_3, &local_edge_3, &local_hair_3)); let relaxer_1 = Arc::new(Relaxer::new_raw( [ (invalid_subgraph_1.clone(), Rational::one()), @@ -208,9 +343,23 @@ pub mod tests { ] .into(), )); + // println!("relaxer_1: {:?}", relaxer_1); relaxer_forest.add(relaxer_1); - let invalid_subgraph_2 = Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [1, 2, 7].into())); - let invalid_subgraph_3 = Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [2].into())); + + + let mut local_hair_4 = BTreeSet::new(); + local_hair_4.insert(edges[1].clone()); + local_hair_4.insert(edges[2].clone()); + local_hair_4.insert(edges[7].clone()); + let local_vertice_4 = BTreeSet::new(); + let local_edge_4 = BTreeSet::new(); + let invalid_subgraph_2 = Arc::new(InvalidSubgraph::new_raw(&local_vertice_4, &local_edge_4, &local_hair_4)); + + let mut local_hair_5 = BTreeSet::new(); + local_hair_5.insert(edges[2].clone()); + let local_vertice_5 = BTreeSet::new(); + let local_edge_5 = BTreeSet::new(); + let invalid_subgraph_3 = Arc::new(InvalidSubgraph::new_raw(&local_vertice_5, &local_edge_5, &local_hair_5)); let relaxer_2 = Arc::new(Relaxer::new_raw( [ (invalid_subgraph_2.clone(), Rational::one()), @@ -219,113 +368,116 @@ pub mod tests { .into(), )); let expanded_2 = relaxer_forest.expand(&relaxer_2); + let intended_relaxer = Relaxer::new( + [ + (invalid_subgraph_2, Rational::one()), + (invalid_subgraph_3, Rational::one()), + (invalid_subgraph_1, Rational::from_usize(2).unwrap()), + (shrinkable_subgraphs[0].clone(), -Rational::from_usize(2).unwrap()), + ] + .into() + ); + println!("expanded_2: {:?}", expanded_2); + println!("intended relaxer: {:?}", intended_relaxer); assert_eq!( expanded_2, - Relaxer::new( - [ - (invalid_subgraph_2, Rational::one()), - (invalid_subgraph_3, Rational::one()), - (invalid_subgraph_1, Rational::from_usize(2).unwrap()), - (shrinkable_subgraphs[0].clone(), -Rational::from_usize(2).unwrap()), - ] - .into() - ) + intended_relaxer ); // println!("{expanded_2:#?}"); } - #[test] - fn relaxer_forest_relaxing_same_edge() { - // cargo test relaxer_forest_relaxing_same_edge -- --nocapture - let tight_edges = [0, 1, 2, 3, 4, 5, 6]; - let shrinkable_subgraphs = [ - Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [1, 2].into())), - Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [2, 3].into())), - ]; - let mut relaxer_forest = RelaxerForest::new(tight_edges.into_iter(), shrinkable_subgraphs.iter().cloned()); - let invalid_subgraph_1 = Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [7, 8, 9].into())); - let relaxer_1 = Arc::new(Relaxer::new_raw( - [ - (invalid_subgraph_1.clone(), Rational::one()), - (shrinkable_subgraphs[0].clone(), -Rational::one()), - ] - .into(), - )); - relaxer_forest.add(relaxer_1); - let invalid_subgraph_2 = Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [10, 11].into())); - let relaxer_2 = Arc::new(Relaxer::new_raw( - [ - (invalid_subgraph_2.clone(), Rational::one()), - (shrinkable_subgraphs[1].clone(), -Rational::one()), - ] - .into(), - )); - relaxer_forest.add(relaxer_2); - } + // #[test] + // fn relaxer_forest_relaxing_same_edge() { + // // cargo test relaxer_forest_relaxing_same_edge -- --nocapture + // let tight_edges = [0, 1, 2, 3, 4, 5, 6]; + // let shrinkable_subgraphs = [ + // Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [1, 2].into())), + // Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [2, 3].into())), + // ]; + // let mut relaxer_forest = RelaxerForest::new(tight_edges.into_iter(), shrinkable_subgraphs.iter().cloned()); + // let invalid_subgraph_1 = Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [7, 8, 9].into())); + // let relaxer_1 = Arc::new(Relaxer::new_raw( + // [ + // (invalid_subgraph_1.clone(), Rational::one()), + // (shrinkable_subgraphs[0].clone(), -Rational::one()), + // ] + // .into(), + // )); + // relaxer_forest.add(relaxer_1); + // let invalid_subgraph_2 = Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [10, 11].into())); + // let relaxer_2 = Arc::new(Relaxer::new_raw( + // [ + // (invalid_subgraph_2.clone(), Rational::one()), + // (shrinkable_subgraphs[1].clone(), -Rational::one()), + // ] + // .into(), + // )); + // relaxer_forest.add(relaxer_2); + // } - #[test] - fn relaxer_forest_validate() { - // cargo test relaxer_forest_validate -- --nocapture - let tight_edges = [0, 1, 2, 3, 4, 5, 6]; - let shrinkable_subgraphs = [ - Arc::new(InvalidSubgraph::new_raw([1].into(), [].into(), [1, 2].into())), - Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [].into())), - ]; - let relaxer_forest = RelaxerForest::new(tight_edges.into_iter(), shrinkable_subgraphs.iter().cloned()); - println!("relaxer_forest: {:?}", relaxer_forest.shrinkable_subgraphs); - // invalid relaxer is forbidden - let invalid_relaxer = Relaxer::new_raw( - [( - Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [].into())), - -Rational::one(), - )] - .into(), - ); - let error_message = relaxer_forest.validate(&invalid_relaxer).expect_err("should panic"); - assert_eq!( - &error_message[..RELAXER_ERR_MSG_NEGATIVE_SUMMATION.len()], - RELAXER_ERR_MSG_NEGATIVE_SUMMATION - ); - // relaxer that increases a tight edge is forbidden - let relaxer = Relaxer::new_raw( - [( - Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [1].into())), - Rational::one(), - )] - .into(), - ); - let error_message = relaxer_forest.validate(&relaxer).expect_err("should panic"); - assert_eq!( - &error_message[..FOREST_ERR_MSG_GROW_TIGHT_EDGE.len()], - FOREST_ERR_MSG_GROW_TIGHT_EDGE - ); - // relaxer that shrinks a zero dual variable is forbidden - let relaxer = Relaxer::new_raw( - [ - ( - Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [9].into())), - Rational::one(), - ), - ( - Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [2, 3].into())), - -Rational::one(), - ), - ] - .into(), - ); - let error_message = relaxer_forest.validate(&relaxer).expect_err("should panic"); - assert_eq!( - &error_message[..FOREST_ERR_MSG_UNSHRINKABLE.len()], - FOREST_ERR_MSG_UNSHRINKABLE - ); - // otherwise a relaxer is ok - let relaxer = Relaxer::new_raw( - [( - Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [9].into())), - Rational::one(), - )] - .into(), - ); - relaxer_forest.validate(&relaxer).unwrap(); - } + // #[test] + // fn relaxer_forest_validate() { + // // cargo test relaxer_forest_validate -- --nocapture + // let tight_edges = [0, 1, 2, 3, 4, 5, 6]; + // let shrinkable_subgraphs = [ + // Arc::new(InvalidSubgraph::new_raw([1].into(), [].into(), [1, 2].into())), + // Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [].into())), + // ]; + // let relaxer_forest = RelaxerForest::new(tight_edges.into_iter(), shrinkable_subgraphs.iter().cloned()); + // println!("relaxer_forest: {:?}", relaxer_forest.shrinkable_subgraphs); + // // invalid relaxer is forbidden + // let invalid_relaxer = Relaxer::new_raw( + // [( + // Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [].into())), + // -Rational::one(), + // )] + // .into(), + // ); + // let error_message = relaxer_forest.validate(&invalid_relaxer).expect_err("should panic"); + // assert_eq!( + // &error_message[..RELAXER_ERR_MSG_NEGATIVE_SUMMATION.len()], + // RELAXER_ERR_MSG_NEGATIVE_SUMMATION + // ); + // // relaxer that increases a tight edge is forbidden + // let relaxer = Relaxer::new_raw( + // [( + // Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [1].into())), + // Rational::one(), + // )] + // .into(), + // ); + // let error_message = relaxer_forest.validate(&relaxer).expect_err("should panic"); + // assert_eq!( + // &error_message[..FOREST_ERR_MSG_GROW_TIGHT_EDGE.len()], + // FOREST_ERR_MSG_GROW_TIGHT_EDGE + // ); + // // relaxer that shrinks a zero dual variable is forbidden + // let relaxer = Relaxer::new_raw( + // [ + // ( + // Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [9].into())), + // Rational::one(), + // ), + // ( + // Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [2, 3].into())), + // -Rational::one(), + // ), + // ] + // .into(), + // ); + // let error_message = relaxer_forest.validate(&relaxer).expect_err("should panic"); + // assert_eq!( + // &error_message[..FOREST_ERR_MSG_UNSHRINKABLE.len()], + // FOREST_ERR_MSG_UNSHRINKABLE + // ); + // // otherwise a relaxer is ok + // let relaxer = Relaxer::new_raw( + // [( + // Arc::new(InvalidSubgraph::new_raw([].into(), [].into(), [9].into())), + // Rational::one(), + // )] + // .into(), + // ); + // relaxer_forest.validate(&relaxer).unwrap(); + // } } diff --git a/src/relaxer_optimizer.rs b/src/relaxer_optimizer.rs index 62c0202b..efb403b7 100644 --- a/src/relaxer_optimizer.rs +++ b/src/relaxer_optimizer.rs @@ -9,13 +9,72 @@ use crate::invalid_subgraph::*; use crate::relaxer::*; use crate::util::*; -use derivative::Derivative; -use num_traits::Signed; -use num_traits::{One, Zero}; + use std::collections::{BTreeMap, BTreeSet}; -use std::str::FromStr; use std::sync::Arc; +use derivative::Derivative; + +use num_traits::{Signed, Zero}; +use weak_table::PtrWeakKeyHashMap; + +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; + +#[cfg(feature = "slp")] +use num_traits::One; +#[cfg(feature = "incr_lp")] +use parking_lot::Mutex; +#[cfg(feature = "incr_lp")] +use std::ops::Index; + +#[cfg(all(feature = "incr_lp", feature = "highs"))] +pub struct IncrLPSolution { + pub edge_constraints: BTreeMap)>, + pub edge_row_map: BTreeMap, + pub dv_col_map: BTreeMap, + pub solution: Option, +} + +#[cfg(all(feature = "incr_lp", feature = "highs"))] +impl IncrLPSolution { + pub fn constraints_len(&self) -> usize { + self.edge_row_map.len() + self.dv_col_map.len() + } +} + +#[cfg(all(feature = "incr_lp", feature = "highs"))] +unsafe impl Send for IncrLPSolution {} + +#[derive(Default, Debug)] +pub enum OptimizerResult { + #[default] + Init, + Optimized, // normal + EarlyReturned, // early return when the result is positive + Skipped, // when the `should_optimize` check returns false +} + +impl OptimizerResult { + pub fn or(&mut self, other: Self) { + match self { + OptimizerResult::EarlyReturned => {} + _ => match other { + OptimizerResult::Init => {} + OptimizerResult::EarlyReturned => { + *self = OptimizerResult::EarlyReturned; + } + OptimizerResult::Skipped => { + *self = OptimizerResult::Skipped; + } + _ => {} + }, + } + } +} + #[derive(Derivative)] #[derivative(Default(new = "true"))] pub struct RelaxerOptimizer { @@ -71,12 +130,13 @@ impl RelaxerOptimizer { true } + #[cfg(not(feature = "float_lp"))] pub fn optimize( &mut self, relaxer: Relaxer, edge_slacks: BTreeMap, mut dual_variables: BTreeMap, Rational>, - ) -> Relaxer { + ) -> (Relaxer, bool) { for invalid_subgraph in relaxer.get_direction().keys() { if !dual_variables.contains_key(invalid_subgraph) { dual_variables.insert(invalid_subgraph.clone(), Rational::zero()); @@ -137,37 +197,330 @@ impl RelaxerOptimizer { .map(|constraint| constraint.to_string()) .collect::>() .join(",\n"); + + // println!("\n input:\n {}\n", input); + let mut solver = slp::Solver::>::new(&input); let solution = solver.solve(); let mut direction: BTreeMap, Rational> = BTreeMap::new(); match solution { slp::Solution::Optimal(optimal_objective, model) => { if !optimal_objective.is_positive() { - return relaxer; + return (relaxer, true); } - for (var_index, (invalid_subgraph, _)) in dual_variables.iter().enumerate() { + for (var_index, (invalid_subgraph, _)) in dual_variables.into_iter().enumerate() { let overall_growth = model[var_index].clone() - model[var_index + x_vars.len()].clone(); if !overall_growth.is_zero() { - direction.insert( - invalid_subgraph.clone(), - Rational::from_str(&overall_growth.numer().to_string()).unwrap() - / Rational::from_str(&overall_growth.denom().to_string()).unwrap(), - ); + // println!("overall_growth: {:?}", overall_growth); + direction.insert(invalid_subgraph, overall_growth); } } } _ => unreachable!(), } self.relaxers.insert(relaxer); - Relaxer::new(direction) + (Relaxer::new(direction), false) + } + + #[cfg(feature = "float_lp")] + // the same method, but with f64 weight + pub fn optimize( + &mut self, + relaxer: Relaxer, + edge_slacks: BTreeMap, + mut dual_variables: BTreeMap, Rational>, + ) -> (Relaxer, bool) { + use highs::{HighsModelStatus, RowProblem, Sense}; + use num_traits::ToPrimitive; + + use crate::ordered_float::OrderedFloat; + + for invalid_subgraph in relaxer.get_direction().keys() { + if !dual_variables.contains_key(invalid_subgraph) { + dual_variables.insert(invalid_subgraph.clone(), OrderedFloat::zero()); + } + } + + let mut model = RowProblem::default().optimise(Sense::Maximise); + model.set_option("parallel", "off"); + model.set_option("threads", 1); + + let mut x_vars = vec![]; + let mut y_vars = vec![]; + let mut invalid_subgraphs = Vec::with_capacity(dual_variables.len()); + let mut edge_contributor: BTreeMap> = + edge_slacks.keys().map(|edge_index| (edge_index.clone(), vec![])).collect(); + + for (var_index, (invalid_subgraph, dual_variable)) in dual_variables.iter().enumerate() { + // constraint of the dual variable >= 0 + let x = model.add_col(1.0, 0.0.., []); + let y = model.add_col(-1.0, 0.0.., []); + x_vars.push(x); + y_vars.push(y); + + // constraint for xs ys <= dual_variable + model.add_row( + ..dual_variable.to_f64().unwrap(), + [(x_vars[var_index], -1.0), (y_vars[var_index], 1.0)], + ); + invalid_subgraphs.push(invalid_subgraph.clone()); + + for edge_index in invalid_subgraph.hair.iter() { + edge_contributor.get_mut(&edge_index).unwrap().push(var_index); + } + } + + for (edge_index, &slack) in edge_slacks.iter() { + let mut row_entries = vec![]; + for &var_index in edge_contributor[&edge_index].iter() { + row_entries.push((x_vars[var_index], 1.0)); + row_entries.push((y_vars[var_index], -1.0)); + } + + // constraint of edge: sum(y_S) <= weight + model.add_row(..=slack.to_f64().unwrap(), row_entries); + } + + let solved = model.solve(); + + let mut direction: BTreeMap, OrderedFloat> = BTreeMap::new(); + if solved.status() == HighsModelStatus::Optimal { + let solution = solved.get_solution(); + + // calculate the objective function + let mut res = OrderedFloat::new(0.0); + let cols = solution.columns(); + for i in 0..x_vars.len() { + res += OrderedFloat::new(cols[2 * i] - cols[2 * i + 1]); + } + + // check positivity of the objective + if !(res.is_positive()) { + return (relaxer, true); + } + + for (var_index, invalid_subgraph) in invalid_subgraphs.iter().enumerate() { + let overall_growth = cols[2 * var_index] - cols[2 * var_index + 1]; + if !overall_growth.is_zero() { + direction.insert(invalid_subgraph.clone(), OrderedFloat::from(overall_growth)); + } + } + } else { + println!("solved status: {:?}", solved.status()); + unreachable!(); + } + + self.relaxers.insert(relaxer); + (Relaxer::new(direction), false) + } + + #[cfg(all(feature = "float_lp", feature = "incr_lp"))] + // the same method, but with f64 weight + pub fn optimize_incr( + &mut self, + relaxer: Relaxer, + edge_free_weights: BTreeMap, + dual_nodes: BTreeMap, Rational)>, + option_incr_lp_solution: &mut Option>>, + ) -> (Relaxer, bool) { + use highs::{HighsModelStatus, RowProblem, Sense}; + use num_traits::ToPrimitive; + + use crate::ordered_float::OrderedFloat; + + return match option_incr_lp_solution { + Some(incr_lp_solution) => { + let mut incr_lp_solution_ptr = incr_lp_solution.lock(); + let mut model: highs::Model = incr_lp_solution_ptr.solution.take().unwrap().into(); + + let mut edge_contributor: BTreeMap)> = edge_free_weights + .iter() + .map(|(&edge_index, &edge_free_weight)| (edge_index, (edge_free_weight, BTreeSet::new()))) + .collect(); + + for (dual_node_index, (invalid_subgraph, _)) in dual_nodes.iter() { + for edge_index in invalid_subgraph.hair.iter() { + edge_contributor + .get_mut(&edge_index) + .unwrap() + .1 + .insert(dual_node_index.clone()); + } + if incr_lp_solution_ptr.dv_col_map.contains_key(dual_node_index) { + continue; + } + let col = model.add_col(1.0, 0.0.., []); + + incr_lp_solution_ptr.dv_col_map.insert(dual_node_index.clone(), col); + } + + let mut new_edges = BTreeSet::new(); + let mut update_deges_weight = BTreeSet::new(); + let mut update_edges_contributors = BTreeSet::new(); + + // get difference between edges + for (&edge_index, &free_weight) in edge_free_weights.iter() { + match incr_lp_solution_ptr.edge_constraints.get(&edge_index) { + Some((_free_weight, _edge_contributors)) => { + if _free_weight != free_weight { + update_deges_weight.insert(edge_index.clone()); + } + if _edge_contributors != &edge_contributor[&edge_index].1 { + update_edges_contributors.insert(edge_index.clone()); + } + } + None => { + new_edges.insert(edge_index.clone()); + } + } + } + + for edge_index in new_edges.into_iter() { + let mut row_entries = vec![]; + for var_index in edge_contributor[&edge_index].1.iter() { + row_entries.push((incr_lp_solution_ptr.dv_col_map[var_index], 1.0)); + } + + // constraint of edge: sum(y_S) <= weight + let row = model.add_row(..=edge_free_weights[&edge_index].to_f64().unwrap(), row_entries); + incr_lp_solution_ptr.edge_row_map.insert(edge_index, row); + } + + for edge_index in update_deges_weight.into_iter() { + let row = incr_lp_solution_ptr.edge_row_map.get(&edge_index).unwrap(); + model.change_row_bounds(*row, ..=edge_free_weights[&edge_index].to_f64().unwrap()); + } + + for edge_index in update_edges_contributors.into_iter() { + let row = incr_lp_solution_ptr.edge_row_map.get(&edge_index).unwrap(); + let diff = edge_contributor[&edge_index] + .1 + .difference(&incr_lp_solution_ptr.edge_constraints[&edge_index].1); + for invalid_subgraph in diff { + model.change_matrix_coefficient(*row, incr_lp_solution_ptr.dv_col_map[invalid_subgraph], 1.0) + } + } + + let solved = model.solve(); + + let mut direction: BTreeMap, OrderedFloat> = BTreeMap::new(); + if solved.status() == HighsModelStatus::Optimal { + let solution = solved.get_solution(); + + // calculate the objective function + let new_dual_variable_sum = OrderedFloat::from(solution.columns().iter().sum::()); + + let delta: OrderedFloat = + new_dual_variable_sum - dual_nodes.values().map(|(_, grow_rate)| grow_rate).sum::(); + + // check positivity of the objective + if !(delta.is_positive()) { + incr_lp_solution_ptr.solution = Some(solved); + return (relaxer, true); + } + + for (node_index, (invalid_subgraph, dv)) in dual_nodes.iter() { + let overall_growth = + OrderedFloat::from(*solution.index(incr_lp_solution_ptr.dv_col_map[node_index])) - dv; + if !overall_growth.is_zero() { + direction.insert(invalid_subgraph.clone(), overall_growth); + } + } + } else { + println!("solved status: {:?}", solved.status()); + unreachable!(); + } + + incr_lp_solution_ptr.solution = Some(solved); + incr_lp_solution_ptr.edge_constraints = edge_contributor; + + self.relaxers.insert(relaxer); + (Relaxer::new(direction), false) + } + None => { + let mut model = RowProblem::default().optimise(Sense::Maximise); + model.set_option("parallel", "off"); + model.set_option("threads", 1); + + let mut edge_row_map: BTreeMap = BTreeMap::new(); + let mut dv_col_map: BTreeMap = BTreeMap::new(); + + let mut edge_contributor: BTreeMap)> = edge_free_weights + .iter() + .map(|(&edge_index, &edge_free_weight)| (edge_index, (edge_free_weight, BTreeSet::new()))) + .collect(); + + for (dual_node_index, (invalid_subgraph, _)) in dual_nodes.iter() { + // constraint of the dual variable >= 0 + let col = model.add_col(1.0, 0.0.., []); + + dv_col_map.insert(dual_node_index.clone(), col); + + for &edge_index in invalid_subgraph.hair.iter() { + edge_contributor + .get_mut(&edge_index) + .unwrap() + .1 + .insert(dual_node_index.clone()); + } + } + + for (&edge_index, &free_weight) in edge_free_weights.iter() { + let mut row_entries = vec![]; + for var_index in edge_contributor[&edge_index].1.iter() { + row_entries.push((dv_col_map[var_index], 1.0)); + } + + // constraint of edge: sum(y_S) <= weight + let row = model.add_row(..=free_weight.to_f64().unwrap(), row_entries); + edge_row_map.insert(edge_index, row); + } + + let solved = model.solve(); + + let mut direction: BTreeMap, OrderedFloat> = BTreeMap::new(); + if solved.status() == HighsModelStatus::Optimal { + let solution = solved.get_solution(); + + // calculate the objective function + let new_dual_variable_sum = OrderedFloat::from(solution.columns().iter().sum::()); + + let delta: OrderedFloat = + new_dual_variable_sum - dual_nodes.values().map(|(_, grow_rate)| grow_rate).sum::(); + + // check positivity of the objective + if !(delta.is_positive()) { + return (relaxer, true); + } + + for (node_index, (invalid_subgraph, dv)) in dual_nodes.iter() { + let overall_growth = OrderedFloat::from(*solution.index(dv_col_map[node_index])) - dv; + if !overall_growth.is_zero() { + direction.insert(invalid_subgraph.clone(), overall_growth); + } + } + } else { + println!("solved status: {:?}", solved.status()); + unreachable!(); + } + + *option_incr_lp_solution = Some(Arc::new(Mutex::new(IncrLPSolution { + edge_constraints: edge_contributor, + edge_row_map, + dv_col_map, + solution: Some(solved), + }))); + + self.relaxers.insert(relaxer); + (Relaxer::new(direction), false) + } + }; } } #[cfg(test)] -#[cfg(feature = "highs")] pub mod tests { // use super::*; - use highs::{ColProblem, HighsModelStatus, Model, Sense}; // #[test] // fn relaxer_optimizer_simple() { @@ -175,43 +528,53 @@ pub mod tests { // let mut relaxer_optimizer = RelaxerOptimizer::new(); // } + #[cfg(feature = "slp")] #[test] - fn lp_solver_simple() { - // cargo test lp_solver_simple -- --nocapture - // https://docs.rs/slp/latest/slp/ - let input = " - vars x1>=0, y2>=0 - max 2x1+3y2 - subject to - 2x1 + y2 <= 18, - 6x1 + 5y2 <= 60, - 2x1 + 5y2 <= 40 - "; - let mut solver = slp::Solver::::new(input); - let solution = solver.solve(); - assert_eq!( - solution, - slp::Solution::Optimal( - slp::Rational::from_integer(28), - vec![slp::Rational::from_integer(5), slp::Rational::from_integer(6)] - ) - ); - match solution { - slp::Solution::Infeasible => println!("INFEASIBLE"), - slp::Solution::Unbounded => println!("UNBOUNDED"), - slp::Solution::Optimal(obj, model) => { - println!("OPTIMAL {}", obj); - print!("SOLUTION"); - for v in model { - print!(" {}", v); - } - println!(); - } - } - } + // fn lp_solver_simple() { + // use crate::util::Rational; + // use slp::BigInt; + + // // cargo test lp_solver_simple -- --nocapture + // // https://docs.rs/slp/latest/slp/ + // let input = " + // vars x1>=0, y2>=0 + // max 2x1+3y2 + // subject to + // 2x1 + y2 <= 18, + // 6x1 + 5y2 <= 60, + // 2x1 + 5y2 <= 40 + // "; + // let mut solver = slp::Solver::::new(input); + // let solution = solver.solve(); + // assert_eq!( + // solution, + // slp::Solution::Optimal( + // Rational::from_integer(BigInt::from(28)), + // vec![ + // Rational::from_integer(BigInt::from(5)), + // Rational::from_integer(BigInt::from(6)) + // ] + // ) + // ); + // match solution { + // slp::Solution::Infeasible => println!("INFEASIBLE"), + // slp::Solution::Unbounded => println!("UNBOUNDED"), + // slp::Solution::Optimal(obj, model) => { + // println!("OPTIMAL {}", obj); + // print!("SOLUTION"); + // for v in model { + // print!(" {}", v); + // } + // println!(); + // } + // } + // } + #[cfg(feature = "highs")] #[test] fn highs_simple() { + use highs::{ColProblem, HighsModelStatus, Model, Sense}; + let mut model = ColProblem::default().optimise(Sense::Maximise); let row1 = model.add_row(..=6., []); // x*3 + y*1 <= 6 let row2 = model.add_row(..=7., []); // y*1 + z*2 <= 7 @@ -249,4 +612,185 @@ pub mod tests { assert_eq!(solution.rows(), vec![6., 7., 10.]); // model.add_row(..=6, row_factors); } + + #[cfg(feature = "highs")] + #[test] + fn highs_change_incr() { + use highs::{ColProblem, HighsModelStatus, Model, Sense}; + // max: x + 2y + z + // under constraints: + // c1: 3x + y <= 6 + // c2: y + 2z <= 7 + + let mut model = ColProblem::default().optimise(Sense::Maximise); + let c1 = model.add_row(..6., []); + let c2 = model.add_row(..7., []); + // x + model.add_col(1., (0.).., [(c1, 3.)]); + // y + model.add_col(2., (0.).., [(c1, 1.), (c2, 1.)]); + // z + model.add_col(1., (0.).., [(c2, 2.)]); + + let solved = model.solve(); + + assert_eq!(solved.status(), HighsModelStatus::Optimal); + + let solution = solved.get_solution(); + // The expected solution is x=0 y=6 z=0.5 + assert_eq!(solution.columns(), vec![0., 6., 0.5]); + // All the constraints are at their maximum + assert_eq!(solution.rows(), vec![6., 7.]); + + // Now we want to change the problem and solve it on top of it + let mut model: Model = solved.into(); + + // modify row c2 to be y + 2z <= 10 + // Now: + // max: x + 2y + z + // under constraints: + // c1: 3x + y <= 6 + // c2: y + 2z <= 10 + model.change_row_bounds(c2, ..10.); + + let solved = model.solve(); + + assert_eq!(solved.status(), HighsModelStatus::Optimal); + + let solution = solved.get_solution(); + // The expected solution is x=0 y=6 z=2 + assert_eq!(solution.columns(), vec![0., 6., 2.]); + // All the constraints are at their maximum + assert_eq!(solution.rows(), vec![6., 10.]); + } + + #[cfg(feature = "highs")] + #[test] + fn highs_change_incr_coeff() { + use highs::{HighsModelStatus, Model, RowProblem, Sense}; + // max: x + 2y + z + // under constraints: + // c1: 3x + y <= 6 + // c2: y + 2z <= 7 + + let mut model = RowProblem::default().optimise(Sense::Maximise); + // x + let x = model.add_col(1., (0.).., []); + // y + let y = model.add_col(2., (0.).., []); + // z + let z = model.add_col(1., (0.).., []); + + let c1 = model.add_row(..6., [(x, 3.), (y, 1.)]); + let c2 = model.add_row(..7., [(y, 1.), (z, 2.)]); + + let solved = model.solve(); + + assert_eq!(solved.status(), HighsModelStatus::Optimal); + + let solution = solved.get_solution(); + // The expected solution is x=0 y=6 z=0.5 + assert_eq!(solution.columns(), vec![0., 6., 0.5]); + // All the constraints are at their maximum + assert_eq!(solution.rows(), vec![6., 7.]); + + // Now we want to change the problem and solve it on top of it + let mut model: Model = solved.into(); + + // modify row c2 to be y + 2z <= 10 + // Now: + // max: x + 2y + z + a + // under constraints: + // c1: 3x + y <= 6 + // c2: y + 3z + a <= 10 + model.change_row_bounds(c2, ..10.); + + let a = model.add_col(1., (0.).., []); + model.change_matrix_coefficient(c2, a, 1.); + + let solved = model.solve(); + + assert_eq!(solved.status(), HighsModelStatus::Optimal); + + let solution = solved.get_solution(); + // The expected solution is x=0 y=6 z=2 + assert_eq!(solution.columns(), vec![0., 6., 0., 4.]); + // All the constraints are at their maximum + assert_eq!(solution.rows(), vec![6., 10.]); + } + + #[cfg(feature = "highs")] + #[test] + fn highs_change_matrix_coefficient() { + use highs::{ColProblem, HighsModelStatus, Model, Sense}; + + // Create initial problem + let mut model = ColProblem::default().optimise(Sense::Maximise); + let c1 = model.add_row(..=6., []); + let c2 = model.add_row(..=7., []); + let x = model.add_col(1., (0.).., [(c1, 3.)]); + let _y = model.add_col(2., (0.).., [(c1, 1.), (c2, 1.)]); + let z = model.add_col(1., (0.).., [(c2, 2.)]); + + let solved = model.solve(); + assert_eq!(solved.status(), HighsModelStatus::Optimal); + + let solution = solved.get_solution(); + assert_eq!(solution.columns(), vec![0., 6., 0.5]); + assert_eq!(solution.rows(), vec![6., 7.]); + + // Change a coefficient in the constraint matrix + let mut model: Model = solved.into(); + model.change_matrix_coefficient(c1, x, 1.0); + + let solved = model.solve(); + assert_eq!(solved.status(), HighsModelStatus::Optimal); + + let solution = solved.get_solution(); + assert_eq!(solution.columns(), vec![0., 6., 0.5]); + assert_eq!(solution.rows(), vec![6., 7.]); + + let mut model: Model = solved.into(); + + // Change another coefficient in the constraint matrix + model.change_matrix_coefficient(c2, z, 1.0); + + let solved = model.solve(); + assert_eq!(solved.status(), HighsModelStatus::Optimal); + + let solution = solved.get_solution(); + // The expected solution should change due to the modification + // Let's assume the new expected solution is x=0, y=6, z=1 + assert_eq!(solution.columns(), vec![0., 6., 1.]); + assert_eq!(solution.rows(), vec![6., 7.]); + } + + #[cfg(feature = "highs")] + #[test] + fn highs_change_matrix_coefficient_with_infeasibility() { + use highs::{ColProblem, HighsModelStatus, Model, Sense}; + + // Create initial problem + let mut model = ColProblem::default().optimise(Sense::Maximise); + let c1 = model.add_row(..=6., []); + let c2 = model.add_row(..=7., []); + let x = model.add_col(1., (0.).., [(c1, 3.)]); + let _y = model.add_col(2., (0.).., [(c1, 1.), (c2, 1.)]); + let lazy_staticz = model.add_col(1., (0.).., [(c2, 2.)]); + + let solved = model.solve(); + assert_eq!(solved.status(), HighsModelStatus::Optimal); + + let solution = solved.get_solution(); + assert_eq!(solution.columns(), vec![0., 6., 0.5]); + assert_eq!(solution.rows(), vec![6., 7.]); + + // Change a coefficient to create an infeasible problem + let mut model: Model = solved.into(); + model.change_matrix_coefficient(c1, x, 10.0); + model.change_col_bounds(x, 1.7..); + + let solved = model.solve(); + assert_eq!(solved.status(), HighsModelStatus::Infeasible); + } } diff --git a/src/slp/.gitignore b/src/slp/.gitignore new file mode 100644 index 00000000..f0e3bcac --- /dev/null +++ b/src/slp/.gitignore @@ -0,0 +1,2 @@ +/target +**/*.rs.bk \ No newline at end of file diff --git a/src/slp/Cargo.toml b/src/slp/Cargo.toml new file mode 100644 index 00000000..6522f193 --- /dev/null +++ b/src/slp/Cargo.toml @@ -0,0 +1,41 @@ +[badges.maintenance] +status = "deprecated" +[dependencies.num-traits] +version = "0.2.15" + +[dependencies.pest] +version = "2.1" + +[dependencies.pest_derive] +version = "2.1" + +[dependencies.rayon] +version = "1.4" + +[dependencies.structopt] +version = "0.3" + +[dependencies.num-rational] +version = "0.4.1" + +[dependencies.num-bigint] +version = "0.4.5" + +[package] +authors = ["Prateek Kumar "] +categories = ["science"] +description = "Linear Programming Solver\n" +documentation = "https://docs.rs/slp/" +edition = "2018" +exclude = ["/.vscode/", "/.github/"] +homepage = "https://docs.rs/crate/slp/" +keywords = ["Optimization", "Linear", "Programming", "Solver", "Math"] +license = "MIT" +name = "slp" +readme = "README.md" +repository = "https://github.com/solhop/slp" +version = "0.2.0" + +[lib] +name = "slp" +path = "src/lib.rs" diff --git a/src/slp/LICENSE b/src/slp/LICENSE new file mode 100644 index 00000000..b4a1e3e8 --- /dev/null +++ b/src/slp/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019-2020 Prateek Kumar + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/src/slp/src/common.rs b/src/slp/src/common.rs new file mode 100644 index 00000000..79d20a74 --- /dev/null +++ b/src/slp/src/common.rs @@ -0,0 +1,114 @@ +use crate::num_traits::{One, Zero}; +use crate::*; + +/// Number trait used in this library. +pub trait Number: + Clone + + Send + + Sync + + One + + Zero + + std::str::FromStr + + std::ops::Neg + + std::ops::Add + + std::ops::Sub + + std::ops::Mul + + std::ops::Div + + std::ops::AddAssign + + std::ops::SubAssign + + std::ops::MulAssign + + std::ops::DivAssign + + std::cmp::PartialOrd + + std::fmt::Debug + + std::fmt::Display +{ + /// Returns greatest integer less than or equal to. + fn floor(&self) -> Self; + /// Returns least integer greater than or equal to. + fn ceil(&self) -> Self; + /// Checks if it is an integer. + fn is_integer(&self) -> bool; +} + +impl Number for f32 { + fn floor(&self) -> Self { + f32::floor(*self) + } + fn ceil(&self) -> Self { + f32::ceil(*self) + } + fn is_integer(&self) -> bool { + self.fract().abs() <= std::f32::EPSILON + } +} + +impl Number for f64 { + fn floor(&self) -> Self { + f64::floor(*self) + } + fn ceil(&self) -> Self { + f64::ceil(*self) + } + fn is_integer(&self) -> bool { + self.fract().abs() <= std::f64::EPSILON + } +} + +impl Number for Rational32 { + fn floor(&self) -> Self { + Rational32::floor(self) + } + fn ceil(&self) -> Self { + Rational32::ceil(self) + } + fn is_integer(&self) -> bool { + Rational32::is_integer(self) + } +} + +impl Number for Rational64 { + fn floor(&self) -> Self { + Rational64::floor(self) + } + fn ceil(&self) -> Self { + Rational64::ceil(self) + } + fn is_integer(&self) -> bool { + Rational64::is_integer(self) + } +} + +impl Number for Ratio { + fn floor(&self) -> Self { + Self::floor(self) + } + fn ceil(&self) -> Self { + Self::ceil(self) + } + fn is_integer(&self) -> bool { + Self::is_integer(self) + } +} + +/// Solution to an LP instance as returned by +/// the solve method of an LP instance. +#[derive(Debug, PartialEq)] +pub enum Solution { + /// Represents that LP is infeasible. + Infeasible, + /// Represents that LP is unbounded. + Unbounded, + /// The first value is the optimal value of the objective and + /// the second value is the assignment. + Optimal(T, Vec), +} + +/// Solver settings that can be passed to the solver instance. +pub enum SolverSettings { + /// Enables data parallelism while solving. + EnableDataParallelism, +} + +pub(crate) struct SolverOptions { + pub parallel: bool, +} diff --git a/src/slp/src/lib.rs b/src/slp/src/lib.rs new file mode 100644 index 00000000..897f2836 --- /dev/null +++ b/src/slp/src/lib.rs @@ -0,0 +1,59 @@ +//! `slp` is a Linear Programming Solver. +//! +//! To see the usage docs, visit [here](https://docs.rs/crate/slp/). +//! +//! ## An example +//! +//! ```rust +//! fn main() { +//! use slp::*; +//! use slp::Rational64; +//! use slp::Solution; +//! let input = " +//! vars x1>=0, x2>=0 +//! max 2x1+3x2 +//! subject to +//! 2x1 + x2 <= 18, +//! 6x1 + 5x2 <= 60, +//! 2x1 + 5x2 <= 40 +//! "; +//! let mut solver = Solver::::new(&input); +//! let solution = solver.solve(); +//! assert_eq!(solution, Solution::Optimal(Rational64::from_integer(28), vec![ +//! Rational64::from_integer(5), +//! Rational64::from_integer(6) +//! ])); +//! match solution { +//! Solution::Infeasible => println!("INFEASIBLE"), +//! Solution::Unbounded => println!("UNBOUNDED"), +//! Solution::Optimal(obj, model) => { +//! println!("OPTIMAL {}", obj); +//! print!("SOLUTION"); +//! for v in model { +//! print!(" {}", v); +//! } +//! println!(); +//! } +//! } +//! } +//! ``` + +#![deny(missing_docs)] + +#[macro_use] +extern crate pest_derive; + +mod common; +pub use common::*; +mod lp; + +pub use num_bigint::BigInt; +pub use num_rational::{BigRational, Ratio, Rational32, Rational64}; +pub use num_traits; + +/// A General Linear Programming Solver. +mod solver; +pub use solver::*; + +/// Parser module for Linear Programming Problems. +pub mod parser; diff --git a/src/slp/src/lp.rs b/src/slp/src/lp.rs new file mode 100644 index 00000000..9d2df621 --- /dev/null +++ b/src/slp/src/lp.rs @@ -0,0 +1,295 @@ +use crate::{Number, Solution}; +use rayon::prelude::*; + +/// Represents an LP instance. +pub struct LP { + pub n_constraints: usize, + pub n_vars: usize, + pub basic_indices: Vec, + pub tableau: Vec>, // Row major format +} + +impl LP { + pub fn solve(&mut self, parallel: bool) -> Solution { + if self.is_dual_feasible() { + return self.dual_simplex(parallel); + } + + let mut is_b_negative = vec![false; self.n_constraints + 1]; + let no_b_negative = if parallel { + is_b_negative[1..] + .par_iter_mut() + .zip(&self.tableau[1..]) + .map(|(is_b_i_neg, constr)| { + if constr[0] < N::zero() { + *is_b_i_neg = true; + 1 + } else { + 0 + } + }) + .sum() + } else { + is_b_negative[1..] + .iter_mut() + .zip(&self.tableau[1..]) + .map(|(is_b_i_neg, constr)| { + if constr[0] < N::zero() { + *is_b_i_neg = true; + 1 + } else { + 0 + } + }) + .sum() + }; + + let tot_col = self.tableau[0].len(); + if no_b_negative != 0 { + let mut auxi_lp = self.create_auxi_lp(is_b_negative, no_b_negative); + match auxi_lp.simplex(parallel) { + Solution::Infeasible => return Solution::Infeasible, + Solution::Unbounded => return Solution::Unbounded, + Solution::Optimal(obj, _) => { + if obj != N::zero() { + return Solution::Infeasible; + } + if parallel { + self.tableau[1..=self.n_constraints] + .par_iter_mut() + .zip(&auxi_lp.tableau[1..=self.n_constraints]) + .for_each(|(t, a)| { + t[..tot_col].clone_from_slice(&a[..tot_col]); + }); + self.basic_indices + .par_iter_mut() + .zip(&auxi_lp.basic_indices) + .for_each(|(b, &a)| { + *b = a; + }); + } else { + self.tableau[1..=self.n_constraints] + .iter_mut() + .zip(&auxi_lp.tableau[1..=self.n_constraints]) + .for_each(|(t, a)| { + t[..tot_col].clone_from_slice(&a[..tot_col]); + }); + self.basic_indices + .iter_mut() + .zip(&auxi_lp.basic_indices) + .for_each(|(b, &a)| { + *b = a; + }); + } + for i in 1..=self.n_constraints { + let multipler = self.tableau[0][self.basic_indices[i]].clone(); + for j in 0..tot_col { + let num_to_sub = multipler.clone() * self.tableau[i][j].clone(); + self.tableau[0][j] -= num_to_sub; + } + } + } + } + } + self.simplex(parallel) + } + + pub fn create_auxi_lp(&self, is_b_negative: Vec, no_b_negative: usize) -> LP { + let mut tableau = vec![]; + let tot_col = self.tableau[0].len(); + + tableau.push(vec![]); + + let mut curr_neg_index = 1; + for (i, &is_b_i_neg) in is_b_negative.iter().enumerate() { + if i == 0 { + continue; + } + let mut row = vec![]; + for j in 0..tot_col { + row.push(if is_b_i_neg { + -self.tableau[i][j].clone() + } else { + self.tableau[i][j].clone() + }); + } + for j in 1..=no_b_negative { + if is_b_i_neg && curr_neg_index == j { + row.push(N::one()); + } else { + row.push(N::zero()); + } + } + if is_b_i_neg { + curr_neg_index += 1; + } + tableau.push(row); + } + + let mut auxi_obj = vec![N::zero(); tot_col + no_b_negative]; + for j in 1..=self.n_constraints { + if is_b_negative[j] { + for (k, v) in auxi_obj.iter_mut().enumerate() { + *v -= tableau[j][k].clone(); + } + } + } + for j in 0..no_b_negative { + auxi_obj[tot_col + j] = N::one(); + } + tableau[0] = auxi_obj; + + let mut auxi_basic_indices = self.basic_indices.clone(); + let mut curr_neg_index = 0; + for (j, &v) in is_b_negative.iter().enumerate() { + if v { + auxi_basic_indices[j] = tot_col + curr_neg_index; + curr_neg_index += 1; + } + } + + LP { + n_constraints: self.n_constraints, + n_vars: self.n_vars + no_b_negative, + basic_indices: auxi_basic_indices, + tableau, + } + } + + pub fn simplex(&mut self, parallel: bool) -> Solution { + loop { + let mut entering_var = 1; + for (i, v) in self.tableau[0].iter().enumerate() { + if *v < N::zero() && i != 0 && *v < self.tableau[0][entering_var] { + entering_var = i; + } + } + + if self.tableau[0][entering_var] >= N::zero() { + let mut model = vec![]; + for i in 1..=self.n_vars { + let mut found = 0; + for (j, &v) in self.basic_indices.iter().enumerate() { + if i != 0 && i == v { + found = j; + break; + } + } + if found == 0 { + model.push(N::zero()); + } else { + model.push(self.tableau[found][0].clone()); + } + } + break Solution::Optimal(self.tableau[0][0].clone(), model); + } + + let mut leaving_var = 1; + for i in 1..=self.n_constraints { + if self.tableau[i][entering_var] > N::zero() + && (self.tableau[leaving_var][entering_var] <= N::zero() + || self.tableau[i][0].clone() / self.tableau[i][entering_var].clone() + < self.tableau[leaving_var][0].clone() + / self.tableau[leaving_var][entering_var].clone()) + { + leaving_var = i; + } + } + + if self.tableau[leaving_var][entering_var] <= N::zero() { + break Solution::Unbounded; + } + + LP::pivot(&mut self.tableau, entering_var, leaving_var, parallel); + self.basic_indices[leaving_var] = entering_var; + } + } + + pub fn dual_simplex(&mut self, parallel: bool) -> Solution { + loop { + let mut leaving_var = 1; + for i in 2..=self.n_constraints { + if self.tableau[i][0] < self.tableau[leaving_var][0] { + leaving_var = i; + } + } + + if self.tableau[leaving_var][0] >= N::zero() { + let mut model = vec![]; + for i in 1..=self.n_vars { + let mut found = 0; + for (j, &v) in self.basic_indices.iter().enumerate() { + if i != 0 && i == v { + found = j; + break; + } + } + if found == 0 { + model.push(N::zero()); + } else { + model.push(self.tableau[found][0].clone()); + } + } + break Solution::Optimal(self.tableau[0][0].clone(), model); + } + + let mut entering_var = 1; + for i in 1..self.tableau[0].len() { + if self.tableau[leaving_var][entering_var] == N::zero() { + entering_var = i; + continue; + } + if self.tableau[leaving_var][i] < N::zero() + && (-self.tableau[0][i].clone() / self.tableau[leaving_var][i].clone() + < -self.tableau[0][entering_var].clone() + / self.tableau[leaving_var][entering_var].clone()) + { + entering_var = i; + } + } + + if self.tableau[leaving_var][entering_var] >= N::zero() { + break Solution::Infeasible; + } + + LP::pivot(&mut self.tableau, entering_var, leaving_var, parallel); + self.basic_indices[leaving_var] = entering_var; + } + } + + pub fn pivot( + tableau: &mut Vec>, + entering_var: usize, + leaving_var: usize, + parallel: bool, + ) { + let pivot_coeff = tableau[leaving_var][entering_var].clone(); + if parallel { + tableau[leaving_var].par_iter_mut().for_each(|v| { + *v /= pivot_coeff.clone(); + }); + } else { + tableau[leaving_var].iter_mut().for_each(|v| { + *v /= pivot_coeff.clone(); + }); + } + for k in 0..tableau.len() { + if k != leaving_var { + let multiplier = tableau[k][entering_var].clone(); + for i in 0..tableau[k].len() { + let num_to_sub = multiplier.clone() * tableau[leaving_var][i].clone(); + tableau[k][i] -= num_to_sub; + } + } + } + } + + pub fn is_dual_feasible(&self) -> bool { + for v in &self.tableau[0] { + if *v < N::zero() { + return false; + } + } + true + } +} \ No newline at end of file diff --git a/src/slp/src/parser/grammar.pest b/src/slp/src/parser/grammar.pest new file mode 100644 index 00000000..8a081b53 --- /dev/null +++ b/src/slp/src/parser/grammar.pest @@ -0,0 +1,26 @@ +WHITESPACE = _{ " " | "\t" | NEWLINE } +COMMENT = _{ "/*" ~ (!"*/" ~ ANY)* ~ "*/" } +keyword = @{ "max" | "min" | "subject" | "to" } +identifier = @{ !(keyword) ~ ASCII_ALPHA ~ (ASCII_ALPHANUMERIC | "_")* } +lteq = @{ "<=" } +gteq = @{ ">=" } +operator = @{ lteq | gteq } +number = @{ (ASCII_DIGIT|"."|"/")+ } +pos_number = @{ "+" ~ number } +neg_number = @{ "-" ~ number } +coefficient = { number? } +unsigned_term = { coefficient ~ identifier } +pos_signed_term = { "+" ~ unsigned_term } +neg_signed_term = { "-" ~ unsigned_term } +signed_term = { pos_signed_term | neg_signed_term } +expression = { (signed_term | unsigned_term) ~ signed_term* } +max_objective = { ("max") ~ expression } +min_objective = { ("min") ~ expression } +objective = { max_objective | min_objective } +constraint = { expression ~ operator ~ (number | pos_number | neg_number) } +constraints = { constraint ~ ("," ~ constraint)* } +variable_real = { identifier ~ ">=" ~ "0" } +variable_int = { identifier ~ "(" ~ "Z" ~ ")" ~ ">=" ~ "0" } +variable = { variable_real | variable_int } +variables = { "vars" ~ variable ~ ( "," ~ variable)* } +lp_problem = { SOI ~ variables ~ objective ~ "subject" ~ "to" ~ constraints ~ EOI } \ No newline at end of file diff --git a/src/slp/src/parser/mod.rs b/src/slp/src/parser/mod.rs new file mode 100644 index 00000000..d03c6ab6 --- /dev/null +++ b/src/slp/src/parser/mod.rs @@ -0,0 +1,266 @@ +use crate::Number; +use pest::error::Error; +use pest::Parser; + +mod lp_parser { + #[derive(Parser)] + #[grammar = "parser/grammar.pest"] + pub struct LpParser; +} + +use lp_parser::*; + +/// LP Problem instance. +pub struct LpProblem { + /// Variables list. + pub(crate) vars_list: Vec, + /// ith value is true if ith variable has insteger constraint. + pub(crate) is_int_constraints: Vec, + /// Constraints. + pub(crate) constraints: Vec<(Vec, N)>, + /// Objective to be maximized. + pub(crate) objective: Vec, + /// Objective type. + pub(crate) objective_type: ObjectiveType, +} + +#[derive(PartialEq)] +enum OperatorType { + LtEq, + GtEq, +} + +/// Objective type. +#[derive(PartialEq)] +pub enum ObjectiveType { + /// Maximize. + Max, + /// Minimize. + Min, +} + +enum AstNode { + Lp { + objective: Vec, + constraints: Vec<(Vec, N)>, + }, + Variable(String), + VariableInt(String), + Objective(Vec), + Constraints(Vec<(Vec, N)>), + Constraint(Vec, N), + Expression(Vec), + Term(N, usize), + Number(N), + None, +} + +struct AstInternal { + n_vars: usize, + variables: Vec, + is_int_constraints: Vec, + objective_type: ObjectiveType, +} + +/// Parses LP Problem from given input. +pub fn parse_lp_problem(input: &str) -> Result, Error> +where + N: Number, + N::Err: std::fmt::Debug, +{ + let lp_problem = LpParser::parse(Rule::lp_problem, input)?.next().unwrap(); + use pest::iterators::Pair; + + let mut internal = AstInternal { + n_vars: 0, + variables: vec![], + is_int_constraints: vec![], + objective_type: ObjectiveType::Max, + }; + + fn parse_pair(pair: Pair, internal: &mut AstInternal) -> AstNode + where + N: Number, + N::Err: std::fmt::Debug, + { + match pair.as_rule() { + Rule::lp_problem => { + let mut inner_pairs = pair.into_inner(); + let vars_rule = inner_pairs.next().unwrap(); + parse_pair::(vars_rule, internal); + let obj_rule = inner_pairs.next().unwrap(); + let obj = parse_pair(obj_rule, internal); + let cons_rule = inner_pairs.next().unwrap(); + let cons = parse_pair(cons_rule, internal); + AstNode::Lp { + objective: if let AstNode::Objective(vs) = obj { + vs + } else { + unreachable!() + }, + constraints: if let AstNode::Constraints(cs) = cons { + cs + } else { + unreachable!() + }, + } + } + Rule::variables => { + let mut vars = vec![]; + let mut is_int_constraints = vec![]; + for rule in pair.into_inner() { + match parse_pair::(rule, internal) { + AstNode::Variable(var) => { + vars.push(var); + is_int_constraints.push(false); + } + AstNode::VariableInt(var) => { + vars.push(var); + is_int_constraints.push(true); + } + _ => unreachable!(), + } + } + internal.n_vars = vars.len(); + internal.variables = vars; + internal.is_int_constraints = is_int_constraints; + AstNode::None + } + Rule::variable_real => { + let var = pair.into_inner().next().unwrap().as_str(); + AstNode::Variable(var.to_string()) + } + Rule::variable_int => { + let var = pair.into_inner().next().unwrap().as_str(); + AstNode::VariableInt(var.to_string()) + } + Rule::variable => parse_pair(pair.into_inner().next().unwrap(), internal), + Rule::max_objective => { + internal.objective_type = ObjectiveType::Max; + let exp = pair.into_inner().next().unwrap(); + if let AstNode::Expression(exp) = parse_pair(exp, internal) { + AstNode::Objective(exp) + } else { + unreachable!() + } + } + Rule::min_objective => { + let exp = pair.into_inner().next().unwrap(); + internal.objective_type = ObjectiveType::Min; + if let AstNode::Expression(mut exp) = parse_pair::(exp, internal) { + for v in exp.iter_mut() { + *v = -v.clone(); + } + AstNode::Objective(exp) + } else { + unreachable!() + } + } + Rule::objective => parse_pair(pair.into_inner().next().unwrap(), internal), + Rule::constraints => { + let mut cons = vec![]; + for rule in pair.into_inner() { + if let AstNode::Constraint(exp, rhs) = parse_pair(rule, internal) { + cons.push((exp, rhs)); + } else { + unreachable!() + } + } + AstNode::Constraints(cons) + } + Rule::constraint => { + let mut inner_pairs = pair.into_inner(); + let exp_rule = inner_pairs.next().unwrap(); + let exp = parse_pair::(exp_rule, internal); + let opr_rule = inner_pairs.next().unwrap(); + let oper = match opr_rule.as_str() { + "<=" => OperatorType::LtEq, + ">=" => OperatorType::GtEq, + _ => unreachable!(), + }; + let rhs_rule = inner_pairs.next().unwrap(); + let rhs = parse_pair::(rhs_rule, internal); + match (exp, rhs) { + (AstNode::Expression(mut exp), AstNode::Number(mut rhs)) => { + if oper == OperatorType::GtEq { + for t in exp.iter_mut() { + *t = -t.clone(); + } + rhs = -rhs; + } + AstNode::Constraint(exp, rhs) + } + _ => unreachable!(), + } + } + Rule::expression => { + let mut terms = vec![N::zero(); internal.n_vars]; + for rule in pair.into_inner() { + if let AstNode::Term(r, i) = parse_pair(rule, internal) { + terms[i] = r; + } else { + unreachable!(); + } + } + AstNode::Expression(terms) + } + Rule::signed_term => parse_pair(pair.into_inner().next().unwrap(), internal), + Rule::pos_signed_term => parse_pair(pair.into_inner().next().unwrap(), internal), + Rule::neg_signed_term => { + let term = parse_pair::(pair.into_inner().next().unwrap(), internal); + if let AstNode::Term(r, i) = term { + AstNode::Term(-r, i) + } else { + unreachable!() + } + } + Rule::unsigned_term => { + let mut inner_pairs = pair.into_inner(); + let coeff_rule = inner_pairs.next().unwrap(); + if let AstNode::Number(r) = parse_pair(coeff_rule, internal) { + let var = inner_pairs.next().unwrap().as_str(); + let mut index = internal.variables.len(); + for (i, v) in internal.variables.iter().enumerate() { + if v == var { + index = i; + break; + } + } + if index == internal.variables.len() { + panic!("Unknown identifier {}", var); + } + AstNode::Term(r, index) + } else { + unreachable!() + } + } + Rule::coefficient => { + let rule = pair.into_inner().next(); + match rule { + Some(rule) => parse_pair(rule, internal), + None => AstNode::Number(N::one()), + } + } + Rule::number | Rule::pos_number | Rule::neg_number => { + AstNode::Number(pair.as_str().parse().unwrap()) + } + _ => AstNode::None, + } + } + + let parsed = parse_pair(lp_problem, &mut internal); + + match parsed { + AstNode::Lp { + constraints, + objective, + } => Ok(LpProblem { + vars_list: internal.variables, + is_int_constraints: internal.is_int_constraints, + constraints, + objective, + objective_type: internal.objective_type, + }), + _ => unreachable!(), + } +} \ No newline at end of file diff --git a/src/slp/src/solver.rs b/src/slp/src/solver.rs new file mode 100644 index 00000000..32bb18b8 --- /dev/null +++ b/src/slp/src/solver.rs @@ -0,0 +1,231 @@ +use crate::lp::*; +use crate::parser::{LpProblem, ObjectiveType}; +use crate::{Number, Solution, SolverOptions, SolverSettings}; + +/// Linear Programming Solver. +pub struct Solver { + lp: LP, + options: SolverOptions, + is_int_constraints: Vec, + negate_objective: bool, +} + +impl Solver { + /// Creates a new Solver instance from mprog input format. + pub fn new(input: &str) -> Solver + where + N::Err: std::fmt::Debug, + { + crate::parser::parse_lp_problem(input).unwrap().into() + } + + /// Creates a new Solver instance with integer constraints. + pub(crate) fn new_with_int_constraints( + lp: LP, + is_int_constraints: Vec, + negate_objective: bool, + ) -> Self { + Solver { + lp, + options: SolverOptions { parallel: false }, + is_int_constraints, + negate_objective, + } + } + + /// Enable a setting. + pub fn setting(&mut self, setting: SolverSettings) { + match setting { + SolverSettings::EnableDataParallelism => self.options.parallel = true, + } + } + + /// Solves the LP. + /// + /// Uses naive version of simplex method. + /// + /// Returns [a solution](enum.Solution.html). + pub fn solve(&mut self) -> Solution { + match self.lp.solve(self.options.parallel) { + Solution::Infeasible => Solution::Infeasible, + Solution::Unbounded => Solution::Unbounded, + Solution::Optimal(opt, model) => { + let solution = Self::branch_and_bound( + &self.lp, + self.options.parallel, + opt, + model, + &self.is_int_constraints, + None, + ); + if let Solution::Optimal(opt, model) = solution { + if self.negate_objective { + Solution::Optimal(-opt, model) + } else { + Solution::Optimal(opt, model) + } + } else { + solution + } + } + } + } + + fn branch_and_bound( + lp: &LP, + parallel: bool, + lp_opt: N, + model: Vec, + is_int_constraints: &[bool], + mut known_opt: Option, + ) -> Solution { + let mut non_int_index = 0; + for (i, v) in model.iter().enumerate() { + if is_int_constraints[i] && !v.is_integer() { + non_int_index = i + 1; + break; + } + } + if non_int_index == 0 { + return Solution::Optimal(lp_opt, model); + } + + let mut basic_index = 0; + for i in 1..=lp.n_constraints { + if lp.basic_indices[i] == non_int_index { + basic_index = i; + break; + } + } + + let mut tableau = lp.tableau.clone(); + for row in &mut tableau { + row.push(N::zero()); + } + let mut new_constr = vec![N::zero(); tableau[0].len()]; + new_constr[non_int_index] = N::one(); + new_constr[0] = model[non_int_index - 1].floor(); + new_constr[tableau[0].len() - 1] = N::one(); + if basic_index != 0 { + for (i, v) in new_constr.iter_mut().enumerate() { + *v -= tableau[basic_index][i].clone(); + } + } + tableau.push(new_constr); + let mut basic_indices = lp.basic_indices.clone(); + basic_indices.push(tableau[0].len() - 1); + + let mut new_lp = LP { + n_constraints: lp.n_constraints + 1, + n_vars: lp.n_vars, + tableau, + basic_indices, + }; + + let sol1 = new_lp.dual_simplex(parallel); + let sol1_int = match sol1 { + Solution::Infeasible => Solution::Infeasible, + Solution::Unbounded => Solution::Unbounded, + Solution::Optimal(opt, model) => Self::branch_and_bound( + &new_lp, + parallel, + opt, + model, + is_int_constraints, + known_opt.clone(), + ), + }; + + if let Solution::Optimal(opt, _) = &sol1_int { + known_opt = match known_opt { + None => Some(opt.clone()), + Some(k_opt) => Some(if k_opt > *opt { k_opt } else { opt.clone() }), + }; + } + + tableau = lp.tableau.clone(); + for row in &mut tableau { + row.push(N::zero()); + } + let mut new_constr = vec![N::zero(); tableau[0].len()]; + new_constr[non_int_index] = -N::one(); + new_constr[0] = -model[non_int_index - 1].ceil(); + new_constr[tableau[0].len() - 1] = N::one(); + if basic_index != 0 { + for (i, v) in new_constr.iter_mut().enumerate() { + *v += tableau[basic_index][i].clone(); + } + } + tableau.push(new_constr); + basic_indices = lp.basic_indices.clone(); + basic_indices.push(tableau[0].len() - 1); + + let mut new_lp = LP { + n_constraints: lp.n_constraints + 1, + n_vars: lp.n_vars, + tableau, + basic_indices, + }; + let sol2 = new_lp.dual_simplex(parallel); + let sol2_int = match sol2 { + Solution::Infeasible => Solution::Infeasible, + Solution::Unbounded => Solution::Unbounded, + Solution::Optimal(opt, model) => { + Self::branch_and_bound(&new_lp, parallel, opt, model, is_int_constraints, known_opt) + } + }; + + match (sol1_int, sol2_int) { + (Solution::Infeasible, Solution::Infeasible) => Solution::Infeasible, + (Solution::Unbounded, _) | (_, Solution::Unbounded) => Solution::Unbounded, + (Solution::Optimal(opt1, model1), Solution::Optimal(opt2, model2)) => { + if opt1 > opt2 { + Solution::Optimal(opt1, model1) + } else { + Solution::Optimal(opt2, model2) + } + } + (Solution::Optimal(opt, model), _) => Solution::Optimal(opt, model), + (_, Solution::Optimal(opt, model)) => Solution::Optimal(opt, model), + } + } +} + +impl From> for Solver { + fn from(mut lp_problem: LpProblem) -> Self { + let mut tableau = vec![]; + let mut basic_indices = vec![0]; + let n_constraints = lp_problem.constraints.len(); + let n_vars = lp_problem.vars_list.len(); + let mut obj = lp_problem.objective; + for i in obj.iter_mut() { + *i = -i.clone(); + } + obj.insert(0, N::zero()); + for _ in 0..n_constraints { + obj.push(N::zero()); + } + tableau.push(obj); + for (i, constr) in lp_problem.constraints.iter_mut().enumerate() { + constr.0.insert(0, constr.1.clone()); + for j in 0..n_constraints { + constr.0.push(if i == j { N::one() } else { N::zero() }); + } + // TODO Remove clone + tableau.push(constr.0.clone()); + basic_indices.push(n_vars + i + 1); + } + + let lp = LP { + n_constraints, + n_vars, + basic_indices, + tableau, + }; + Solver::new_with_int_constraints( + lp, + lp_problem.is_int_constraints, + lp_problem.objective_type == ObjectiveType::Min, + ) + } +} \ No newline at end of file diff --git a/src/util.rs b/src/util.rs index 7b5ecbba..536575c1 100644 --- a/src/util.rs +++ b/src/util.rs @@ -1,23 +1,41 @@ use crate::mwpf_solver::*; +#[cfg(not(feature = "float_lp"))] use crate::num_rational; use crate::num_traits::ToPrimitive; use crate::rand_xoshiro; use crate::rand_xoshiro::rand_core::RngCore; use crate::visualize::*; +use num_traits::Zero; #[cfg(feature = "python_binding")] use pyo3::prelude::*; +#[cfg(feature = "python_binding")] +use pyo3::types::PyFloat; use serde::{Deserialize, Serialize}; +use std::hash::{Hash, Hasher}; +use std::collections::HashMap; +use crate::pointers::{ArcRwLock, WeakRwLock}; use std::collections::BTreeSet; use std::fs::File; use std::io::prelude::*; use std::time::Instant; +use petgraph::Graph; +use petgraph::Undirected; +use std::sync::Arc; +use crate::itertools::Itertools; + +#[cfg(feature = "pq")] +use crate::dual_module_pq::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; +#[cfg(feature = "non-pq")] +use crate::dual_module_serial::{EdgeWeak, VertexWeak, EdgePtr, VertexPtr}; pub type Weight = usize; // only used as input, all internal weight representation will use `Rational` cfg_if::cfg_if! { if #[cfg(feature="r64_weight")] { pub type Rational = num_rational::Rational64; - } else { + } else if #[cfg(feature="float_lp")] { + pub type Rational = crate::ordered_float::OrderedFloat; + } else { pub type Rational = num_rational::BigRational; } } @@ -56,6 +74,8 @@ pub struct HyperEdge { /// the weight of the hyperedge #[cfg_attr(feature = "python_binding", pyo3(get, set))] pub weight: Weight, + /// whether this hyperedge is connected to any boundary vertex, used for parallel implementation + pub connected_to_boundary_vertex: bool, } #[cfg_attr(feature = "python_binding", cfg_eval)] @@ -63,7 +83,7 @@ pub struct HyperEdge { impl HyperEdge { #[cfg_attr(feature = "python_binding", new)] pub fn new(vertices: Vec, weight: Weight) -> Self { - Self { vertices, weight } + Self { vertices, weight , connected_to_boundary_vertex: false} } #[cfg(feature = "python_binding")] @@ -110,39 +130,74 @@ impl SolverInitializer { } pub fn matches_subgraph_syndrome(&self, subgraph: &Subgraph, defect_vertices: &[VertexIndex]) -> bool { - let subgraph_defect_vertices: Vec<_> = self.get_subgraph_syndrome(subgraph).into_iter().collect(); - let mut defect_vertices = defect_vertices.to_owned(); - defect_vertices.sort(); - if defect_vertices.len() != subgraph_defect_vertices.len() { + let subgraph_defect_vertices: BTreeSet = self.get_subgraph_syndrome(subgraph); + // let subgraph_vertices: std::collections::HashSet<_> = subgraph_defect_vertices.clone().into_iter().map(|v| v.read_recursive().vertex_index).collect(); + let defect_vertices_hash:BTreeSet = BTreeSet::from_iter(defect_vertices.to_vec()); + let difference: Vec = subgraph_defect_vertices.difference(&defect_vertices_hash).cloned().collect(); + if difference.is_empty() { + return true; + } else { + println!( + "defect vertices: {:?}\nsubgraph_defect_vertices: {:?}\ndifference: {:?}", + defect_vertices, subgraph_defect_vertices, difference + ); return false; } - for i in 0..defect_vertices.len() { - if defect_vertices[i] != subgraph_defect_vertices[i] { - return false; - } + // defect_vertices.sort(); + // if defect_vertices.len() != subgraph_defect_vertices.len() { + // println!( + // "defect vertices: {:?}\nsubgraph_defect_vertices: {:?}", + // defect_vertices, subgraph_defect_vertices + // ); + // return false; + // } + // for i in 0..defect_vertices.len() { + // if defect_vertices[i] != subgraph_defect_vertices[i].upgradable_read().vertex_index { + // println!( + // "defect vertices: {:?}\nsubgraph_defect_vertices: {:?}", + // defect_vertices, subgraph_defect_vertices + // ); + // return false; + // } + // } + // true + } + + #[allow(clippy::unnecessary_cast)] + pub fn get_subgraph_total_weight(&self, subgraph: &Subgraph) -> Rational { + let mut weight = Rational::zero(); + for edge_weak in subgraph.iter() { + // weight += self.weighted_edges[edge_index as usize].weight; + weight += edge_weak.upgrade_force().read_recursive().weight; } - true + weight } #[allow(clippy::unnecessary_cast)] - pub fn get_subgraph_total_weight(&self, subgraph: &Subgraph) -> Weight { - let mut weight = 0; - for &edge_index in subgraph.iter() { - weight += self.weighted_edges[edge_index as usize].weight; + pub fn get_subgraph_index_total_weight(&self, subgraph_index: &Vec) -> Rational { + use crate::num_traits::FromPrimitive; + let mut weight = Rational::zero(); + for &edge_index in subgraph_index.iter() { + weight += Rational::from_usize(self.weighted_edges[edge_index as usize].weight).unwrap(); } weight } #[allow(clippy::unnecessary_cast)] - pub fn get_subgraph_syndrome(&self, subgraph: &Subgraph) -> BTreeSet { + pub fn get_subgraph_syndrome(&self, subgraph: &Subgraph) -> BTreeSet { let mut defect_vertices = BTreeSet::new(); - for &edge_index in subgraph.iter() { - let HyperEdge { vertices, .. } = &self.weighted_edges[edge_index as usize]; - for &vertex_index in vertices.iter() { - if defect_vertices.contains(&vertex_index) { - defect_vertices.remove(&vertex_index); + for edge_weak in subgraph.iter() { + // let HyperEdge { vertices, .. } = &self.weighted_edges[edge_index as usize]; + let edge_ptr = edge_weak.upgrade_force(); + // let edge = edge_ptr.read_recursive(); + // let vertices = &edge.vertices; + let vertices = &edge_ptr.get_vertex_neighbors(); + let unique_vertices = vertices.into_iter().map(|v| v.upgrade_force().read_recursive().vertex_index).unique().collect::>(); + for vertex_index in unique_vertices.iter() { + if defect_vertices.contains(vertex_index) { + defect_vertices.remove(vertex_index); } else { - defect_vertices.insert(vertex_index); + defect_vertices.insert(*vertex_index); } } } @@ -157,7 +212,7 @@ impl MWPSVisualizer for SolverInitializer { for _ in 0..self.vertex_num { vertices.push(json!({})); } - for HyperEdge { vertices, weight } in self.weighted_edges.iter() { + for HyperEdge { vertices, weight , connected_to_boundary_vertex: _} in self.weighted_edges.iter() { edges.push(json!({ if abbrev { "w" } else { "weight" }: weight, if abbrev { "v" } else { "vertices" }: vertices, @@ -243,12 +298,13 @@ impl F64Rng for DeterministicRng { /// the result of MWPF algorithm: a parity subgraph (defined by some edges that, /// if are selected, will generate the parity result in the syndrome) -pub type Subgraph = Vec; +pub type Subgraph = Vec; impl MWPSVisualizer for Subgraph { fn snapshot(&self, _abbrev: bool) -> serde_json::Value { + let subgraph_by_index: Vec = self.into_iter().map(|e| e.upgrade_force().read_recursive().edge_index).collect(); json!({ - "subgraph": self, + "subgraph": subgraph_by_index, }) } } @@ -257,10 +313,14 @@ impl MWPSVisualizer for Subgraph { #[cfg(feature = "python_binding")] pub fn rational_to_pyobject(value: &Rational) -> PyResult> { Python::with_gil(|py| { - let frac = py.import("fractions")?; - let numer = value.numer().clone(); - let denom = value.denom().clone(); - frac.call_method("Fraction", (numer, denom), None).map(Into::into) + if cfg!(feature = "float_lp") { + PyResult::Ok(PyFloat::new(py, value.to_f64().unwrap()).into()) + } else { + let frac = py.import("fractions")?; + let numer = value.numer().clone(); + let denom = value.denom().clone(); + frac.call_method("Fraction", (numer, denom), None).map(Into::into) + } }) } @@ -293,7 +353,7 @@ impl WeightRange { #[getter] fn upper(&self) -> PyResult> { - rational_to_pyobject(&self.lower) + rational_to_pyobject(&self.upper) } fn __repr__(&self) -> String { @@ -357,7 +417,7 @@ impl BenchmarkProfiler { } } /// record the beginning of a decoding procedure - pub fn begin(&mut self, syndrome_pattern: &SyndromePattern, error_pattern: &Subgraph) { + pub fn begin(&mut self, syndrome_pattern: &SyndromePattern, error_pattern: &Vec) { // sanity check last entry, if exists, is complete if let Some(last_entry) = self.records.last() { assert!( @@ -421,7 +481,7 @@ pub struct BenchmarkProfilerEntry { /// the syndrome pattern of this decoding problem pub syndrome_pattern: SyndromePattern, /// the error pattern - pub error_pattern: Subgraph, + pub error_pattern: Vec, /// the time of beginning a decoding procedure begin_time: Option, /// record additional events @@ -431,7 +491,7 @@ pub struct BenchmarkProfilerEntry { } impl BenchmarkProfilerEntry { - pub fn new(syndrome_pattern: &SyndromePattern, error_pattern: &Subgraph) -> Self { + pub fn new(syndrome_pattern: &SyndromePattern, error_pattern: &Vec) -> Self { Self { syndrome_pattern: syndrome_pattern.clone(), error_pattern: error_pattern.clone(), @@ -547,3 +607,571 @@ pub(crate) fn register(_py: Python<'_>, m: &PyModule) -> PyResult<()> { m.add_class::()?; Ok(()) } + + +/// for parallel implementation +/// +/// an efficient representation of partitioned vertices and erasures when they're ordered +#[derive(Debug, Clone, Serialize)] + +pub struct PartitionedSyndromePattern<'a> { + /// the original syndrome pattern to be partitioned + pub syndrome_pattern: &'a SyndromePattern, + /// the defect range of this partition: it must be continuous if the defect vertices are ordered + pub owned_defect_range: DefectRange, +} + +impl<'a> PartitionedSyndromePattern<'a> { + pub fn new(syndrome_pattern: &'a SyndromePattern) -> Self { + assert!( + syndrome_pattern.erasures.is_empty(), + "erasure partition not supported yet; + even if the edges in the erasure is well ordered, they may not be able to be represented as + a single range simply because the partition is vertex-based. need more consideration" + ); + Self { + syndrome_pattern, + owned_defect_range: DefectRange::new(0, syndrome_pattern.defect_vertices.len() as DefectIndex), + } + } +} + +// //////////////////////////////////////////////////////////////////////////////////////// +// //////////////////////////////////////////////////////////////////////////////////////// +// /////////////// We implement the HashSet to specify vertices in set //////////////////// + +// #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +// pub struct IndexSet { +// // spaced-out individual index +// pub individual_indices: BTreeSet, +// // indices that can be described using range, we assume that there is only one big range among all vertex indices +// pub range: [VertexNodeIndex; 2], +// } + +// // just to distinguish them in code, essentially nothing different +// pub type VertexSet = IndexSet; +// pub type DefectSet = IndexSet; +// pub type NodeSet = IndexSet; + +// impl IndexSet { +// // initialize a IndexSet that only has a continuous range of indices but no spaced out individual indices +// fn new_range(start: VertexNodeIndex, end: VertexNodeIndex) -> Self { +// debug_assert!(end > start, "invalid range [{}, {})", start, end); +// Self { +// individual_indices: BTreeSet::::new(), +// range: [start, end], +// } +// } + +// // initialize a IndexSet that only has spaced out individual indicies +// fn new_individual_indices(indices: Vec) -> Self { +// let mut new_set = BTreeSet::::new(); +// for index in indices { +// new_set.insert(index); +// } +// Self { +// individual_indices: new_set, +// range: [0, 0], +// } +// } + +// // initialize a IndexSet that has both continuous range of indices and individual spaced out indices +// pub fn new(start: VertexNodeIndex, end: VertexNodeIndex, indices: Vec) -> Self { +// debug_assert!(end > start, "invalid range [{}, {})", start, end); +// if start == end && indices.len() == 0{ +// // range is invalid, we check whether indices are empty +// // indices are empty too +// panic!("both the input range and individual indices are invalid"); +// } else if start == end { +// return Self::new_individual_indices(indices); +// } else if indices.len() == 0{ +// return Self::new_range(start, end); +// } else { +// let mut new_set = BTreeSet::::new(); +// for index in indices { +// new_set.insert(index); +// } + +// return Self { +// individual_indices: new_set, +// range: [start, end], +// } +// } +// } + +// // add more individual index to the already created IndexSet +// pub fn add_individual_index(&mut self, index: VertexNodeIndex) { +// self.individual_indices.insert(index); +// } + +// pub fn new_range_by_length(start: VertexNodeIndex, length: VertexNodeIndex) -> Self { +// Self::new_range(start, start + length) +// } + +// pub fn is_empty(&self) -> bool { +// self.range[1] == self.range[0] && self.individual_indices.is_empty() +// } + +// #[allow(clippy::unnecessary_cast)] +// pub fn len(&self) -> usize { +// (self.range[1] - self.range[0] + self.individual_indices.len()) as usize +// } +// pub fn range_start(&self) -> VertexNodeIndex { +// self.range[0] +// } +// pub fn range_end(&self) -> VertexNodeIndex { +// self.range[1] +// } +// pub fn extend_range_by(&mut self, append_count: VertexNodeIndex) { +// self.range[1] += append_count; +// } +// pub fn bias_by(&mut self, bias: VertexNodeIndex) { +// self.range[0] += bias; +// self.range[1] += bias; + +// let set = std::mem::replace(&mut self.individual_indices, BTreeSet::new()); +// self.individual_indices = set.into_iter() +// .map(|p| p + bias) +// .collect(); +// } +// pub fn sanity_check(&self) { +// assert!(self.range_start() <= self.range_end(), "invalid vertex range {:?}", self); +// } +// pub fn contains(&self, vertex_index: VertexNodeIndex) -> bool { +// (vertex_index >= self.range_start() && vertex_index < self.range_end()) || self.individual_indices.contains(&vertex_index) +// } +// // /// fuse two ranges together, returning (the whole range, the interfacing range) +// // pub fn fuse(&self, other: &Self) -> (Self, Self) { +// // self.sanity_check(); +// // other.sanity_check(); +// // assert!(self.range[1] <= other.range[0], "only lower range can fuse higher range"); +// // ( +// // Self::new(self.range[0], other.range[1]), +// // Self::new(self.range[1], other.range[0]), +// // ) +// // } +// } + + + +// we leave the code here just in case we need to describe the vertices in continuos range +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] +#[serde(transparent)] +pub struct IndexRange { + pub range: [VertexNodeIndex; 2], +} + +// just to distinguish them in code, essentially nothing different +pub type VertexRange = IndexRange; +pub type DefectRange = IndexRange; +pub type NodeRange = IndexRange; +pub type EdgeRange = IndexRange; + +impl IndexRange { + pub fn new(start: VertexNodeIndex, end: VertexNodeIndex) -> Self { + debug_assert!(end >= start, "invalid range [{}, {})", start, end); + Self { range: [start, end] } + } + pub fn new_length(start: VertexNodeIndex, length: VertexNodeIndex) -> Self { + Self::new(start, start + length) + } + pub fn is_empty(&self) -> bool { + self.range[1] == self.range[0] + } + #[allow(clippy::unnecessary_cast)] + pub fn len(&self) -> usize { + (self.range[1] - self.range[0]) as usize + } + pub fn start(&self) -> VertexNodeIndex { + self.range[0] + } + pub fn end(&self) -> VertexNodeIndex { + self.range[1] + } + pub fn append_by(&mut self, append_count: VertexNodeIndex) { + self.range[1] += append_count; + } + pub fn bias_by(&mut self, bias: VertexNodeIndex) { + self.range[0] += bias; + self.range[1] += bias; + } + pub fn sanity_check(&self) { + assert!(self.start() <= self.end(), "invalid vertex range {:?}", self); + } + pub fn contains(&self, vertex_index: VertexNodeIndex) -> bool { + vertex_index >= self.start() && vertex_index < self.end() + } + /// fuse two ranges together, returning (the whole range, the interfacing range) + pub fn fuse(&self, other: &Self) -> (Self, Self) { + self.sanity_check(); + other.sanity_check(); + assert!(self.range[1] <= other.range[0], "only lower range can fuse higher range"); + ( + Self::new(self.range[0], other.range[1]), + Self::new(self.range[1], other.range[0]), + ) + } +} + +impl IndexRange { + pub fn iter(&self) -> std::ops::Range { + self.range[0]..self.range[1] + } + pub fn contains_any(&self, vertex_indices: &[VertexNodeIndex]) -> bool { + for vertex_index in vertex_indices.iter() { + if self.contains(*vertex_index) { + return true; + } + } + false + } +} + + +impl Hash for IndexRange { + fn hash(&self, state: &mut H) { + self.range[0].hash(state); + self.range[1].hash(state); + } +} + +// /// a general partition unit that could contain mirrored vertices +// #[derive(Debug, Clone)] +// pub struct PartitionUnit { +// /// unit index +// pub unit_index: usize, +// } + +// pub type PartitionUnitPtr = ArcRwLock; +// pub type PartitionUnitWeak = WeakRwLock; + +// impl std::fmt::Debug for PartitionUnitPtr { +// fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { +// let partition_unit = self.read_recursive(); +// write!( +// f, +// "{}", +// partition_unit.unit_index +// ) +// } +// } + +// impl std::fmt::Debug for PartitionUnitWeak { +// fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { +// self.upgrade_force().fmt(f) +// } +// } + +/// user input partition configuration +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct PartitionConfig { + /// the number of vertices + pub vertex_num: VertexNum, + /// detailed plan of partitioning serial modules: each serial module possesses a list of vertices, including all interface vertices + pub partitions: Vec, + /// detailed plan of interfacing vertices + pub fusions: Vec<(usize, usize)>, + /// undirected acyclic graph (DAG) to keep track of the relationship between different partition units + pub dag_partition_units: Graph::<(), bool, Undirected>, + /// defect vertices (global index) + pub defect_vertices: BTreeSet, +} + +impl PartitionConfig { + pub fn new(vertex_num: VertexNum) -> Self { + Self { + vertex_num, + partitions: vec![VertexRange::new(0, vertex_num as VertexIndex)], + fusions: vec![], + dag_partition_units: Graph::new_undirected(), + defect_vertices: BTreeSet::new(), + } + } + + /// the partition below relies on the fact that the vertices' indices are continuous + #[allow(clippy::unnecessary_cast)] + pub fn info(&self) -> PartitionInfo { + assert!(!self.partitions.is_empty(), "at least one partition must exist"); + let mut owning_ranges = vec![]; + let unit_count = self.partitions.len() + self.fusions.len(); + let partitions_len = self.partitions.len(); + + for &partition in self.partitions.iter() { + partition.sanity_check(); + assert!( + partition.end() <= self.vertex_num as VertexIndex, + "invalid vertex index {} in partitions", + partition.end() + ); + owning_ranges.push(partition); + } + + // find boundary vertices + let mut interface_ranges = vec![]; + let mut unit_index_to_adjacent_indices: HashMap> = HashMap::new(); + + for (boundary_unit_index, (left_index, right_index)) in self.fusions.iter().enumerate() { + let boundary_unit_index = boundary_unit_index + partitions_len; + // find the interface_range + let (_whole_range, interface_range) = self.partitions[*left_index].fuse(&self.partitions[*right_index]); + interface_ranges.push(interface_range); + owning_ranges.push(interface_range); + if let Some(adjacent_indices) = unit_index_to_adjacent_indices.get_mut(left_index) { + adjacent_indices.push(boundary_unit_index); + } else { + let mut adjacent_indices = vec![]; + adjacent_indices.push(boundary_unit_index); + unit_index_to_adjacent_indices.insert(*left_index, adjacent_indices.clone()); + } + + if let Some(adjacent_indices) = unit_index_to_adjacent_indices.get_mut(right_index) { + adjacent_indices.push(boundary_unit_index); + } else { + let mut adjacent_indices = vec![]; + adjacent_indices.push(boundary_unit_index); + unit_index_to_adjacent_indices.insert(*right_index, adjacent_indices.clone()); + } + + // now we insert the key-value pair for boundary_unit_index and its adjacent + if let Some(adjacent_indices) = unit_index_to_adjacent_indices.get_mut(&boundary_unit_index) { + adjacent_indices.push(*left_index); + adjacent_indices.push(*right_index); + } else { + let mut adjacent_indices = vec![]; + adjacent_indices.push(*left_index); + adjacent_indices.push(*right_index); + unit_index_to_adjacent_indices.insert(boundary_unit_index, adjacent_indices.clone()); + } + } + + let mut boundary_vertices: HashMap> = HashMap::new(); + for (unit_index, adjacent_unit_indices) in unit_index_to_adjacent_indices.iter() { + if let Some(adjacent_vertices) = boundary_vertices.get_mut(&unit_index) { + for adjacent_unit_index in adjacent_unit_indices { + adjacent_vertices.push(owning_ranges[*adjacent_unit_index]); + } + } else { + let mut adjacent_vertices = vec![]; + for adjacent_unit_index in adjacent_unit_indices { + adjacent_vertices.push(owning_ranges[*adjacent_unit_index]); + } + boundary_vertices.insert(*unit_index, adjacent_vertices.clone()); + } + } + + // construct partition info, assuming partition along the time axis + let partition_unit_info: Vec<_> = (0..unit_count) + .map(|i| PartitionUnitInfo { + // owning_range: if i == self.partitions.len() - 1 { + // owning_ranges[i] + // }else { + // IndexRange::new(owning_ranges[i].start(), interface_ranges[i].end()) // owning_ranges[i], + // }, + owning_range: owning_ranges[i], + unit_index: i, + is_boundary_unit: if i < partitions_len {false} else {true}, + adjacent_parallel_units: unit_index_to_adjacent_indices.get(&i).unwrap().clone(), + boundary_vertices: boundary_vertices.get(&i).unwrap().clone(), + }) + .collect(); + + // create vertex_to_owning_unit for owning_ranges + let mut vertex_to_owning_unit = HashMap::new(); + for partition_unit in partition_unit_info.iter() { + // create vertex_to_owning_unit for owning_ranges + for vertex_index in partition_unit.owning_range.iter() { + vertex_to_owning_unit.insert(vertex_index, partition_unit.unit_index); + } + } + + PartitionInfo { + config: self.clone(), + units: partition_unit_info, + vertex_to_owning_unit, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PartitionInfo { + /// the initial configuration that creates this info + pub config: PartitionConfig, + /// individual info of each unit + pub units: Vec, + /// the mapping from vertices to the owning unit: serial unit (holding real vertices) as well as parallel units (holding interfacing vertices); + /// used for loading syndrome to the holding units + pub vertex_to_owning_unit: HashMap, +} + +// impl PartitionInfo { + /// split a sequence of syndrome into multiple parts, each corresponds to a unit; + /// this is a slow method and should only be used when the syndrome pattern is not well-ordered + // #[allow(clippy::unnecessary_cast)] + // pub fn partition_syndrome_unordered(&self, syndrome_pattern: &SyndromePattern) -> Vec { + // let mut partitioned_syndrome: Vec<_> = (0..self.units.len()).map(|_| SyndromePattern::new_empty()).collect(); + // for defect_vertex in syndrome_pattern.defect_vertices.iter() { + // let unit_index = self.vertex_to_owning_unit.get(defect_vertex); + // match unit_index { + // Some(unit_index) => partitioned_syndrome[*unit_index].defect_vertices.push(*defect_vertex), + // None => // the syndrome is on the boudnary vertices + + // } + // } + // // TODO: partition edges + // partitioned_syndrome + // } +// } + +// for primal module parallel +impl<'a> PartitionedSyndromePattern<'a> { + /// partition the syndrome pattern into 2 partitioned syndrome pattern and my whole range + #[allow(clippy::unnecessary_cast)] + pub fn partition(&self, partition_unit_info: &PartitionUnitInfo) -> Self { + // first binary search the start of owning defect vertices + let owning_start_index = { + let mut left_index = self.owned_defect_range.start(); // since owned_defect_range is initialized to the length of all defect vertices + let mut right_index = self.owned_defect_range.end(); + while left_index != right_index { + let mid_index = (left_index + right_index) / 2; + let mid_defect_vertex = self.syndrome_pattern.defect_vertices[mid_index as usize]; + if mid_defect_vertex < partition_unit_info.owning_range.start() { + left_index = mid_index + 1; + } else { + right_index = mid_index; + } + } + left_index + }; + // println!("start of owning defect vertice: {owning_start_index:?}"); + // second binary search the end of owning defect vertices + let owning_end_index = { + let mut left_index = self.owned_defect_range.start(); + let mut right_index = self.owned_defect_range.end(); + while left_index != right_index { + let mid_index = (left_index + right_index) / 2; + let mid_defect_vertex = self.syndrome_pattern.defect_vertices[mid_index as usize]; + if mid_defect_vertex < partition_unit_info.owning_range.end() { + left_index = mid_index + 1; + } else { + right_index = mid_index; + } + } + left_index + }; + // println!("end of owning defect vertice: {owning_end_index:?}"); + + Self { + syndrome_pattern: self.syndrome_pattern, + owned_defect_range: DefectRange::new(owning_start_index, owning_end_index), + } + + // ( + // Self { + // syndrome_pattern: self.syndrome_pattern, + // whole_defect_range: DefectRange::new(owning_start_index, owning_end_index), + // }, + // ( + // Self { + // syndrome_pattern: self.syndrome_pattern, + // whole_defect_range: DefectRange::new(self.whole_defect_range.start(), owning_start_index), + // }, + // Self { + // syndrome_pattern: self.syndrome_pattern, + // whole_defect_range: DefectRange::new(owning_end_index, self.whole_defect_range.end()), + // }, + // ), + // ) + } + + #[allow(clippy::unnecessary_cast)] + pub fn expand(&self) -> SyndromePattern { + let mut defect_vertices = Vec::with_capacity(self.owned_defect_range.len()); + for defect_index in self.owned_defect_range.iter() { + defect_vertices.push(self.syndrome_pattern.defect_vertices[defect_index as usize]); + } + SyndromePattern::new(defect_vertices, vec![]) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PartitionUnitInfo { + /// the owning range of units, the vertices exlusive to this unit + pub owning_range: VertexRange, + /// partition unit index + pub unit_index: usize, + /// if this unit is boundary unit + pub is_boundary_unit: bool, + + pub adjacent_parallel_units: Vec, + + /// the boundary vertices near to this unit + pub boundary_vertices: Vec, + + // /// boundary vertices, following the global vertex index + // /// key: indexrange of the boundary vertices. value: (unit_index, unit_index), the pair of unit_index of the two partition units adjacent to the boundary + // pub boundary_vertices: Option>, + // /// adjacent PartitionUnits, vector of partition unit_index + // pub adjacent_partition_units: Vec, +} + +#[derive(Debug, Clone)] +pub struct PartitionedSolverInitializer { + /// unit index + pub unit_index: usize, + /// the number of all vertices (including those partitioned into other serial modules) + pub vertex_num: VertexNum, + /// the number of all edges (including those partitioned into other serial modules) + pub edge_num: usize, + /// vertices exclusively owned by this partition; this part must be a continuous range + pub owning_range: VertexRange, + /// weighted edges, where the first vertex index is within the range [vertex_index_bias, vertex_index_bias + vertex_num) and + /// the second is either in [vertex_index_bias, vertex_index_bias + vertex_num) or inside + /// the second element in the tuple is the global edge index of the respective hyper_edge + pub weighted_edges: Vec<(HyperEdge, usize)>, + // /// (not sure whether we need it, just in case) + pub boundary_vertices: Vec, + /// whether this unit is boundary-unit + pub is_boundary_unit: bool, + /// all defect vertices (global index), not just for this unit + pub defect_vertices: BTreeSet, + // /// (not sure whether we need it, just in case) + // pub adjacent_partition_units: Vec, + // /// applicable when all the owning vertices are partitioned (i.e. this belongs to a fusion unit) + // pub owning_interface: Option, +} + +/// perform index transformation +#[allow(clippy::unnecessary_cast)] +pub fn build_old_to_new(reordered_vertices: &Vec) -> Vec> { + let mut old_to_new: Vec> = (0..reordered_vertices.len()).map(|_| None).collect(); + for (new_index, old_index) in reordered_vertices.iter().enumerate() { + assert_eq!(old_to_new[*old_index as usize], None, "duplicate vertex found {}", old_index); + old_to_new[*old_index as usize] = Some(new_index as VertexIndex); + } + old_to_new +} + +/// translate defect vertices into the current new index given reordered_vertices +#[allow(clippy::unnecessary_cast)] +pub fn translated_defect_to_reordered( + reordered_vertices: &Vec, + old_defect_vertices: &[VertexIndex], +) -> Vec { + let old_to_new = build_old_to_new(reordered_vertices); + old_defect_vertices + .iter() + .map(|old_index| old_to_new[*old_index as usize].unwrap()) + .collect() +} + + +#[cfg(test)] +pub mod tests { + use super::*; + + // #[test] + // fn util_test() { + // let x = VertexSet::new(0, 72, indices) + // } +} \ No newline at end of file diff --git a/src/visualize.rs b/src/visualize.rs index 92be5e09..5b8a1eeb 100644 --- a/src/visualize.rs +++ b/src/visualize.rs @@ -192,7 +192,6 @@ pub fn snapshot_combine_values(value: &mut serde_json::Value, mut value_2: serde *vertex = vertex_2.clone(); continue; } - // println!("vertex_idx: {vertex_idx}"); let vertex = vertex.as_object_mut().expect("each vertex must be an object"); let vertex_2 = vertex_2.as_object_mut().expect("each vertex must be an object"); // list known keys diff --git a/visualize/gui3d.js b/visualize/gui3d.js index 6dd1f3c2..40fcb517 100644 --- a/visualize/gui3d.js +++ b/visualize/gui3d.js @@ -144,7 +144,9 @@ if (!is_mock) { export function animate() { requestAnimationFrame(animate) - orbit_control.value.update() + if (orbit_control.value.enabled) { + orbit_control.value.update() + } renderer.render(scene, camera.value) if (stats) stats.update() } @@ -476,14 +478,6 @@ export async function refresh_snapshot_data() { } edge_caches = [] // clear cache for (let [i, edge] of snapshot.edges.entries()) { - // calculate the center point of all vertices - let sum_position = new THREE.Vector3(0, 0, 0) - for (let j = 0; j < edge.v.length; ++j) { - const vertex_index = edge.v[j] - const vertex_position = mwpf_data.positions[vertex_index] - sum_position = sum_position.add(compute_vector3(vertex_position)) - } - const center_position = sum_position.multiplyScalar(1 / edge.v.length) let local_edge_cache = [] edge_caches.push(local_edge_cache) while (edge_vec_meshes.length <= i) { @@ -494,11 +488,22 @@ export async function refresh_snapshot_data() { scene.remove(edge_vec_mesh[j]) } edge_vec_mesh.splice(0, edge_vec_mesh.length) // clear + if (edge == null) { + continue + } + // calculate the center point of all vertices + let sum_position = new THREE.Vector3(0, 0, 0) + for (let j = 0; j < edge.v.length; ++j) { + const vertex_index = edge.v[j] + const vertex_position = mwpf_data.positions[vertex_index] + sum_position = sum_position.add(compute_vector3(vertex_position)) + } + const center_position = sum_position.multiplyScalar(1 / edge.v.length) const edge_material = get_edge_material(edge.g, edge.w) const segmented_dual_indices = [] if (segmented.value && snapshot.dual_nodes != null) { // check the non-zero contributing dual variables for (let node_index of edge_to_dual_indices.value[i]) { - if (snapshot.dual_nodes[node_index].d != 0) { + if (node_index < snapshot.dual_nodes.length && snapshot.dual_nodes[node_index].d != 0) { segmented_dual_indices.push(node_index) } } @@ -645,7 +650,7 @@ export async function refresh_snapshot_data() { if (vertex.s) { vertex_outline_mesh.material = defect_vertex_outline_material } else if (vertex.v) { - vertex_outline_mesh.material = virtual_vertex_outline_material + vertex_outline_mesh.material = normal_vertex_outline_material // virtual_vertex_outline_material } else { vertex_outline_mesh.material = normal_vertex_outline_material }