diff --git a/Cargo.toml b/Cargo.toml index 520b537..ba8eb94 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ version = "0.2.0" # Don't forget to update html_root_url in lib.rs. authors = ["Robin Freyler ", "Aaron Kutch "] license = "MIT/Apache-2.0" readme = "README.md" +edition = "2018" keywords = ["int", "integer", "precision", "arbitrary"] @@ -13,15 +14,17 @@ description = """Arbitrary precision integers library.""" categories = ["data-structures"] [dependencies] -smallvec = { version = "0.6.5" } -specialized-div-rem = { version = "0.0.2", optional = true } -rand = { version = "0.5.5", optional = true } -serde = { version = "1.0.75", optional = true } -serde_derive = { version = "1.0.75", optional = true } +smallvec = "0.6.10" +specialized-div-rem = { version = "0.0.5", optional = true } +rand = { version = "0.7.0", features = ["small_rng"], optional = true } + +serde = { version = "1.0.94", optional = true } +serde_derive = { version = "1.0.94", optional = true } [dev-dependencies] -serde_test = "1.0.75" -itertools = "0.7.8" +rand_xoshiro = "0.3.0" +serde_test = "1.0.94" +itertools = "0.8.0" [features] default = ["rand_support", "serde_support", "specialized-div-rem"] diff --git a/README.md b/README.md index 8d0545f..40024f3 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ -ApInt - Arbitrary Precision Integer -=================================== +# ApInt - Arbitrary Precision Integer | Linux | Windows | Codecov | Coveralls | Docs | Crates.io | |:-------------------:|:-------------------:|:--------------------:|:--------------------:|:----------------:|:--------------------:| @@ -7,7 +6,7 @@ ApInt - Arbitrary Precision Integer **Development in progress:** *The implementation has not been finished and may not work.* -**A**rbitrary **p**recision **Int**egers (**ApInt**) represent integers that have an arbitrary but +**A**rbitrary **p**recision **Int**egers (**ApInt**) represent integers that have an arbitrary but fixed runtime bit-width and offers 
two's complement modulo arithmetic equal to machine integers. The integer types offered by this library are: @@ -24,19 +23,27 @@ The API is based on the LLVM [`APInt`](http://llvm.org/doxygen/classllvm_1_1APIn - SMT solvers may use this as an underlying model for the theory of bitvectors. - Operations and backend for cryptographic keys. - Also usable as a simple bitset with runtime length information. +- Use it like any other big integer library, with the difference that the user manages the bit + widths of all instances, and numbers can be purposely overflowed like machine integers. ## Internals The design focus is at efficiency and robustness. -`ApInt` instances are small-value-optimized. This means that only `ApInt` instances with a bit-width larger than 64 bits allocate dynamic memory. +An `ApInt` consists of a sequence of `Digit`s. +A `Digit` is currently `u64`, but in the future a feature could be added to allow changing this. +The `Digit` struct is never going to be made public for this reason and other internal reasons, +although the term is used often in documentation to convey what happens inside the public interface. +`ApInt` instances are small-value-optimized. This means that only `ApInt` instances with a bit-width +larger than the number of bits in a `Digit` allocate dynamic memory. -An `ApInt` constists of a sequence of 64-bit `Digit`s. -Computations are done within their 128-bit `DoubleDigit` form to prevent bit-loss on over- or underflows. -This implies a dependency on 128-bit integers which are currently unstable in Rust. +By default, very little `unsafe` is used outside of managing internal `union`s. The robustness of +`ApInt` operations is backed by extensive fuzz testing (including unit, regression, random input, +and edge case testing in multiple flag modes). ## Differences & Parallels -The below table lists public and internal differences between `ApInt` and `num::BigInt`. 
+The below table lists public and internal differences between `ApInt` and `num::BigInt`. Pointer +widths of 64 bits are assumed. | Topic | `num::BigInt` | `ApInt` | |:------------------------:|:------------------------------------------|:----------------------------------------| @@ -45,28 +52,29 @@ The below table lists public and internal differences between `ApInt` and `num:: | Small Value Optimization | No | Yes: Up to 64-bits. | | Building Blocks | 32-bit `BigDigit` aka `u32` | 64-bit `Digit` | | Compute Unit | 64-bit `DoubleBigDigit` aka `u64` | 128-bit `DoubleDigit` | -| Signed | Yes: `num::BigUint` is for unsigned. | No: Operations know signedness instead. | -| `mem::size_of<..>` | About 24 bytes + some signedness info. | Exactly 128 bits (16 bytes). | +| Signed | Yes: `num::BigInt` is for signed | No: Operations know signedness instead | +| `mem::size_of<..>` | About 24 bytes + some signedness info | Exactly 128 bits | | Width interoperability | No restriction to operate between `BigInt` instances with different bit-widths. | Only `ApInt` instances with the same bit-width can interoperate. | -| Memory footprint | Determined by current value stored. | Determined by bit-width. | -| Can grow and shrink? | Yes | No, see above. | -| Unstable features? | None | Stable as of Rust 1.26. | +| Memory footprint | Determined by current value stored | Determined by bit-width | +| Auto-resize | Yes | No | +| Unstable features? | None | Stable as of Rust 1.36 | ## Current State -Currently only a few parts of the implementation are done - especially the implementation of `ApInt`'s with bit-widths greater than 64 bits is incomplete. +Currently most of the implementation is done. Many parts could use performance or ergonomics +improvements however. 
State of the API modules implemented so far: | Module | Design | Implementation | Testing | TODO | |:-------------------:|:------:|:--------------:|:-------:|:----:| -| `arithmetic` | **done** | unfinished | unfinished | | +| `arithmetic` | **done** | **done** | **done** | | | `constructors` | **done** | **done** | **done** | | | `casting` | **done** | **done** | *not started* | issue [#4](https://github.com/Robbepop/apint/issues/4) | | `bitwise` | **done** | **done** | *not started* | | | `shift` | **done** | **done** | **done** | | -| `relational` | **done** | **done** | *not started* | | -| `utils` | **done** | **done** | *not started* | | +| `relational` | **done** | **done** | unfinished | | +| `utils` | **done** | **done** | unfinished | | | `serialization` | **done** | unfinished | unfinished | depends on `arithmetic` | | `to_primitive` | **done** | **done** | **done** | | | `serde_impl` (opt.) | **done** | **done** | **done** | | @@ -75,16 +83,21 @@ State of the API modules implemented so far: ## Planned Features - Full and efficient `ApInt` implementation and decent test coverage. -- Mid-level `ApsInt` wrapper around `ApInt` that stores a run-time sign information. - This is different from `Int` and `UInt` since those types store - their sign immutable in their type. This is the same as LLVM's `APSInt` data type. +- Mid-level `ApsInt` wrapper around `ApInt` that stores a run-time sign information. This is the + same as LLVM's `APSInt` data type. These allow for constant time complexity changes in sign and + more efficient operations on negative numbers in some cases. +- Low level unsafe functions that have no bounds checking, allow for `ApInt`s of different bit + widths to be operated on, and have access to reusing internal allocations for calculations that + require allocated temporaries. +- More efficient algorithms. 
## License Licensed under either of - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) +- Apache License, Version 2.0, + ([LICENSE-APACHE](LICENSE-APACHE) or ) +- MIT license ([LICENSE-MIT](LICENSE-MIT) or ) at your option. @@ -92,8 +105,8 @@ at your option. ### Contribution -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the +work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. [1]: https://travis-ci.org/Robbepop/apint.svg?branch=master @@ -123,6 +136,14 @@ additional terms or conditions. ## Release Notes +### Version 0.3.0 TODO + +- Rename many functions from `_checked_` to `_wrapping_` and clarified documentation. +- Added multiplication and division functions. +- Add circular shift functions like `rotate_left_assign`. +- Removed `Bit`, changed `ApInt::from_bit` to `ApInt::from_bool`. +- Fixes a wide variety of internal technical debts, and fixed several bugs. + ### Version 0.2.0 - 2018-05-16 - Add `Binary`, `LowerHex` and `UpperHex` impls to `Int`, `UInt` and `ApInt`. @@ -134,7 +155,9 @@ additional terms or conditions. - Add `into_bitnot` to `ApInt`, `Int` and `UInt`. - Add division-by-zero error and managing around it for respective operations. - Add a crate prelude module for simple usage of commonly used types. -- Fixed bug in `ApInt::sign_extend` and `Int::extend` (issue [#15](https://github.com/Robbepop/apint/issues/15)). Thanks [AaronKutch](https://github.com/AaronKutch) for reporting! 
+- Fixed bug in `ApInt::sign_extend` and `Int::extend` + (issue [#15](https://github.com/Robbepop/apint/issues/15)). + Thanks [AaronKutch](https://github.com/AaronKutch) for reporting! - Fixed markdown headers of many public impl blocks. - Fixed several documentation comments of public APIs, like `ApInt::from_{i128, u128}`. - Fixed several minor bugs due to forwarding to wrong implementation methods. diff --git a/src/apint/arithmetic.rs b/src/apint/arithmetic.rs deleted file mode 100644 index 45b6d61..0000000 --- a/src/apint/arithmetic.rs +++ /dev/null @@ -1,2328 +0,0 @@ -use apint::{ApInt}; -use apint::utils::DataAccessMut; -use apint::utils::{ZipDataAccessMutSelf::{Inl, Ext},ZipDataAccessMutBoth}; -use traits::{Width}; -use errors::{DivOp, Error, Result}; -use digit; -use digit::{Digit, DoubleDigit}; -use utils::{try_forward_bin_mut_impl, forward_mut_impl}; - -use std::ops::{ - Neg, - Add, - Sub, - Mul, - AddAssign, - SubAssign, - MulAssign -}; - -/// # Basic Arithmetic Operations -/// -/// **Note**: unless otherwise noted in the function specific documentation, -/// -/// - The functions do **not** allocate memory. -/// - The function works for both signed and unsigned interpretations of an `ApInt`. In other words, in the low-level bit-wise representation there is no difference between a signed and unsigned operation by a certain function on fixed bit-width integers. (Cite: LLVM) -impl ApInt { - /// Increments this `ApInt` by one inplace. - pub fn wrapping_inc(&mut self) { - match self.access_data_mut() { - DataAccessMut::Inl(x) => { - *x = x.wrapping_add(Digit::one()); - } - DataAccessMut::Ext(x) => { - for i in 0..x.len() { - match x[i].overflowing_add(Digit::one()) { - (v,false) => { - x[i] = v; - break; - } - (v,true) => { - //if the ApInt was relatively random this should rarely happen - x[i] = v; - } - } - } - } - } - self.clear_unused_bits(); - } - - /// Increments this `ApInt` by one and returns the result. 
- pub fn into_wrapping_inc(self) -> ApInt { - forward_mut_impl(self, ApInt::wrapping_inc) - } - - /// Decrements this `ApInt` by one inplace. - pub fn wrapping_dec(&mut self) { - match self.access_data_mut() { - DataAccessMut::Inl(x) => { - *x = x.wrapping_sub(Digit::one()); - } - DataAccessMut::Ext(x) => { - for i in 0..x.len() { - match x[i].overflowing_sub(Digit::one()) { - (v,false) => { - x[i] = v; - break; - } - (v,true) => { - //if the ApInt was relatively random this should rarely happen - x[i] = v; - } - } - } - } - } - self.clear_unused_bits(); - } - - /// Decrements this `ApInt` by one and returns the result. - pub fn into_wrapping_dec(self) -> ApInt { - forward_mut_impl(self, ApInt::wrapping_dec) - } - - /// Negates this `ApInt` inplace. - pub fn wrapping_neg(&mut self) { - self.bitnot(); - self.wrapping_inc(); - //`wrapping_inc` handles clearing the unused bits - } - - /// Negates this `ApInt` and returns the result. - pub fn into_wrapping_neg(self) -> ApInt { - forward_mut_impl(self, ApInt::wrapping_neg) - } - - /// Add-assigns `rhs` to `self` inplace. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. - pub fn wrapping_add_assign(&mut self, rhs: &ApInt) -> Result<()> { - match self.zip_access_data_mut_self(rhs)? { - Inl(lhs, rhs) => { - *lhs = lhs.wrapping_add(rhs); - } - Ext(lhs, rhs) => { - let (temp, mut carry) = lhs[0].carrying_add(rhs[0]); - lhs[0] = temp; - for i in 1..lhs.len() { - let temp = lhs[i].dd() - .wrapping_add(rhs[i].dd()) - .wrapping_add(carry.dd()); - lhs[i] = temp.lo(); - carry = temp.hi(); - } - } - } - self.clear_unused_bits(); - Ok(()) - } - - /// Adds `rhs` to `self` and returns the result. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. 
- pub fn into_wrapping_add(self, rhs: &ApInt) -> Result { - try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_add_assign) - } - - /// Add-assigns `rhs` to `self` inplace, and returns a boolean indicating if overflow occured, - /// according to the **unsigned** interpretation of overflow. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. - // TODO: add tests - #[allow(dead_code)] - pub(crate) fn overflowing_uadd_assign(&mut self, rhs: &ApInt) -> Result { - match self.width().excess_bits() { - Some(excess) => { - let mask = Digit::all_set() >> excess; - match self.zip_access_data_mut_self(rhs)? { - Inl(lhs, rhs) => { - let temp = lhs.wrapping_add(rhs); - *lhs = temp & mask; - //excess bits are cleared by the mask - return Ok((temp & mask) != temp) - } - Ext(lhs, rhs) => { - let (temp, mut carry) = lhs[0].carrying_add(rhs[0]); - lhs[0] = temp; - for i in 1..(lhs.len() - 1) { - let temp = lhs[i].dd() - .wrapping_add(rhs[i].dd()) - .wrapping_add(carry.dd()); - lhs[i] = temp.lo(); - carry = temp.hi(); - } - let temp = lhs[lhs.len() - 1] - .wrapping_add(rhs[lhs.len() - 1]) - .wrapping_add(carry); - lhs[lhs.len() - 1] = temp & mask; - //excess bits are cleared by the mask - return Ok((temp & mask) != temp) - } - } - } - None => { - match self.zip_access_data_mut_self(rhs)? { - Inl(lhs, rhs) => { - let temp = lhs.overflowing_add(rhs); - *lhs = temp.0; - //no excess bits to clear - return Ok(temp.1) - } - Ext(lhs, rhs) => { - let (temp, mut carry) = lhs[0].carrying_add(rhs[0]); - lhs[0] = temp; - for i in 1..lhs.len() { - let temp = lhs[i].dd() - .wrapping_add(rhs[i].dd()) - .wrapping_add(carry.dd()); - lhs[i] = temp.lo(); - carry = temp.hi(); - } - //no excess bits to clear - return Ok(carry != Digit::zero()) - } - } - } - } - } - - /// Add-assigns `rhs` to `self` inplace, and returns a boolean indicating if overflow occured, - /// according to the **signed** interpretation of overflow. 
- /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. - // TODO: add tests - #[allow(dead_code)] - pub(crate) fn overflowing_sadd_assign(&mut self, rhs: &ApInt) -> Result { - let self_sign = self.is_negative(); - let rhs_sign = rhs.is_negative(); - self.wrapping_add_assign(rhs)?; - Ok((self_sign == rhs_sign) && (self_sign != self.is_negative())) - } - - /// Subtract-assigns `rhs` from `self` inplace. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. - pub fn wrapping_sub_assign(&mut self, rhs: &ApInt) -> Result<()> { - match self.zip_access_data_mut_self(rhs)? { - Inl(lhs, rhs) => { - *lhs = lhs.wrapping_sub(rhs); - } - Ext(lhs, rhs) => { - let (temp, mut carry) = lhs[0].dd() - .wrapping_add((!rhs[0]).dd()) - .wrapping_add(Digit::one().dd()).lo_hi(); - lhs[0] = temp; - for i in 1..lhs.len() { - let temp = lhs[i].dd() - .wrapping_add((!rhs[i]).dd()) - .wrapping_add(carry.dd()); - lhs[i] = temp.lo(); - carry = temp.hi(); - } - } - } - self.clear_unused_bits(); - Ok(()) - } - - /// Subtracts `rhs` from `self` and returns the result. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. - pub fn into_wrapping_sub(self, rhs: &ApInt) -> Result { - try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_sub_assign) - } - - /// Multiply-assigns `rhs` to `self` inplace. This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. - /// - /// # Performance - /// - /// If the function detects a large number of leading zeros in front of the most significant - /// 1 bit, it will apply optimizations so that wasted multiplications and additions of zero are - /// avoided. This function is designed to efficiently handle 5 common kinds of multiplication. - /// Small here means both small ApInt `BitWidth` and/or small **unsigned** numerical - /// significance. 
(Signed multiplication works, but two's complement negative numbers may have a - /// large number of leading ones, leading to potential inefficiency.) - /// - /// - multiplication of zero by any size integer (no allocation) - /// - multiplication of small (<= 1 `Digit`) integers (no allocation) - /// - wrapping multiplication of medium size (<= 512 bits) integers - /// - multiplication of medium size integers that will not overflow - /// - multiplication of small integers by large integers (or large integers multiplied by small - /// integers) (no allocation) - /// - /// Currently, Karatsuba multiplication is not implemented, so large integer multiplication - /// may be very slow compared to other algorithms. According to Wikipedia, Karatsuba algorithms - /// outperform 𝒪(n^2) algorithms, starting around 320-640 bits. - pub fn wrapping_mul_assign(&mut self, rhs: &ApInt) -> Result<()> { - match self.zip_access_data_mut_self(rhs)? { - Inl(lhs, rhs) => { - *lhs = lhs.wrapping_mul(rhs); - } - Ext(lhs, rhs) => { - //finds the most significant nonzero digit (for later optimizations) and handles - //early return of multiplication by zero. - let rhs_sig_nonzero: usize = match rhs.iter().rposition(|x| x != &Digit::zero()) { - Some(x) => x, - None => { - for x in lhs.iter_mut() { - x.unset_all() - } - return Ok(()); - } - }; - let lhs_sig_nonzero: usize = match lhs.iter().rposition(|x| x != &Digit::zero()) { - Some(x) => x, - None => { - for x in lhs.iter_mut() { - x.unset_all() - } - return Ok(()); - } - }; - //for several routines below there was a nested loop that had its first and last - //iterations unrolled (and the unrolled loops had their first and last iterations - //unrolled), and then some if statements are added for digit overflow checks. 
- //This is done because the compiler probably cannot properly unroll the carry - //system, overflow system, and figure out that only `Digit` multiplications were - //needed instead of `DoubleDigit` multiplications in some places. - match (lhs_sig_nonzero == 0, rhs_sig_nonzero == 0) { - (false, false) => { - let lhs_sig_bits = (lhs_sig_nonzero * digit::BITS) - + (digit::BITS - (lhs[lhs_sig_nonzero].leading_zeros() as usize)); - let rhs_sig_bits = (rhs_sig_nonzero * digit::BITS) - + (digit::BITS - (rhs[rhs_sig_nonzero].leading_zeros() as usize)); - let tot_sig_bits = lhs_sig_bits + rhs_sig_bits; - if tot_sig_bits <= (lhs.len() * digit::BITS) { - //No possibility of `Digit` wise overflow. Note that end bits still - //have to be trimmed for `ApInt`s with a width that is not a multiple of - //`Digit`s. - //first digit of first row - let mult = lhs[0]; - let temp = mult.carrying_mul(rhs[0]); - //middle digits of first row - //the goal here with `sum` is to allocate and initialize it only once - //here. 
- let mut sum = Vec::with_capacity(lhs_sig_nonzero + rhs_sig_nonzero + 2); - sum.push(temp.0); - let mut mul_carry = temp.1; - for rhs_i in 1..rhs_sig_nonzero { - let temp = mult.carrying_mul_add(rhs[rhs_i], mul_carry); - sum.push(temp.0); - mul_carry = temp.1; - } - let temp = mult.carrying_mul_add(rhs[rhs_sig_nonzero], mul_carry); - sum.push(temp.0); - sum.push(temp.1); - //middle rows - for lhs_i in 1..lhs_sig_nonzero { - let mult = lhs[lhs_i]; - //first digit of this row - let temp0 = mult.carrying_mul(rhs[0]); - let mut mul_carry = temp0.1; - let temp1 = sum[lhs_i].carrying_add(temp0.0); - sum[lhs_i] = temp1.0; - let mut add_carry = temp1.1; - //middle digits of this row - for rhs_i in 1..rhs_sig_nonzero { - let temp0 = mult.carrying_mul_add(rhs[rhs_i], mul_carry); - mul_carry = temp0.1; - let temp1: DoubleDigit = sum[lhs_i + rhs_i].dd() - .wrapping_add(temp0.0.dd()) - .wrapping_add(add_carry.dd()); - sum[lhs_i + rhs_i] = temp1.lo(); - add_carry = temp1.hi(); - } - //final digits of this row - let temp0 = mult.carrying_mul_add(rhs[rhs_sig_nonzero],mul_carry); - let temp1: DoubleDigit = sum[lhs_i + rhs_sig_nonzero].dd() - .wrapping_add(temp0.0.dd()) - .wrapping_add(add_carry.dd()); - sum[lhs_i + rhs_sig_nonzero] = temp1.lo(); - sum.push(temp1.hi().wrapping_add(temp0.1)); - } - let mult = lhs[lhs_sig_nonzero]; - //first digit of final row - let temp0 = mult.carrying_mul(rhs[0]); - let mut mul_carry = temp0.1; - let temp1 = sum[lhs_sig_nonzero].carrying_add(temp0.0); - sum[lhs_sig_nonzero] = temp1.0; - let mut add_carry = temp1.1; - //middle digits of final row - for rhs_i in 1..rhs_sig_nonzero { - let temp0 = mult.carrying_mul_add(rhs[rhs_i], mul_carry); - mul_carry = temp0.1; - let temp1: DoubleDigit = sum[lhs_sig_nonzero + rhs_i].dd() - .wrapping_add(temp0.0.dd()) - .wrapping_add(add_carry.dd()); - sum[lhs_sig_nonzero + rhs_i] = temp1.lo(); - add_carry = temp1.hi(); - } - let temp0 = mult.carrying_mul_add(rhs[rhs_sig_nonzero], mul_carry); - let temp1: 
DoubleDigit = sum[lhs_sig_nonzero + rhs_sig_nonzero].dd() - .wrapping_add(temp0.0.dd()) - .wrapping_add(add_carry.dd()); - sum[lhs_sig_nonzero + rhs_sig_nonzero] = temp1.lo(); - sum.push(temp1.hi().wrapping_add(temp0.1)); - if lhs.len() < sum.len() { - for i in 0..lhs.len() { - lhs[i] = sum[i]; - } - } else { - for i in 0..sum.len() { - lhs[i] = sum[i]; - } - } - } else { - //wrapping (modular) multiplication - let sig_nonzero = lhs.len() - 1; - //first digit done and carry - let temp = lhs[0].carrying_mul(rhs[0]); - //the goal here with `sum` is to allocate and initialize it only once - //here. - //first row - let mut sum = Vec::with_capacity(lhs.len()); - sum.push(temp.0); - let mut mul_carry = temp.1; - for rhs_i in 1..sig_nonzero { - let temp = lhs[0].carrying_mul_add(rhs[rhs_i], mul_carry); - sum.push(temp.0); - mul_carry = temp.1; - } - //final digit of first row - sum.push(lhs[0].wrapping_mul_add(rhs[sig_nonzero], mul_carry)); - //middle rows - for lhs_i in 1..sig_nonzero { - //first digit of this row - let temp0 = lhs[lhs_i].carrying_mul(rhs[0]); - mul_carry = temp0.1; - let temp1 = sum[lhs_i].carrying_add(temp0.0); - //sum[lhs_i] does not need to be used again - sum[lhs_i] = temp1.0; - let mut add_carry = temp1.1; - //as we get to the higher lhs digits, the higher rhs digits do not - //need to be considered - let rhs_i_upper = sig_nonzero.wrapping_sub(lhs_i); - //middle digits of this row - for rhs_i in 1..rhs_i_upper { - let temp0 = lhs[lhs_i].carrying_mul_add(rhs[rhs_i], mul_carry); - mul_carry = temp0.1; - let temp1: DoubleDigit = sum[lhs_i + rhs_i].dd() - .wrapping_add(temp0.0.dd()) - .wrapping_add(add_carry.dd()); - sum[lhs_i + rhs_i] = temp1.lo(); - add_carry = temp1.hi(); - } - //final digit of this row - sum[sig_nonzero] = lhs[lhs_i] - .wrapping_mul(rhs[rhs_i_upper]) - .wrapping_add(mul_carry) - .wrapping_add(sum[sig_nonzero]) - .wrapping_add(add_carry); - } - for i in 0..sig_nonzero { - lhs[i] = sum[i]; - } - //final digit (the only one in its 
row) - lhs[sig_nonzero] = lhs[sig_nonzero] - .wrapping_mul_add(rhs[0], sum[sig_nonzero]); - } - }, - (true, false) => { - let mult = lhs[0]; - //first digit done and carry - let temp = mult.carrying_mul(rhs[0]); - lhs[0] = temp.0; - let mut mul_carry = temp.1; - //middle of row - for rhs_i in 1..rhs_sig_nonzero { - let temp = mult.carrying_mul_add(rhs[rhs_i], mul_carry); - lhs[rhs_i] = temp.0; - mul_carry = temp.1; - } - //final digit - if rhs_sig_nonzero == lhs.len() - 1 { - lhs[rhs_sig_nonzero] = mult - .wrapping_mul_add(rhs[rhs_sig_nonzero], mul_carry); - } else { - let temp = mult.carrying_mul_add(rhs[rhs_sig_nonzero], mul_carry); - lhs[rhs_sig_nonzero] = temp.0; - lhs[rhs_sig_nonzero + 1] = temp.1; - } - }, - (false, true) => { - //first digit done and carry - let temp = rhs[0].carrying_mul(lhs[0]); - lhs[0] = temp.0; - let mut mul_carry = temp.1; - //middle of row - for lhs_i in 1..lhs_sig_nonzero { - let temp = rhs[0].carrying_mul_add(lhs[lhs_i], mul_carry); - lhs[lhs_i] = temp.0; - mul_carry = temp.1; - } - //final digit - if lhs_sig_nonzero == lhs.len() - 1 { - lhs[lhs_sig_nonzero] = rhs[0] - .wrapping_mul_add(lhs[lhs_sig_nonzero], mul_carry); - } else { - let temp = rhs[0].carrying_mul_add(lhs[lhs_sig_nonzero], mul_carry); - lhs[lhs_sig_nonzero] = temp.0; - lhs[lhs_sig_nonzero + 1] = temp.1; - } - }, - (true, true) => { - let temp0 = lhs[0].carrying_mul(rhs[0]); - lhs[0] = temp0.0; - lhs[1] = temp0.1; - } - } - } - } - self.clear_unused_bits(); - Ok(()) - } - - /// Multiplies `rhs` with `self` and returns the result. This function **may** allocate memory. - /// Note: see `wrapping_mul_assign` for more information. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. 
- pub fn into_wrapping_mul(self, rhs: &ApInt) -> Result { - try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_mul_assign) - } -} - -/// # Division Operations -/// -/// **Note**: unless otherwise noted in the function specific documentation, -/// -/// - The functions do **not** allocate memory. -/// - The function works for both signed and unsigned interpretations of an `ApInt`. In other words, -/// in the low-level bit-wise representation there is no difference between a signed and -/// unsigned operation by the function on fixed bit-width integers. (Cite: LLVM) -/// -/// In almost all integer division algorithms where "just" the quotient is calculated, the remainder -/// is also produced and actually exists in memory (or at least is only one O(n) operation away) -/// prior to being dropped or overwritten, and vice versa for remainder only calculations. Note here -/// that functions with `div` in their names (e.g. `wrapping_div`) should really be called `quo` -/// (quotient) functions, because the division process produces both the quotient and remainder. -/// However, to stay with Rust's naming scheme we have kept `div` naming. The instruction for -/// division on many CPUs sets registers to both results of the division process, and compilers will -/// detect if code uses both results and only use one division instruction. There is no such -/// detection for `ApInt`s, and thus the `divrem` and `remdiv` type instructions exist to explicitly -/// use just one division function for both results. -/// -/// ## Performance -/// -/// All of the division functions in this `impl` quickly check for various edge cases and use an -/// efficient algorithm for these cases. -/// Small here means both small ApInt `BitWidth` and/or small **unsigned** numerical significance. -/// (Signed division works, but two's complement negative numbers may have a large number of -/// leading ones, leading to potential inefficiency.) 
-/// -/// - division of zero by any size integer (no allocation) -/// - division of small (1 `Digit`) integers (no allocation) -/// - any division that will lead to the quotient being zero or one (no allocation) -/// - division of any integer by small (1 `Digit`) very small (0.5 `Digit`) integers (no allocation) -/// - division where the number of leading zeros of both arguments are within one `Digit` (less -/// allocation than what long division normally requires) -/// - during long division, the algorithm may encounter a case from above and will use that instead -/// - division of medium size (<= 512 bits) integers -/// -/// Currently, algorithms faster than 𝒪(n^2) are not implemented, so large integer division may be -/// very slow compared to other algorithms. -impl ApInt { - //Note: the invariant of `ApInt`s where unused bits beyond the bit width must be all zero is - //used heavily here, so that no `clear_unused_bits` needs to be used. - - /// This function is intended to be inlined into all of the unsigned quotient and remainder - /// functions for optimal assembly. - /// `duo` is divided by `div`, and the quotient is assigned to `duo` and remainder assigned - /// to `div` - /// `false` is returned if division by zero happened. Nothing is modified in the case of - /// division by zero. - #[inline] - pub(crate) fn aarons_algorithm_divrem(duo: &mut [Digit], div: &mut [Digit]) -> bool { - //Some parts were put into their own functions and macros because indentation levels were - //getting too high, even for me. - - //The algorithm here is just like the algorithm in - //https://github.com/AaronKutch/specialized-div-rem, - //except that there are more branches and preconditions. There are comments in this function - //such as `//quotient is 0 or 1 check` which correspond to comments in that function. 
- - //assumptions: - // *ini_duo_sd > 0 - // *div_sd == 0 - //modifies `duo` to produce the quotient and returns the remainder - #[inline(always)] - fn large_div_by_small(duo: &mut [Digit], ini_duo_sd: usize, div: &mut [Digit]) { - let div_small = div[0]; - let (mut quo,mut rem) = duo[ini_duo_sd].wrapping_divrem(div_small); - duo[ini_duo_sd] = quo; - for duo_sd_sub1 in (0..ini_duo_sd).rev() { - let duo_double = DoubleDigit::from_lo_hi(duo[duo_sd_sub1],rem); - let temp = duo_double.wrapping_divrem(div_small.dd()); - //the high part is guaranteed to zero out when this is subtracted, - //so only the low parts need to be calculated - quo = temp.0.lo(); - rem = temp.1.lo(); - duo[duo_sd_sub1] = quo; - } - div[0] = rem; - } - - //assumptions: - // *ini_duo_sd > 0 - // *div_sd == 0 - // *div[0].leading_zeros >= 32 - #[inline(always)] - fn large_div_by_u32(duo: &mut [Digit], ini_duo_sd: usize, div: &mut [Digit]) { - let div_u32 = div[0].repr() as u32; - #[inline(always)] - fn dd(x: u32) -> Digit {Digit(u64::from(x))} - #[inline(always)] - fn lo(x: Digit) -> u32 {x.repr() as u32} - #[inline(always)] - fn hi(x: Digit) -> u32 {(x.repr() >> 32) as u32} - #[inline(always)] - fn from_lo_hi(lo: u32, hi: u32) -> Digit {Digit(u64::from(lo) | (u64::from(hi) << 32))} - #[inline(always)] - fn wrapping_divrem(x: u32, y: u32) -> (u32,u32) {(x.wrapping_div(y),x.wrapping_rem(y))} - let (mut quo_hi,mut rem_hi) = wrapping_divrem(hi(duo[ini_duo_sd]),div_u32); - let duo_double = from_lo_hi(lo(duo[ini_duo_sd]), rem_hi); - let temp = duo_double.wrapping_divrem(dd(div_u32)); - let mut quo_lo = lo(temp.0); - let mut rem_lo = lo(temp.1); - duo[ini_duo_sd] = from_lo_hi(quo_lo,quo_hi); - for duo_sd_sub1 in (0..ini_duo_sd).rev() { - let duo_double_hi = from_lo_hi(hi(duo[duo_sd_sub1]),rem_lo); - let temp_hi = duo_double_hi.wrapping_divrem(dd(div_u32)); - quo_hi = lo(temp_hi.0); - rem_hi = lo(temp_hi.1); - let duo_double_lo = from_lo_hi(lo(duo[duo_sd_sub1]),rem_hi); - let temp_lo = 
duo_double_lo.wrapping_divrem(dd(div_u32)); - quo_lo = lo(temp_lo.0); - rem_lo = lo(temp_lo.1); - duo[duo_sd_sub1] = from_lo_hi(quo_lo,quo_hi); - } - div[0] = Digit(rem_lo as u64); - } - - // modifies the `$array` to be the two's complement of itself, all the way up to a `$len` - // number of digits. - macro_rules! twos_complement { - ($len:expr, $array:ident) => { - for i0 in 0..$len { - let bitnot = !$array[i0]; - match bitnot.overflowing_add(Digit::one()) { - (v,false) => { - $array[i0] = v; - for i1 in (i0 + 1)..$len { - $array[i1] = !$array[i1] - } - break; - } - (v,true) => { - $array[i0] = v; - } - } - } - }; - } - - // uge stands for "unsigned greater or equal to" - // This checks for `$lhs >= $rhs` (checking only up to $lhs_len and $rhs_len respectively), - // and runs `$ge_branch` if true and `$ln_branch` otherwise - macro_rules! uge { - ($lhs_len:expr, - $lhs:ident, - $rhs_len:expr, - $rhs:ident, - $ge_branch:block, - $ln_branch:block) => { - let mut b0 = false; - //allows lhs.len() to be smaller than rhs.len() - for i in ($lhs_len..$rhs_len).rev() { - if $rhs[i] != Digit::zero() { - b0 = true; - break - } - } - if b0 || ({ - let mut b1 = false; - for i in (0..$lhs_len).rev() { - if $lhs[i] < $rhs[i] { - b1 = true; - break - } else if $lhs[i] != $rhs[i] { - break - } - } - b1 - }) {$ln_branch} else {$ge_branch} - }; - } - - //ugt stands for "unsigned greater than" - // This checks for `$lhs > $rhs` (checking only up to $lhs_len and $rhs_len respectively), - // and runs `$gt_branch` if true and `$le_branch` otherwise - macro_rules! 
ugt { - ($lhs_len:expr, - $lhs:ident, - $rhs_len:expr, - $rhs:ident, - $gt_branch:block, - $le_branch:block) => { - let mut b0 = false; - //allows lhs.len() to be smaller than rhs.len() - for i in ($lhs_len..$rhs_len).rev() { - if $rhs[i] != Digit::zero() { - b0 = true; - break - } - } - if b0 || ({ - let mut b1 = true; - for i in (0..$lhs_len).rev() { - if $lhs[i] > $rhs[i] { - b1 = false; - break - } else if $lhs[i] != $rhs[i] { - break - } - } - b1 - }) {$le_branch} else {$gt_branch} - }; - } - - //assigns `$sum + $sub` to `$target`, - //and zeros out `$sum` except for it sets `$sum[0]` to `$val` - macro_rules! special0 { - ($len:expr,$sum:ident,$sub:ident,$target:ident,$val:expr) => {{ - //subtraction (`sub` is the two's complement of some value) - let (sum, mut carry) = $sum[0].carrying_add($sub[0]); - $target[0] = sum; - $sum[0] = $val; - for i in 1..($len-1) { - let temp = $sum[i].dd() - .wrapping_add($sub[i].dd()) - .wrapping_add(carry.dd()); - $target[i] = temp.lo(); - $sum[i].unset_all(); - carry = temp.hi(); - } - $target[$len-1] = $sum[$len-1] - .wrapping_add($sub[$len-1]) - .wrapping_add(carry); - $sum[$len-1].unset_all(); - }} - } - - //assigns `$sum + $sub` to `$target`, - //and assigns `$val + $add` to `$sum` - macro_rules! 
special1 { - ($len:expr,$sum:ident,$sub:ident,$target:ident,$val:expr,$add:ident) => {{ - //subtraction (`sub` is the two's complement of some value) - let (temp, mut carry) = $sum[0].carrying_add($sub[0]); - $target[0] = temp; - for i in 1..($len-1) { - let temp = $sum[i].dd() - .wrapping_add($sub[i].dd()) - .wrapping_add(carry.dd()); - $target[i] = temp.lo(); - carry = temp.hi(); - } - $target[$len-1] = $sum[$len-1] - .wrapping_add($sub[$len-1]) - .wrapping_add(carry); - let (temp, mut carry) = $add[0].carrying_add($val); - $sum[0] = temp; - for i0 in 1..$len { - if carry == Digit::zero() { - for i1 in i0..$len { - $sum[i1] = $add[i1]; - break - } - } - let temp = $add[i0].carrying_add(carry); - $sum[i0] = temp.0; - carry = temp.1; - } - }} - } - - //assigns `$sum + $add` to `$sum` - macro_rules! add { - ($len:expr,$sum:ident,$add:ident) => {{ - let (sum, mut carry) = $sum[0].carrying_add($add[0]); - $sum[0] = sum; - for i in 1..($len-1) { - let temp = $sum[i].dd() - .wrapping_add($add[i].dd()) - .wrapping_add(carry.dd()); - $sum[i] = temp.lo(); - carry = temp.hi(); - } - $sum[$len-1] = $sum[$len-1] - .wrapping_add($add[$len-1]) - .wrapping_add(carry); - }} - } - - //assumes that: - //ini_duo_sd > 1 - //div_sd > 1 - #[inline(always)] - fn large_div_by_large( - len: usize, //equal to the length of `duo` and `div`, must be > 2 - duo: &mut [Digit], //the dividend which will become the quotient - ini_duo_sd: usize, //the initial most significant digit of `duo` - div: &mut [Digit], //the divisor which will become the remainder - div_sd: usize //the most significant digit of `div` - ) { - let ini_duo_lz = duo[ini_duo_sd].leading_zeros() as usize; - let div_lz = div[div_sd].leading_zeros() as usize; - //number of significant bits - let ini_duo_sb = (ini_duo_sd * digit::BITS) + (digit::BITS - (ini_duo_lz as usize)); - let div_sb = (div_sd * digit::BITS) + (digit::BITS - div_lz); - //quotient is 0 precheck - if ini_duo_sb < div_sb { - //the quotient should be 0 and 
remainder should be `duo` - for i in 0..=ini_duo_sd { - div[i] = duo[i]; - duo[i].unset_all(); - } - for i in (ini_duo_sd + 1)..=div_sd { - div[i].unset_all(); - } - return - } - //quotient is 0 or 1 check - if ini_duo_sb == div_sb { - let place = ini_duo_sd + 1; - uge!(place,duo,place,div, - { - twos_complement!(place,div); - special0!(place,duo,div,div,Digit::one()); - return - }, - { - for i in 0..=ini_duo_sd { - div[i] = duo[i]; - duo[i].unset_all(); - } - for i in place..=div_sd { - div[i].unset_all(); - } - return - } - ); - } - let ini_bits = ini_duo_sb - div_sb; - //difference between the places of the significant bits - if ini_bits < digit::BITS { - //the `mul` or `mul - 1` algorithm - let (duo_sig_dd, div_sig_dd) = if ini_duo_lz == 0 { - //avoid shr overflow - ( - DoubleDigit::from_lo_hi(duo[ini_duo_sd - 1], duo[ini_duo_sd]), - DoubleDigit::from_lo_hi(div[ini_duo_sd - 1], div[ini_duo_sd]) - ) - } else { - ( - (duo[ini_duo_sd].dd() << (ini_duo_lz + digit::BITS)) | - (duo[ini_duo_sd - 1].dd() << ini_duo_lz) | - (duo[ini_duo_sd - 2].dd() >> (digit::BITS - ini_duo_lz)), - (div[ini_duo_sd].dd() << (ini_duo_lz + digit::BITS)) | - (div[ini_duo_sd - 1].dd() << ini_duo_lz) | - (div[ini_duo_sd - 2].dd() >> (digit::BITS - ini_duo_lz)) - ) - }; - let mul = duo_sig_dd.wrapping_div(div_sig_dd).lo(); - //Allocation could be avoided but it would involve more long division to recover - //`div`. 
- //this will become `-(div * mul)` - let mut sub: Vec = Vec::with_capacity(len); - //first digit done and carry - let temp = mul.carrying_mul(div[0]); - sub.push(temp.0); - let mut carry = temp.1; - //middle of row - for i in 1..div_sd { - let temp = mul.carrying_mul_add(div[i], carry); - sub.push(temp.0); - carry = temp.1; - } - //final digit, test for `div * mul > duo`, and then form the two's complement - if div_sd == len - 1 { - let temp = mul.carrying_mul_add(div[div_sd], carry); - sub.push(temp.0); - if temp.1 != Digit::zero() { - //overflow - //the quotient should be `mul - 1` and remainder should be - //`duo + (div - div*mul)` - twos_complement!(len, sub); - add!(len,sub,div); - special0!(len,duo,sub,div,mul.wrapping_sub(Digit::one())); - return - } - //if `div * mul > duo` - ugt!(len,sub,len,duo, - { - twos_complement!(len, sub); - add!(len,sub,div); - special0!(len,duo,sub,div,mul.wrapping_sub(Digit::one())); - return - }, - { - //the quotient is `mult` and remainder is `duo - (div * mult)` - twos_complement!(len, sub); - special0!(len,duo,sub,div,mul); - return - } - ); - } else { - let temp = mul.carrying_mul_add(div[div_sd], carry); - sub.push(temp.0); - sub.push(temp.1); - for _ in sub.len()..len { - sub.push(Digit::zero()); - } - //if `div * mul > duo` - ugt!(len,sub,len,duo, - { - twos_complement!(len, sub); - add!(len,sub,div); - special0!(len,duo,sub,div,mul.wrapping_sub(Digit::one())); - return - }, - { - //the quotient is `mult` and remainder is `duo - (div * mult)` - twos_complement!(len, sub); - special0!(len,duo,sub,div,mul); - return - } - ); - } - } - let mut duo_sd = ini_duo_sd; - let mut duo_lz = ini_duo_lz; - //the number of lesser significant digits and bits not a part of `div_sig_d` - let div_lesser_bits = digit::BITS - (div_lz as usize) + (digit::BITS * (div_sd - 1)); - //the most significant `Digit` bits of div - let div_sig_d = if div_lz == 0 { - div[div_sd] - } else { - (div[div_sd] << div_lz) | (div[div_sd - 1] >> (digit::BITS - 
div_lz)) - }; - //has to be a `DoubleDigit` in case of overflow - let div_sig_d_add1 = div_sig_d.dd().wrapping_add(Digit::one().dd()); - let mut duo_lesser_bits; - let mut duo_sig_dd; - //TODO: fix sizes here and below - let quo_potential = len; - //if ini_bits % digit::BITS == 0 {ini_bits / digit::BITS} - //else {(ini_bits / digit::BITS) + 1}; - let mut quo: Vec = vec![Digit::zero(); quo_potential as usize]; - loop { - duo_lesser_bits = (digit::BITS - (duo_lz as usize)) + (digit::BITS * (duo_sd - 2)); - duo_sig_dd = if duo_lz == 0 { - DoubleDigit::from_lo_hi(duo[duo_sd - 1],duo[duo_sd]) - } else { - (duo[duo_sd].dd() << (duo_lz + digit::BITS)) | - (duo[duo_sd - 1].dd() << duo_lz) | - (duo[duo_sd - 2].dd() >> (digit::BITS - duo_lz)) - }; - if duo_lesser_bits >= div_lesser_bits { - let bits = duo_lesser_bits - div_lesser_bits; - //bits_ll is the number of lesser bits in the digit that contains lesser and - //greater bits - let (digits, bits_ll) = (bits / digit::BITS, bits % digit::BITS); - //Unfortunately, `mul` here can be up to (2^2n - 1)/(2^(n-1)), where `n` - //is the number of bits in a `Digit`. This means that an `n+1` bit - //integer is needed to store mul. Because only one extra higher bit is involved, - //the algebraic simplification `(mul + 2^n)*div` to `mul*div + div*2^n` can be - //used when that highest bit is set. This just requires faster and simpler - //addition inlining hell instead of long multiplication inlining hell. 
- let mul = duo_sig_dd.wrapping_div(div_sig_d_add1); - //add `mul << bits` to `quo` - //no inlining hell here because `bits_ll < n` and it takes a shift of `n` - //to overflow - let split_mul = mul << bits_ll; - let (temp, mut carry) = split_mul.lo().carrying_add(quo[digits]); - quo[digits] = temp; - let temp = split_mul.hi().dd() - .wrapping_add(quo[digits + 1].dd()) - .wrapping_add(carry.dd()); - quo[digits + 1] = temp.lo(); - carry = temp.hi(); - for i in (digits+2)..quo.len() { - if carry == digit::ZERO {break} - let temp = quo[i].carrying_add(carry); - quo[i] = temp.0; - carry = temp.1; - } - //special long division algorithm core. - //Note that nearly all branches before this are not just wanted for performance - //reasons but are actually required in order to not break this. - //these blocks subtract `(mul * div) << bits` from `duo` - //check for highest bit set - if mul.hi() == Digit::zero() { - let mul = mul.lo(); - //carry for bits that wrap across digit boundaries when `<< bits_ll` applied - let (temp0, mut wrap_carry) = (div[0].dd() << bits_ll).lo_hi(); - //the regular multiplication carry - let (temp1, mut mul_carry) = mul.dd().wrapping_mul(temp0.dd()).lo_hi(); - //this carry includes the two's complement increment carry - let (temp2, mut add_carry) = (!temp1).dd() - .wrapping_add(duo[digits].dd()) - .wrapping_add(Digit::one().dd()) - .lo_hi(); - duo[digits] = temp2; - for i in (digits + 1)..=duo_sd { - let temp0 = ( - (div[i - digits].dd() << bits_ll) | wrap_carry.dd() - ).lo_hi(); - wrap_carry = temp0.1; - let temp1 = mul.dd() - .wrapping_mul(temp0.0.dd()) - .wrapping_add(mul_carry.dd()) - .lo_hi(); - mul_carry = temp1.1; - let temp2 = (!temp1.0).dd() - .wrapping_add(duo[i].dd()) - .wrapping_add(add_carry.dd()).lo_hi(); - add_carry = temp2.1; - duo[i] = temp2.0; - } - } else { - // 2222x <- mul_carry - // 7987 <- div - // 3 <- mul (13) without high bit - // *_____ - // 23961 <- temp0 - // - // 1111xx <- add0_carry - // 23961 <- temp0 - // 7987 <- div 
shifted up by one digit - //+______ - // 103831 <- temp1 - // - // subtract duo by temp1 negated (with the carry from the two's complement - //being put into `wrap_carry`) and shifted (with `wrap_carry`) - - let mul = mul.lo(); - let (temp0, mut mul_carry) = mul.carrying_mul(div[0]); - let temp1 = temp0; - let mut add0_carry = Digit::zero(); - //the increment from the two's complement can be stored in `wrap_carry` - let (temp2, mut wrap_carry) = - ( - (!temp1).dd() - .wrapping_add(Digit::one().dd()) - << bits_ll - ).lo_hi(); - let (temp3, mut add1_carry) = temp2.carrying_add(duo[digits]); - duo[digits] = temp3; - for i in (digits + 1)..=duo_sd { - let temp0 = - mul.dd() - .wrapping_mul(div[i - digits].dd()) - .wrapping_add(mul_carry.dd()); - mul_carry = temp0.hi(); - let temp1 = - temp0.lo().dd() - .wrapping_add(div[i - digits - 1].dd()) - .wrapping_add(add0_carry.dd()); - add0_carry = temp1.hi(); - let temp2 = - ((!temp1.lo()).dd() << bits_ll) - .wrapping_add(wrap_carry.dd()); - wrap_carry = temp2.hi(); - let temp3 = - temp2.lo().dd() - .wrapping_add(duo[i].dd()) - .wrapping_add(add1_carry.dd()); - add1_carry = temp3.hi(); - duo[i] = temp3.lo(); - } - } - } else { - //the `mul` or `mul - 1` algorithm with addition from `quo` - let div_sig_dd = if duo_lz == 0 { - //avoid shr overflow - DoubleDigit::from_lo_hi(div[duo_sd - 1], div[duo_sd]) - } else { - (div[duo_sd].dd() << (duo_lz + digit::BITS)) | - (div[duo_sd - 1].dd() << duo_lz) | - (div[duo_sd - 2].dd() >> (digit::BITS - duo_lz)) - }; - let mul = duo_sig_dd.wrapping_div(div_sig_dd).lo(); - //I could avoid allocation but it would involve more long division to recover - //`div`, followed by a second long multiplication with `mul - 1`. 
- //this will become `-(div * mul)` - //note: div_sd != len - 1 because it would be caught by the first `mul` or - //`mul-1` algorithm - let mut sub: Vec = Vec::with_capacity(len); - //first digit done and carry - let (temp, mut mul_carry) = mul.dd().wrapping_mul(div[0].dd()).lo_hi(); - sub.push(temp); - for i in 1..div_sd { - let temp = mul.carrying_mul_add(div[i], mul_carry); - sub.push(temp.0); - mul_carry = temp.1; - } - let temp = mul.carrying_mul_add(div[div_sd], mul_carry); - sub.push(temp.0); - sub.push(temp.1); - for _ in (div_sd + 2)..len { - sub.push(Digit::zero()); - } - let sub_len = sub.len(); - ugt!(sub_len,sub,len,duo, - { - //the quotient is `quo + (mult - 1)` and remainder is - //`duo + (div - div*mul)` - twos_complement!(sub_len, sub); - add!(sub_len,sub,div); - special1!(sub_len,duo,sub,div,mul.wrapping_sub(Digit::one()),quo); - return - }, - { - //the quotient is `quo + mult` and remainder is `duo - (div * mult)` - twos_complement!(sub_len, sub); - special1!(sub_len,duo,sub,div,mul,quo); - return - } - ); - } - //find the new `duo_sd` - for i in (0..=duo_sd).rev() { - if duo[i] != Digit::zero() { - duo_sd = i; - break - } - if i == 0 { - //the quotient should be `quo` and remainder should be zero - for i in 0..len { - div[i] = Digit::zero(); - duo[i] = quo[i]; - } - return - } - } - duo_lz = duo[duo_sd].leading_zeros() as usize; - let duo_sb = (duo_sd * digit::BITS) + (digit::BITS - duo_lz); - //`quo` should have 0 or 1 added to it check - if duo_sb == div_sb { - //if `div <= duo` - uge!(len,duo,len,div, - { - //the quotient should be `quo + 1` and remainder should be `duo - div` - twos_complement!(len,div); - add!(len,div,duo); - for i0 in 0..len { - match quo[i0].overflowing_add(Digit::one()) { - (v,false) => { - duo[i0] = v; - for i1 in (i0 + 1)..len { - duo[i1] = quo[i1]; - } - break; - } - (v,true) => { - duo[i0] = v; - } - } - } - return - }, - { - //the quotient should be `quo` and remainder should be `duo` - for i in 0..len { - div[i] = 
duo[i]; - duo[i] = quo[i]; - } - return - } - ); - } - //more 0 cases check - if div_sb > duo_sb { - //the quotient should be `quo` and remainder should be `duo` - for i in 0..len { - div[i] = duo[i]; - duo[i] = quo[i]; - } - return - } - //this can only happen if `div_sd < 2` (because of above branches), - //but it is not worth it to unroll further - if duo_sd < 2 { - //duo_sd < 2 because of the "if `duo >= div`" branch above - //simple division and addition - let duo_dd = DoubleDigit::from_lo_hi(duo[0],duo[1]); - let div_dd = DoubleDigit::from_lo_hi(div[0],div[1]); - let (mul, rem) = duo_dd.wrapping_divrem(div_dd); - //the quotient should be `quo + mul` and remainder should be `rem` - div[0] = rem.lo(); - div[1] = rem.hi(); - let (temp, mut carry) = quo[0].carrying_add(mul.lo()); - duo[0] = temp; - let temp = quo[1].dd() - .wrapping_add(mul.hi().dd()) - .wrapping_add(carry.dd()); - duo[1] = temp.lo(); - carry = temp.hi(); - for i0 in 2..len { - if carry == Digit::zero() { - for i1 in i0..len { - duo[i1] = quo[i1]; - } - break - } - let temp = quo[i0].carrying_add(carry); - duo[i0] = temp.0; - carry = temp.1; - } - return - } - } - } - - //Note: Special cases are aggressively taken care of throughout this function, both because - //the core long division algorithm does not work on many edges, and because of optimization. 
- //find the most significant non zeroes, check for `duo` < `div`, and check for division by - //zero - match div.iter().rposition(|x| x != &Digit::zero()) { - Some(div_sd) => { - //the initial most significant nonzero duo digit - let ini_duo_sd: usize = match duo.iter().rposition(|x| x != &Digit::zero()) { - Some(x) => x, - None => { - //quotient and remainder should be 0 - //duo is already zero - for x in div.iter_mut() { - x.unset_all() - } - return true - }, - }; - if ini_duo_sd < div_sd { - //the divisor is larger than the dividend - //quotient should be 0 and remainder is `duo` - for (duo_d,div_d) in duo.iter_mut().zip(div.iter_mut()) { - *div_d = *duo_d; - (*duo_d).unset_all() - } - return true - } - match (ini_duo_sd == 0, div_sd == 0) { - (false,false) => { - //ini_duo_sd cannot be 0 or 1 for `large_div_by_large` - if ini_duo_sd == 1 { - let temp = DoubleDigit::from_lo_hi(duo[0], duo[1]).wrapping_divrem(DoubleDigit::from_lo_hi(div[0],div[1])); - duo[0] = temp.0.lo(); - duo[1] = temp.0.hi(); - div[0] = temp.1.lo(); - div[1] = temp.1.hi(); - return true - } - large_div_by_large( - duo.len(), - duo, - ini_duo_sd, - div, - div_sd - ); - return true - }, - (true,false) => unreachable!(), - (false,true) => { - if div[0].leading_zeros() >= 32 { - large_div_by_u32(duo,ini_duo_sd,div); - return true - } else { - large_div_by_small(duo, ini_duo_sd, div); - return true - } - }, - (true,true) => { - let temp = duo[0].wrapping_divrem(div[0]); - duo[0] = temp.0; - div[0] = temp.1; - return true - } - } - }, - None => return false, - } - } - - /// This function is intended to be inlined into all of the unsigned quotient and remainder - /// functions for optimal assembly. - /// `duo` is divided by `div`, and the remainder is assigned to `duo` and quotient assigned - /// to `div` - /// `false` is returned if division by zero happened. Nothing is modified in the case of - /// division by zero. 
- #[inline] - pub(crate) fn aarons_algorithm_remdiv(duo: &mut [Digit], div: &mut [Digit]) -> bool { - if ApInt::aarons_algorithm_divrem(duo, div) { - let mut temp; - for i in 0..duo.len() { - temp = duo[i]; - duo[i] = div[i]; - div[i] = temp; - } - true - } else { - false - } - } - - /// Divides `lhs` by `rhs` using **unsigned** interpretation and sets `lhs` equal to the - /// quotient and `rhs` equal to the remainder. This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `lhs` and `rhs` have unmatching bit widths. - /// - If division by zero is attempted - pub fn wrapping_udivrem_assign(lhs: &mut ApInt, rhs: &mut ApInt) -> Result<()> { - match ApInt::zip_access_data_mut_both(lhs, rhs)? { - ZipDataAccessMutBoth::Inl(duo,div) => { - if *div != Digit::zero() { - let temp = duo.wrapping_divrem(*div); - *duo = temp.0; - *div = temp.1; - return Ok(()) - } - } - ZipDataAccessMutBoth::Ext(duo,div) => { - if ApInt::aarons_algorithm_divrem(duo, div) { - return Ok(()) - } - } - } - //Note that the typical places `Err` `Ok` are returned is switched. This is because - //`rhs.is_zero()` is found as part of finding `duo_sd` inside `aarons_algorithm_divrem`, - //and `lhs.clone()` cannot be performed inside the match statement - return Err(Error::division_by_zero(DivOp::UnsignedDivRem, lhs.clone())) - } - - /// Divides `lhs` by `rhs` using **unsigned** interpretation and sets `lhs` equal to the - /// remainder and `rhs` equal to the quotient. This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `lhs` and `rhs` have unmatching bit widths. - /// - If division by zero is attempted - pub fn wrapping_uremdiv_assign(lhs: &mut ApInt, rhs: &mut ApInt) -> Result<()> { - match ApInt::zip_access_data_mut_both(lhs, rhs)? 
{ - ZipDataAccessMutBoth::Inl(duo,div) => { - if *div != Digit::zero() { - let temp = duo.wrapping_divrem(*div); - *duo = temp.1; - *div = temp.0; - return Ok(()) - } - } - ZipDataAccessMutBoth::Ext(duo,div) => { - if ApInt::aarons_algorithm_remdiv(duo, div) { - return Ok(()) - } - } - } - return Err(Error::division_by_zero(DivOp::UnsignedRemDiv, lhs.clone())) - } - - /// Quotient-assigns `lhs` by `rhs` inplace using **unsigned** interpretation. - /// This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `lhs` and `rhs` have unmatching bit widths. - /// - If division by zero is attempted - pub fn wrapping_udiv_assign(&mut self, rhs: &ApInt) -> Result<()> { - match self.zip_access_data_mut_self(rhs)? { - Inl(duo, div) => { - if !div.is_zero() { - *duo = duo.wrapping_div(div); - return Ok(()) - } - } - Ext(duo, div) => { - if ApInt::aarons_algorithm_divrem(duo, &mut div.to_vec()[..]) { - return Ok(()) - } - } - } - return Err(Error::division_by_zero(DivOp::UnsignedDiv, self.clone())) - } - - /// Divides `lhs` by `rhs` using **unsigned** interpretation and returns the quotient. - /// This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `lhs` and `rhs` have unmatching bit widths. - /// - If division by zero is attempted - pub fn into_wrapping_udiv(self, rhs: &ApInt) -> Result { - try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_udiv_assign) - } - - /// Remainder-assigns `lhs` by `rhs` inplace using **unsigned** interpretation. - /// This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `lhs` and `rhs` have unmatching bit widths. - /// - If division by zero is attempted - pub fn wrapping_urem_assign(&mut self, rhs: &ApInt) -> Result<()> { - match self.zip_access_data_mut_self(rhs)? 
{ - Inl(duo, div) => { - if !div.is_zero() { - *duo = duo.wrapping_rem(div); - return Ok(()) - } - } - Ext(duo, div) => { - if ApInt::aarons_algorithm_remdiv(duo, &mut div.to_vec()[..]) { - return Ok(()) - } - } - } - return Err(Error::division_by_zero(DivOp::UnsignedRem, self.clone())) - } - - /// Divides `lhs` by `rhs` using **unsigned** interpretation and returns the remainder. - /// This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `lhs` and `rhs` have unmatching bit widths. - /// - If division by zero is attempted - pub fn into_wrapping_urem(self, rhs: &ApInt) -> Result { - try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_urem_assign) - } - - /// Divides `lhs` by `rhs` using **signed** interpretation and sets `lhs` equal to the - /// quotient and `rhs` equal to the remainder. This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `lhs` and `rhs` have unmatching bit widths. - /// - If division by zero is attempted - pub fn wrapping_sdivrem_assign(lhs: &mut ApInt, rhs: &mut ApInt) -> Result<()> { - if rhs.is_zero() { - return Err(Error::division_by_zero(DivOp::SignedDivRem, lhs.clone())) - } - let (negate_lhs, negate_rhs) = match ((*lhs).is_negative(), (*rhs).is_negative()) { - (false,false) => (false,false), - (true,false) => { - lhs.wrapping_neg(); - (true, true) - }, - (false,true) => { - rhs.wrapping_neg(); - (true, false) - }, - (true,true) => { - lhs.wrapping_neg(); - rhs.wrapping_neg(); - (false, true) - }, - }; - ApInt::wrapping_udivrem_assign(lhs, rhs).unwrap(); - if negate_lhs {lhs.wrapping_neg()} - if negate_rhs {rhs.wrapping_neg()} - //clearing unused bits is handled by `wrapping_neg()` - return Ok(()); - } - - /// Divides `lhs` by `rhs` using **signed** interpretation and sets `lhs` equal to the - /// remainder and `rhs` equal to the quotient. This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `lhs` and `rhs` have unmatching bit widths. 
- /// - If division by zero is attempted - pub fn wrapping_sremdiv_assign(lhs: &mut ApInt, rhs: &mut ApInt) -> Result<()> { - if rhs.is_zero() { - return Err(Error::division_by_zero(DivOp::SignedRemDiv, lhs.clone())) - } - let (negate_lhs, negate_rhs) = match ((*lhs).is_negative(), (*rhs).is_negative()) { - (false,false) => (false,false), - (true,false) => { - lhs.wrapping_neg(); - (true, true) - }, - (false,true) => { - rhs.wrapping_neg(); - (false, true) - }, - (true,true) => { - lhs.wrapping_neg(); - rhs.wrapping_neg(); - (true, false) - }, - }; - ApInt::wrapping_uremdiv_assign(lhs, rhs).unwrap(); - if negate_lhs {lhs.wrapping_neg()} - if negate_rhs {rhs.wrapping_neg()} - //clearing unused bits is handled by `wrapping_neg()` - return Ok(()); - } - - /// Quotient-assigns `lhs` by `rhs` inplace using **signed** interpretation. - /// This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `lhs` and `rhs` have unmatching bit widths. - /// - If division by zero is attempted - pub fn wrapping_sdiv_assign(&mut self, rhs: &ApInt) -> Result<()> { - if rhs.is_zero() { - return Err(Error::division_by_zero(DivOp::SignedDiv, self.clone())) - } - let mut rhs_clone = (*rhs).clone(); - let negate_lhs = match ((*self).is_negative(), rhs_clone.is_negative()) { - (false,false) => false, - (true,false) => { - self.wrapping_neg(); - true - }, - (false,true) => { - rhs_clone.wrapping_neg(); - true - }, - (true,true) => { - self.wrapping_neg(); - rhs_clone.wrapping_neg(); - false - }, - }; - ApInt::wrapping_udivrem_assign(self, &mut rhs_clone).unwrap(); - if negate_lhs {self.wrapping_neg()} - //clearing unused bits is handled by `wrapping_neg()` - Ok(()) - } - - /// Divides `self` by `rhs` using **signed** interpretation and returns the quotient. - /// This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. 
- /// - If division by zero is attempted - /// - pub fn into_wrapping_sdiv(self, rhs: &ApInt) -> Result { - try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_sdiv_assign) - } - - /// Remainder-assigns `lhs` by `rhs` inplace using **signed** interpretation. - /// This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `lhs` and `rhs` have unmatching bit widths. - /// - If division by zero is attempted - pub fn wrapping_srem_assign(&mut self, rhs: &ApInt) -> Result<()> { - if rhs.is_zero() { - return Err(Error::division_by_zero(DivOp::SignedRem, self.clone())) - } - let mut rhs_clone = (*rhs).clone(); - let negate_lhs = match ((*self).is_negative(), rhs_clone.is_negative()) { - (false,false) => false, - (true,false) => { - self.wrapping_neg(); - true - }, - (false,true) => { - rhs_clone.wrapping_neg(); - false - }, - (true,true) => { - self.wrapping_neg(); - rhs_clone.wrapping_neg(); - true - }, - }; - ApInt::wrapping_uremdiv_assign(self, &mut rhs_clone).unwrap(); - if negate_lhs {self.wrapping_neg()} - //clearing unused bits is handled by `wrapping_neg()` - Ok(()) - } - - /// Divides `self` by `rhs` using **signed** interpretation and returns the remainder. - /// This function **may** allocate memory. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. - /// - If division by zero is attempted - pub fn into_wrapping_srem(self, rhs: &ApInt) -> Result { - try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_srem_assign) - } -} - -/// ============================================================================ -/// Standard `ops` trait implementations. -/// ---------------------------------------------------------------------------- -/// **Note:** These ops will panic if their corresponding functions return an -/// error. They may also allocate memory. -/// -/// `ApInt` implements some `std::ops` traits for improved usability. 
-/// Only traits for operations that do not depend on the signedness -/// interpretation of the specific `ApInt` instance are actually implemented. -/// Operations like `div` and `rem` are not expected to have an -/// implementation since a favor in unsigned or signed cannot be decided. -/// ============================================================================ - -impl Neg for ApInt { - type Output = ApInt; - - fn neg(self) -> Self::Output { - self.into_wrapping_neg() - } -} - -impl<'a> Neg for &'a ApInt { - type Output = ApInt; - - fn neg(self) -> Self::Output { - self.clone().into_wrapping_neg() - } -} - -impl<'a> Neg for &'a mut ApInt { - type Output = &'a mut ApInt; - - fn neg(self) -> Self::Output { - self.wrapping_neg(); - self - } -} - -impl<'a> Add<&'a ApInt> for ApInt { - type Output = ApInt; - - fn add(self, rhs: &'a ApInt) -> Self::Output { - self.into_wrapping_add(rhs).unwrap() - } -} - -impl<'a, 'b> Add<&'a ApInt> for &'b ApInt { - type Output = ApInt; - - fn add(self, rhs: &'a ApInt) -> Self::Output { - self.clone().into_wrapping_add(rhs).unwrap() - } -} - -impl<'a> AddAssign<&'a ApInt> for ApInt { - fn add_assign(&mut self, rhs: &'a ApInt) { - self.wrapping_add_assign(rhs).unwrap() - } -} - -impl<'a> Sub<&'a ApInt> for ApInt { - type Output = ApInt; - - fn sub(self, rhs: &'a ApInt) -> Self::Output { - self.into_wrapping_sub(rhs).unwrap() - } -} - -impl<'a, 'b> Sub<&'a ApInt> for &'b ApInt { - type Output = ApInt; - - fn sub(self, rhs: &'a ApInt) -> Self::Output { - self.clone().into_wrapping_sub(rhs).unwrap() - } -} - -impl<'a> SubAssign<&'a ApInt> for ApInt { - fn sub_assign(&mut self, rhs: &'a ApInt) { - self.wrapping_sub_assign(rhs).unwrap() - } -} - -impl<'a> Mul<&'a ApInt> for ApInt { - type Output = ApInt; - - fn mul(self, rhs: &'a ApInt) -> Self::Output { - self.into_wrapping_mul(rhs).unwrap() - } -} - -impl<'a, 'b> Mul<&'a ApInt> for &'b ApInt { - type Output = ApInt; - - fn mul(self, rhs: &'a ApInt) -> Self::Output { - 
self.clone().into_wrapping_mul(rhs).unwrap() - } -} - -impl<'a> MulAssign<&'a ApInt> for ApInt { - fn mul_assign(&mut self, rhs: &'a ApInt) { - self.wrapping_mul_assign(rhs).unwrap(); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - mod inc { - use super::*; - use std::u64; - - #[test] - fn test() { - assert_eq!(ApInt::from(14u8).into_wrapping_inc(),ApInt::from(15u8)); - assert_eq!(ApInt::from(15u8).into_wrapping_inc(),ApInt::from(16u8)); - assert_eq!(ApInt::from(16u8).into_wrapping_inc(),ApInt::from(17u8)); - assert_eq!(ApInt::from(17u8).into_wrapping_inc(),ApInt::from(18u8)); - assert_eq!(ApInt::from([0u64,0,0]).into_wrapping_inc(),ApInt::from([0u64,0,1])); - assert_eq!(ApInt::from([0,7,u64::MAX]).into_wrapping_inc(),ApInt::from([0u64,8,0])); - assert_eq!(ApInt::from([u64::MAX,u64::MAX]).into_wrapping_inc(),ApInt::from([0u64,0])); - assert_eq!(ApInt::from([0,u64::MAX,u64::MAX - 1]).into_wrapping_inc(),ApInt::from([0,u64::MAX,u64::MAX])); - assert_eq!(ApInt::from([0,u64::MAX,0]).into_wrapping_inc(),ApInt::from([0,u64::MAX,1])); - } - } - - mod wrapping_neg { - use super::*; - use bitwidth::{BitWidth}; - - fn assert_symmetry(input: ApInt, expected: ApInt) { - assert_eq!(input.clone().into_wrapping_neg(), expected.clone()); - assert_eq!(expected.into_wrapping_neg(), input); - } - - fn test_vals() -> impl Iterator { - [0_i128, 1, 2, 4, 5, 7, 10, 42, 50, 100, 128, 150, - 1337, 123123, 999999, 987432, 77216417].into_iter().map(|v| *v) - } - - #[test] - fn simple() { - assert_symmetry(ApInt::zero(BitWidth::w1()), ApInt::zero(BitWidth::w1())); - assert_symmetry(ApInt::one(BitWidth::w1()), ApInt::all_set(BitWidth::w1())); - } - - #[test] - fn range() { - for v in test_vals() { - assert_symmetry(ApInt::from_i8(v as i8), ApInt::from_i8(-v as i8)); - assert_symmetry(ApInt::from_i16(v as i16), ApInt::from_i16(-v as i16)); - assert_symmetry(ApInt::from_i32(v as i32), ApInt::from_i32(-v as i32)); - assert_symmetry(ApInt::from_i64(v as i64), ApInt::from_i64(-v as i64)); - 
assert_symmetry(ApInt::from_i128(v), ApInt::from_i128(-v)); - } - } - } - - mod mul { - use super::*; - use bitwidth::BitWidth; - use std::{u8,u64}; - - #[test] - fn rigorous() { - //there are many special case and size optimization paths, so this test must be very - //rigorous. - - //multiplication of apints composed of only u8::MAX in their least significant digits - //only works for num_u8 > 1 - fn nine_test(num_u8: usize) { - let mut lhs; - let mut rhs = ApInt::from(0u8).into_zero_resize(BitWidth::new(num_u8 * 8).unwrap()); - let nine = - ApInt::from(u8::MAX).into_zero_resize(BitWidth::new(num_u8 * 8).unwrap()); - for rhs_nine in 0..num_u8 { - rhs.wrapping_shl_assign(8usize).unwrap(); - rhs |= &nine; - lhs = ApInt::from(0u8).into_zero_resize(BitWidth::new(num_u8 * 8).unwrap()); - 'outer: for lhs_nine in 0..num_u8 { - lhs.wrapping_shl_assign(8usize).unwrap(); - lhs |= &nine; - //imagine multiplying a string of base 10 nines together. - //It will produce things like 998001, 8991, 98901, 9989001. 
- //this uses a formula for the number of nines, eights, and zeros except here - //nine is u8::MAX, eight is u8::MAX - 1, and zero is 0u8 - let mut zeros_after_one = if lhs_nine < rhs_nine { - lhs_nine - } else { - rhs_nine - }; - let mut nines_before_eight = if lhs_nine > rhs_nine { - lhs_nine - rhs_nine - } else { - rhs_nine - lhs_nine - }; - let mut nines_after_eight = if lhs_nine < rhs_nine { - lhs_nine - } else { - rhs_nine - }; - let mut result = lhs.clone().into_wrapping_mul(&rhs).unwrap(); - assert_eq!(result.clone().resize_to_u8(), 1u8); - for i in 0..zeros_after_one { - if i >= num_u8 - 1 { - continue 'outer - } - result.wrapping_lshr_assign(8usize).unwrap(); - assert_eq!(result.clone().resize_to_u8(),0); - } - for i in 0..nines_before_eight { - if zeros_after_one + i >= num_u8 - 1 { - continue 'outer - } - result.wrapping_lshr_assign(8usize).unwrap(); - assert_eq!(result.clone().resize_to_u8(), u8::MAX); - } - if zeros_after_one + nines_before_eight >= num_u8 - 1 { - continue 'outer - } - result.wrapping_lshr_assign(8usize).unwrap(); - assert_eq!(result.clone().resize_to_u8(),u8::MAX - 1); - for i in 0..nines_after_eight { - if 1 + zeros_after_one + nines_before_eight + i >= num_u8 - 1 { - continue 'outer - } - result.wrapping_lshr_assign(8usize).unwrap(); - assert_eq!(result.clone().resize_to_u8(),u8::MAX); - } - } - } - } - //test inl apints - assert_eq!( - ApInt::from(u8::MAX) - .into_wrapping_mul(&ApInt::from(u8::MAX)) - .unwrap(), - ApInt::from(1u8) - ); - nine_test(2); - nine_test(3); - nine_test(4); - nine_test(7); - nine_test(8); - //test ext apints - nine_test(9); - nine_test(16); - //5 digits wide - nine_test(40); - nine_test(63); - //non overflowing test - let resize = [ - 7usize, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 137, 200, 255, - 256, 700, 907, 1024, 2018, 2019, - ]; - let lhs_shl = [ - 0usize, 1, 0, 1, 4, 7, 4, 10, 13, 0, 31, 25, 7, 17, 32, 50, 0, 64, 249, 8, 777, 0, - 1000, 0, - ]; - let rhs_shl = [ - 0usize, 0, 1, 1, 
3, 6, 4, 14, 10, 0, 0, 25, 0, 18, 32, 49, 100, 64, 0, 256, 64, - 900, 1000, 0, - ]; - for (i, _) in resize.iter().enumerate() { - let mut lhs = ApInt::from(5u8) - .into_zero_resize(BitWidth::new(resize[i]).unwrap()) - .into_wrapping_shl(lhs_shl[i]) - .unwrap(); - let mut rhs = ApInt::from(11u8) - .into_zero_resize(BitWidth::new(resize[i]).unwrap()) - .into_wrapping_shl(rhs_shl[i]) - .unwrap(); - let mut zero = ApInt::from(0u8).into_zero_resize(BitWidth::new(resize[i]).unwrap()); - let mut one = ApInt::from(1u8).into_zero_resize(BitWidth::new(resize[i]).unwrap()); - let mut expected = ApInt::from(55u8) - .into_zero_resize(BitWidth::new(resize[i]).unwrap()) - .into_wrapping_shl(rhs_shl[i] + lhs_shl[i]) - .unwrap(); - assert_eq!(lhs.clone().into_wrapping_mul(&zero).unwrap(), zero); - assert_eq!(zero.clone().into_wrapping_mul(&rhs).unwrap(), zero); - assert_eq!(lhs.clone().into_wrapping_mul(&one).unwrap(), lhs); - assert_eq!(one.clone().into_wrapping_mul(&rhs).unwrap(), rhs); - assert_eq!(lhs.clone().into_wrapping_mul(&rhs).unwrap(), expected); - } - assert_eq!( - ApInt::from([0,0,0,0,u64::MAX,0,u64::MAX,u64::MAX]) - .into_wrapping_mul(&ApInt::from([0,0,0,0,u64::MAX,u64::MAX,0,u64::MAX])).unwrap() - ,ApInt::from([u64::MAX,0,1,u64::MAX - 3,1,u64::MAX,u64::MAX,1])); - } - } - - mod div_rem { - use super::*; - use bitwidth::BitWidth; - use std::u64; - - //TODO: add division by zero testing after error refactoring is finished - //use errors::ErrorKind; - #[test] - fn simple() { - /// does all of the simple division tests - /// - `$signed`: if the functions are signed divisions or not - /// - `$fun_assign`: a division function such as `wrapping_udiv_assign` with that - /// signature - /// - `$fun_into`: a division function such as `into_wrapping_udiv` with that signature - /// - `$fun`: a division function such as `wrapping_udivrem_assign` with that signature - /// - `$r0`: the quotient or remainder or both of 80 by 7, depending on division - /// function type - /// - 
`$r1`, `$r2`, `$r3`: 80 by -7, -80 by 7, -80 by -7. These can be 0 if `$signed` is - /// false. - macro_rules! s { - ($signed:expr,$fun_assign:ident,$fun_into:ident,$r0:expr,$r1:expr,$r2:expr,$r3:expr/*,$div_op:ident*/) => { - /*match $fun_assign - match ApInt::from(123u8).$fun_into(&ApInt::from(0u8)) { - Err(Error{kind: ErrorKind::DivisionByZero{op: DivOp::$div_op, lhs: x}, message: _, annotation: _}) => { - assert_eq!(x,ApInt::from(123u8)); - }, - _ => unreachable!(), - } - match ApInt::from(12345678912345689123456789123456789u128).*/ - { - let lhs = ApInt::from(80i8); - let rhs = ApInt::from(7i8); - let mut temp = lhs.clone(); - temp.$fun_assign(&rhs).unwrap(); - assert_eq!(temp, ApInt::from($r0)); - assert_eq!(lhs.$fun_into(&rhs).unwrap(), ApInt::from($r0)); - } - if $signed { - { - let lhs = ApInt::from(80i8); - let rhs = ApInt::from(-7i8); - let mut temp = lhs.clone(); - temp.$fun_assign(&rhs).unwrap(); - assert_eq!(temp, ApInt::from($r1)); - assert_eq!(lhs.$fun_into(&rhs).unwrap(), ApInt::from($r1)); - } - { - let lhs = ApInt::from(-80i8); - let rhs = ApInt::from(7i8); - let mut temp = lhs.clone(); - temp.$fun_assign(&rhs).unwrap(); - assert_eq!(temp, ApInt::from($r2)); - assert_eq!(lhs.$fun_into(&rhs).unwrap(), ApInt::from($r2)); - } - { - let lhs = ApInt::from(-80i8); - let rhs = ApInt::from(-7i8); - let mut temp = lhs.clone(); - temp.$fun_assign(&rhs).unwrap(); - assert_eq!(temp, ApInt::from($r3)); - assert_eq!(lhs.$fun_into(&rhs).unwrap(), ApInt::from($r3)); - } - } - }; - ($signed:expr,$fun:ident,$r0:expr,$r1:expr,$r2:expr,$r3:expr/*,$div_op:ident*/) => { - { - let mut lhs = ApInt::from(80i8); - let mut rhs = ApInt::from(7i8); - ApInt::$fun(&mut lhs, &mut rhs).unwrap(); - assert_eq!(lhs, ApInt::from($r0.0)); - assert_eq!(rhs, ApInt::from($r0.1)); - } - if $signed { - { - let mut lhs = ApInt::from(80i8); - let mut rhs = ApInt::from(-7i8); - ApInt::$fun(&mut lhs, &mut rhs).unwrap(); - assert_eq!(lhs, ApInt::from($r1.0)); - assert_eq!(rhs, 
ApInt::from($r1.1)); - } - { - let mut lhs = ApInt::from(-80i8); - let mut rhs = ApInt::from(7i8); - ApInt::$fun(&mut lhs, &mut rhs).unwrap(); - assert_eq!(lhs, ApInt::from($r2.0)); - assert_eq!(rhs, ApInt::from($r2.1)); - } - { - let mut lhs = ApInt::from(-80i8); - let mut rhs = ApInt::from(-7i8); - ApInt::$fun(&mut lhs, &mut rhs).unwrap(); - assert_eq!(lhs, ApInt::from($r3.0)); - assert_eq!(rhs, ApInt::from($r3.1)); - } - } - } - } - s!(false,wrapping_udiv_assign,into_wrapping_udiv,11i8,0,0,0); - s!(false,wrapping_urem_assign,into_wrapping_urem,3i8,0,0,0); - s!(true,wrapping_sdiv_assign,into_wrapping_sdiv,11i8,-11i8,-11i8,11i8); - s!(true,wrapping_srem_assign,into_wrapping_srem,3i8,3i8,-3i8,-3i8); - s!(false,wrapping_udivrem_assign,(11i8,3i8),(0,0),(0,0),(0,0)); - s!(false,wrapping_uremdiv_assign,(3i8,11i8),(0,0),(0,0),(0,0)); - s!(true,wrapping_sdivrem_assign,(11i8,3i8),(-11i8,3i8),(-11i8,-3i8),(11i8,-3i8)); - s!(true,wrapping_sremdiv_assign,(3i8,11i8),(3i8,-11i8),(-3i8,-11i8),(-3i8,11i8)); - } - - //NOTE: this test only works if multiplication and a few other functions work - #[test] - fn complex() { - //there are many special case and size optimization paths, - //so this test must be very rigorous. 
- assert_eq!( - ApInt::from(123u8) - .into_wrapping_udiv(&ApInt::from(7u8)).unwrap(), - ApInt::from(17u8)); - assert_eq!( - ApInt::from([0u64,0,0,123]) - .into_wrapping_udiv(&ApInt::from([0u64,0,0,7])).unwrap(), - ApInt::from([0u64,0,0,17])); - assert_eq!( - ApInt::from([0u64,0,0,0]) - .into_wrapping_udiv(&ApInt::from([0u64,0,0,7])).unwrap(), - ApInt::from([0u64,0,0,0])); - assert_eq!( - ApInt::from([0u64,0,0,3]) - .into_wrapping_udiv(&ApInt::from([0u64,0,0,7])).unwrap(), - ApInt::from([0u64,0,0,0])); - assert_eq!( - ApInt::from([0u64,0,0,0]) - .into_wrapping_udiv(&ApInt::from([0u64,7,0,0])).unwrap(), - ApInt::from([0u64,0,0,0])); - assert_eq!( - ApInt::from([0u64,0,0,7]) - .into_wrapping_udiv(&ApInt::from([0u64,4,0,0])).unwrap(), - ApInt::from([0u64,0,0,0])); - assert_eq!( - ApInt::from([0u64,0,3,0]) - .into_wrapping_udiv(&ApInt::from([0u64,4,0,0])).unwrap(), - ApInt::from([0u64,0,0,0])); - assert_eq!( - ApInt::from([0u64,1,0,0]) - .into_wrapping_udiv(&ApInt::from([0u64,0,0,4])).unwrap(), - ApInt::from([0u64,0,u64::MAX / 4 + 1,0])); - assert_eq!(//this one - ApInt::from([0u64,1,0,0]) - .into_wrapping_udiv(&ApInt::from([0u64,0,1,0])).unwrap(), - ApInt::from([0u64,0,1,0])); - assert_eq!( - ApInt::from([1u64,2,3,4]) - .into_wrapping_udiv(&ApInt::from([1u64,2,3,4])).unwrap(), - ApInt::from([0u64,0,0,1])); - assert_eq!( - ApInt::from([0u64,1,u64::MAX,u64::MAX,u64::MAX,u64::MAX,u64::MAX,u64::MAX]) - .into_wrapping_udiv(&ApInt::from([0u64,0,0,0,0,0,0,2])).unwrap() - ,ApInt::from([0u64,0,u64::MAX,u64::MAX,u64::MAX,u64::MAX,u64::MAX,u64::MAX])); - assert_eq!( - ApInt::from([u64::MAX,u64::MAX - 1,1,u64::MAX - 1,u64::MAX - 1,2,u64::MAX - 1,1]) - .into_wrapping_udiv(&ApInt::from([0,0,0,0,u64::MAX,u64::MAX,0,u64::MAX])).unwrap(), - ApInt::from([0,0,0,0,u64::MAX,u64::MAX,0,u64::MAX]) - ); - assert_eq!(ApInt::from(61924494876344321u128).into_wrapping_urem(&ApInt::from(167772160u128)).unwrap(),ApInt::from(1u128)); - assert_eq!(ApInt::from([18446744073709551615u64, 
18446744073709551615, 1048575, 18446462598732840960]).into_wrapping_urem(&ApInt::from([0u64, 0, 140668768878592, 0])).unwrap(), ApInt::from([0,0, 136545601323007, 18446462598732840960u64])); - assert_eq!(ApInt::from([1u64, 17293821508111564796, 2305843009213693952]).into_wrapping_urem(&ApInt::from([0u64,1,18446742978492891132])).unwrap(),ApInt::from([0u64,0,0])); - assert_eq!(ApInt::from([1u64,18446744073692774368,268435456]).into_wrapping_add(&ApInt::from([0u64,1,18446744073709519359])).unwrap().into_wrapping_udiv(&ApInt::from([0u64,1,18446744073709551584])).unwrap(),ApInt::from([0u64,0,18446744073701163008])); - assert_eq!(ApInt::from([18446744073709551615u64,18446744073709551615,18446739675663040512,2199023255552]).into_wrapping_urem(&ApInt::from([18446744073709551615u64,18446744073709551615,18446739675663040512,2199023255552])).unwrap(),ApInt::from([0u64,0,0,0])); - assert_eq!(ApInt::from([1u64,18446462598730776592,1047972020113]).into_wrapping_udiv(&ApInt::from([0u64,16383,18446744056529682433])).unwrap(),ApInt::from([0u64,0,2251782633816065])); - assert_eq!(ApInt::from([54467619767447688u64, 18446739675392512496, 5200531536562092095, 18446744073709551615]).into_wrapping_udiv(&ApInt::from([0u64, 8255, 18446462598732840960, 0])).unwrap(), ApInt::from([0u64,0, 6597337677824, 288230376151678976])); - let resize = [ - 7usize, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 137, 200, 255, - 256, 700, 907, 1024, 2018, 2019, - ]; - let lhs_shl = [ - 0usize, 1, 0, 1, 4, 6, 4, 10, 13, 0, 31, 25, 7, 17, 32, 50, 0, 64, 249, 8, 777, 0, - 900, 0, - ]; - let rhs_shl = [ - 0usize, 0, 1, 1, 3, 5, 4, 14, 10, 0, 0, 25, 0, 18, 32, 49, 100, 64, 0, 256, 64, - 900, 1000, 0, - ]; - for (i, _) in resize.iter().enumerate() { - let mut lhs = ApInt::from(5u8) - .into_zero_resize(BitWidth::new(resize[i]).unwrap()) - .into_wrapping_shl(lhs_shl[i]) - .unwrap(); - let mut rhs = ApInt::from(11u8) - .into_zero_resize(BitWidth::new(resize[i]).unwrap()) - .into_wrapping_shl(rhs_shl[i]) 
- .unwrap(); - let mut zero = ApInt::from(0u8).into_zero_resize(BitWidth::new(resize[i]).unwrap()); - let mut one = ApInt::from(1u8).into_zero_resize(BitWidth::new(resize[i]).unwrap()); - let mut product = lhs.clone().into_wrapping_mul(&rhs).unwrap(); - assert_eq!(zero.clone().into_wrapping_udiv(&lhs).unwrap(), zero); - assert_eq!(zero.clone().into_wrapping_udiv(&rhs).unwrap(), zero); - assert_eq!(lhs.clone().into_wrapping_udiv(&one).unwrap(), lhs); - assert_eq!(rhs.clone().into_wrapping_udiv(&one).unwrap(), rhs); - assert_eq!(lhs.clone().into_wrapping_udiv(&lhs).unwrap(), one); - assert_eq!(rhs.clone().into_wrapping_udiv(&rhs).unwrap(), one); - let temp = product.clone().into_wrapping_udiv(&lhs).unwrap(); - if temp != rhs { - panic!("lhs_shl:{:?}\nrhs_shl:{:?}\nlhs:{:?}\nrhs:{:?}\n={:?}\ntemp:{:?}",lhs_shl[i],rhs_shl[i],lhs,rhs,product,temp); - } - assert_eq!(product.clone().into_wrapping_udiv(&rhs).unwrap(), lhs); - assert_eq!(zero.clone().into_wrapping_urem(&lhs).unwrap(), zero); - assert_eq!(zero.clone().into_wrapping_urem(&rhs).unwrap(), zero); - assert_eq!(lhs.clone().into_wrapping_urem(&one).unwrap(), zero); - assert_eq!(rhs.clone().into_wrapping_urem(&one).unwrap(), zero); - assert_eq!(lhs.clone().into_wrapping_urem(&lhs).unwrap(), zero); - assert_eq!(rhs.clone().into_wrapping_urem(&rhs).unwrap(), zero); - assert_eq!(product.clone().into_wrapping_urem(&lhs).unwrap(), zero); - assert_eq!(product.clone().into_wrapping_urem(&rhs).unwrap(), zero); - assert_eq!(product.clone().into_wrapping_add(&one).unwrap().into_wrapping_urem(&lhs).unwrap(), one); - assert_eq!(product.clone().into_wrapping_add(&one).unwrap().into_wrapping_urem(&rhs).unwrap(), one); - } - } - } - - mod megafuzz { - use super::*; - use bitwidth::BitWidth; - use std::u64; - use rand::random; - - #[test] - fn pull_request_35_regression() { - let width = BitWidth::new(65).unwrap(); - //arithmetic shift right shift - assert_eq!( - ApInt::from([1u64, u64::MAX - (1 << 
6)]).into_truncate(width).unwrap(), - ApInt::from([1u64, u64::MAX - (1 << 10)]).into_truncate(width).unwrap() - .into_wrapping_ashr(4).unwrap() - ); - //multiplication related - let v1 = ApInt::from((1u128 << 64) | (7u128)).into_zero_resize(width); - let v2 = ApInt::one(BitWidth::w1()).into_zero_extend(width).unwrap().into_wrapping_shl(64).unwrap(); - let v3 = v1.clone().into_wrapping_mul(&v2).unwrap(); - assert_eq!(v1, ApInt::from([1u64,7]).into_zero_resize(width)); - assert_eq!(v2, ApInt::from([1u64,0]).into_zero_resize(width)); - assert_eq!(v3, ApInt::from([1u64,0]).into_zero_resize(width)); - let width = BitWidth::new(193).unwrap(); - let v3 = ApInt::from([0u64, 0, 17179852800, 1073676288]).into_zero_resize(width).into_wrapping_mul(&ApInt::from(1u128 << 115).into_zero_resize(width)).unwrap(); - assert_eq!(v3, ApInt::from([0u64, 0, 17179852800, 1073676288]).into_wrapping_shl(115).unwrap().into_zero_resize(width)); - } - - //throws all the functions together for an identities party. If one function breaks, the - //whole thing should break. 
- fn identities(size: usize, width: BitWidth, zero: &ApInt, lhs: ApInt, rhs: ApInt, third: ApInt) { - //basic addition and subtraction tests - let shift = random::() % size; - let mut temp = lhs.clone().into_wrapping_inc(); - assert_eq!(temp, lhs.clone().into_wrapping_add(&ApInt::one(width)).unwrap()); - assert_eq!(temp, lhs.clone().into_wrapping_sub(&ApInt::all_set(width)).unwrap()); - temp.wrapping_dec(); - assert_eq!(temp, lhs); - temp.wrapping_dec(); - assert_eq!(temp, lhs.clone().into_wrapping_sub(&ApInt::one(width)).unwrap()); - assert_eq!(temp, lhs.clone().into_wrapping_add(&ApInt::all_set(width)).unwrap()); - temp.wrapping_inc(); - assert_eq!(temp, lhs); - - //power of two multiplication and division shifting tests - let mut tmp1 = ApInt::one(BitWidth::w1()).into_zero_extend(width).unwrap().into_wrapping_shl(shift).unwrap(); - assert_eq!( - lhs.clone().into_wrapping_shl(shift).unwrap(), - lhs.clone().into_wrapping_mul(&tmp1).unwrap() - ); - //negation test also - assert_eq!( - lhs.clone().into_wrapping_neg().into_wrapping_shl(shift).unwrap(), - lhs.clone().into_wrapping_mul( - &ApInt::one(BitWidth::w1()).into_sign_extend(width).unwrap().into_wrapping_shl(shift).unwrap() - ).unwrap() - ); - assert_eq!( - lhs.clone().into_wrapping_lshr(shift).unwrap(), - lhs.clone().into_wrapping_udiv(&tmp1).unwrap() - ); - if (shift == (size - 1)) && (lhs == tmp1) { - //unfortunate numerical corner case where the result of the shift is -1 but the - //division ends up as +1 - assert_eq!( - lhs.clone().into_wrapping_sdiv(&tmp1).unwrap(), - ApInt::one(width) - ); - } else { - let mut tmp0 = lhs.clone(); - ApInt::wrapping_sdivrem_assign(&mut tmp0, &mut tmp1).unwrap(); - //make it a floored division - if lhs.is_negative() && !tmp1.is_zero() { - tmp0.wrapping_dec(); - } - assert_eq!(tmp0, lhs.clone().into_wrapping_ashr(shift).unwrap()); - } - let rand_width = BitWidth::new((random::() % size) + 1).unwrap(); - //wrapping multiplication test - assert_eq!( - 
lhs.clone().into_zero_extend(BitWidth::new(size * 2).unwrap()).unwrap() - .into_wrapping_mul( - &rhs.clone().into_zero_extend(BitWidth::new(size * 2).unwrap()).unwrap() - ).unwrap().into_zero_resize(rand_width), - lhs.clone().into_wrapping_mul(&rhs).unwrap().into_zero_resize(rand_width) - ); - let tot_leading_zeros = lhs.leading_zeros() + rhs.leading_zeros(); - let anti_overflow_mask = if tot_leading_zeros < size { - if rhs.leading_zeros() == 0 { - ApInt::zero(width) - } else { - ApInt::one(BitWidth::new(1).unwrap()).into_sign_extend(rhs.leading_zeros()).unwrap().into_zero_extend(width).unwrap() - } - } else { - ApInt::one(BitWidth::new(1).unwrap()).into_sign_extend(width).unwrap() - }; - let mul = (lhs.clone() & &anti_overflow_mask).into_wrapping_mul(&rhs).unwrap(); - if rhs != *zero { - let rem = third.clone().into_wrapping_urem(&rhs).unwrap(); - let mut temp0 = mul.clone(); - if !temp0.overflowing_uadd_assign(&rem).unwrap() { - let mut temp1 = rhs.clone(); - let mul_plus_rem = temp0.clone(); - ApInt::wrapping_udivrem_assign(&mut temp0, &mut temp1).unwrap(); - if temp0 != (lhs.clone() & &anti_overflow_mask) {panic!("wrong div\nlhs:{:?}\nactual:{:?}\nrhs:{:?}\nthird:{:?}\nrem:{:?}\nmul:{:?}\nmul_plus_rem:{:?}\ntemp0:{:?}\ntemp1:{:?}",lhs,(lhs.clone() & &anti_overflow_mask),rhs,third,rem,mul,mul_plus_rem,temp0,temp1)} - if temp1 != rem {panic!("wrong rem\nlhs:{:?}\nactual:{:?}\nrhs:{:?}\nthird:{:?}\nrem:{:?}\nmul:{:?}\nmul_plus_rem:{:?}\ntemp0:{:?}\ntemp1:{:?}",lhs,(lhs.clone() & &anti_overflow_mask),rhs,third,rem,mul,mul_plus_rem,temp0,temp1)} - } - } - } - - //random length AND, XOR, and OR fuzzer; - fn fuzz_random(size: usize, iterations: usize) { - let width = BitWidth::new(size).unwrap(); - use rand::random; - let mut lhs = ApInt::from(0u8).into_zero_resize(width); - let mut rhs = ApInt::from(0u8).into_zero_resize(width); - let mut third = ApInt::from(0u8).into_zero_resize(width); - let zero = ApInt::from(0u8).into_zero_resize(width); - for _ in 0..iterations 
{ - let mut r0 = (random::() % (size as u32)) as usize; - if r0 == 0 {r0 = 1;} - let ones = ApInt::one(BitWidth::new(1).unwrap()).into_sign_extend(r0).unwrap().into_zero_extend(width).unwrap(); - let r1 = (random::() % (size as u32)) as usize; - //circular shift - let mask = if r1 == 0 { - ones.clone() - } else { - ones.clone().into_wrapping_shl(r1).unwrap() | (&ones.clone().into_wrapping_lshr((size - r1) as usize).unwrap()) - }; - //assert_eq!(mask,ones.into_rotate_left(r1 as usize).unwrap()); - match (random(),random(),random(),random()) { - (false,false,false,false) => lhs |= &mask, - (false,false,false,true) => lhs &= &mask, - (false,false,true,false) => lhs ^= &mask, - (false,false,true,true) => lhs ^= &mask, - (false,true,false,false) => rhs |= &mask, - (false,true,false,true) => rhs &= &mask, - (false,true,true,false) => rhs ^= &mask, - (false,true,true,true) => rhs ^= &mask, - (true,false,false,false) => third |= &mask, - (true,false,false,true) => third &= &mask, - (true,false,true,false) => third ^= &mask, - (true,false,true,true) => third ^= &mask, - (true,true,false,false) => rhs |= &mask, - (true,true,false,true) => rhs &= &mask, - (true,true,true,false) => rhs ^= &mask, - (true,true,true,true) => rhs ^= &mask, - } - identities(size, width, &zero, lhs.clone(), lhs.clone(), rhs.clone()); - identities(size, width, &zero, lhs.clone(), rhs.clone(), third.clone()); - identities(size, width, &zero, rhs.clone(), lhs.clone(), third.clone()); - identities(size, width, &zero, third.clone(), lhs.clone(), rhs.clone()); - identities(size, width, &zero, lhs.clone(), third.clone(), rhs.clone()); - identities(size, width, &zero, third.clone(), rhs.clone(), lhs.clone()); - identities(size, width, &zero, rhs.clone(), third.clone(), lhs.clone()); - } - } - - macro_rules! 
explode { - ($cd:ident, $temp:ident, $i_zero:ident, $i_one:ident, $inner:tt) => {{ - for $i_zero in 0..(2usize.pow(($cd * 2) as u32)) { - let mut $temp: Vec = Vec::with_capacity($cd); - for $i_one in 0..$cd { - match ($i_zero >> ($i_one * 2)) & 0b11 { - 0b0 => $temp.push(0), - 0b1 => $temp.push(1), - 0b10 => $temp.push(u64::MAX - 1), - 0b11 => $temp.push(u64::MAX), - _ => unreachable!() - } - } - $inner - } - }} - } - - //edge and corner case fuzzer - fn fuzz_edge(size: usize) { - let width = BitWidth::new(size).unwrap(); - let zero = ApInt::from(0u8).into_zero_resize(width); - let cd = - if (size % 64) == 0 {size / 64} - else {(size / 64) + 1}; - explode!(cd,temp0,i0,i1, - {explode!(cd,temp1,i1,i2, - {explode!(cd,temp2,i2,i3, - {identities(size, width, &zero, - ApInt::from_vec_u64(temp0.clone()).unwrap().into_truncate(size).unwrap(), - ApInt::from_vec_u64(temp1.clone()).unwrap().into_truncate(size).unwrap(), - ApInt::from_vec_u64(temp2.clone()).unwrap().into_truncate(size).unwrap());} - )} - )} - ) - } - - #[test] - fn fuzz_test() { - assert_eq!(ApInt::from_vec_u64(vec![32u64,234,23]).unwrap(),ApInt::from([32u64,234,23])); - let a = 10000; - fuzz_random(1, a); - fuzz_random(2, a); - fuzz_random(3, a); - fuzz_random(31, a); - fuzz_random(32, a); - fuzz_random(33, a); - fuzz_random(63, a); - fuzz_random(64, a); - fuzz_random(65, a); - fuzz_random(127, a); - fuzz_random(128, a); - fuzz_random(129, a); - fuzz_random(191, a); - fuzz_random(192, a); - fuzz_random(193, a); - fuzz_random(255, a); - fuzz_random(256, a); - fuzz_edge(63); - fuzz_edge(64); - fuzz_edge(65); - fuzz_edge(127); - fuzz_edge(128); - fuzz_edge(129); - fuzz_edge(191); - fuzz_edge(192); - //very expensive - //fuzz_random(512, a); - //fuzz_random(777, a); - //fuzz_random(16*64, a); - //fuzz_edge(193); - //fuzz_edge(255); - //fuzz_edge(256); - } - } -} \ No newline at end of file diff --git a/src/apint/mod.rs b/src/apint/mod.rs deleted file mode 100644 index 0812ede..0000000 --- a/src/apint/mod.rs +++ 
/dev/null @@ -1,46 +0,0 @@ -mod constructors; -mod casting; -mod utils; -mod bitwise; -mod relational; -mod arithmetic; -mod shift; -mod serialization; -mod to_primitive; - -#[cfg(feature = "rand_support")] -mod rand_impl; - -#[cfg(feature = "serde_support")] -mod serde_impl; - -use digit::{Digit}; -use bitwidth::{BitWidth}; - -pub use self::shift::{ShiftAmount}; -pub(crate) use self::to_primitive::{PrimitiveTy}; - -use std::ptr::NonNull; - -/// An arbitrary precision integer with modulo arithmetics similar to machine integers. -pub struct ApInt { - /// The width in bits of this `ApInt`. - len : BitWidth, - /// The actual data (bits) of this `ApInt`. - data: ApIntData -} - -union ApIntData { - /// Inline storage (up to 64 bits) for small-space optimization. - inl: Digit, - /// Extern storage (>64 bits) for larger `ApInt`s. - ext: NonNull -} - -/// `ApInt` is safe to send between threads since it does not own -/// aliasing memory and has no reference counting mechanism like `Rc`. -unsafe impl Send for ApInt {} - -/// `ApInt` is safe to share between threads since it does not own -/// aliasing memory and has no mutable internal state like `Cell` or `RefCell`. -unsafe impl Sync for ApInt {} diff --git a/src/apint/relational.rs b/src/apint/relational.rs deleted file mode 100644 index a876337..0000000 --- a/src/apint/relational.rs +++ /dev/null @@ -1,275 +0,0 @@ -use apint::{ApInt}; -use apint::utils::{ - ZipDataAccess -}; -use errors::{Result}; -use traits::Width; -use digit; -use digit::{Bit}; - -use std::cmp::Ordering; -use std::ops::Not; - -/// If `self` and `other` have unmatching bit widths, `false` will be returned. -impl PartialEq for ApInt { - fn eq(&self, other: &ApInt) -> bool { - if self.len_bits() != other.len_bits() { - return false - } - self.as_digit_slice() == other.as_digit_slice() - } -} - -impl Eq for ApInt {} - -/// # Comparison Operations -impl ApInt { - - /// Unsigned less-than (`ult`) comparison between `self` and `rhs`. 
- /// - /// # Note - /// - /// - `checked_` for this function means that it checks the bit widths - /// - Returns `Ok(true)` if `self < rhs`. - /// - Interprets both `ApInt` instances as **unsigned** values. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. - pub fn checked_ult(&self, rhs: &ApInt) -> Result { - match self - .zip_access_data(rhs) - .map_err(|err| err.with_annotation(format!( - "Error occured on unsigned less-than (slt) comparison with `lhs < rhs` where \ - \n\tlhs = {:?}\ - \n\trhs = {:?}", - self, rhs) - ))? - { - ZipDataAccess::Inl(lhs, rhs) => { - Ok(lhs.repr() < rhs.repr()) - } - ZipDataAccess::Ext(lhs, rhs) => { - for (l, r) in lhs.into_iter().rev() - .zip(rhs.into_iter().rev()) - { - match l.cmp(r) { - Ordering::Less => return Ok(true), - Ordering::Greater => return Ok(false), - Ordering::Equal => () - } - } - Ok(false) - } - } - } - - /// Unsigned less-equals (`ule`) comparison between `self` and `rhs`. - /// - /// # Note - /// - /// - `checked_` for this function means that it checks the bit widths - /// - Returns `Ok(true)` if `self <= rhs`. - /// - Interprets both `ApInt` instances as **unsigned** values. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. - #[inline] - pub fn checked_ule(&self, rhs: &ApInt) -> Result { - rhs.checked_ult(self).map(Not::not) - .map_err(|err| err.with_annotation(format!( - "Error occured on unsigned less-than or equals (ule) comparison with `lhs <= rhs` where \ - \n\tlhs = {:?}\ - \n\trhs = {:?}", - self, rhs) - )) - } - - /// Unsigned greater-than (`ugt`) comparison between `self` and `rhs`. - /// - /// # Note - /// - /// - `checked_` for this function means that it checks the bit widths - /// - Returns `Ok(true)` if `self > rhs`. - /// - Interprets both `ApInt` instances as **unsigned** values. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. 
- #[inline] - pub fn checked_ugt(&self, rhs: &ApInt) -> Result { - rhs.checked_ult(self) - .map_err(|err| err.with_annotation(format!( - "Error occured on unsigned greater-than (ugt) comparison with `lhs > rhs` where \ - \n\tlhs = {:?}\ - \n\trhs = {:?}", - self, rhs) - )) - } - - /// Unsigned greater-equals (`uge`) comparison between `self` and `rhs`. - /// - /// # Note - /// - /// - `checked_` for this function means that it checks the bit widths - /// - Returns `Ok(true)` if `self >= rhs`. - /// - Interprets both `ApInt` instances as **unsigned** values. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. - #[inline] - pub fn checked_uge(&self, rhs: &ApInt) -> Result { - self.checked_ult(rhs).map(Not::not) - .map_err(|err| err.with_annotation(format!( - "Error occured on unsigned greater-than or equals (ule) comparison with `lhs >= rhs` where \ - \n\tlhs = {:?}\ - \n\trhs = {:?}", - self, rhs) - )) - } - - /// Signed less-than (`slt`) comparison between `self` and `rhs`. - /// - /// # Note - /// - /// - `checked_` for this function means that it checks the bit widths - /// - Returns `Ok(true)` if `self < rhs`. - /// - Interprets both `ApInt` instances as **signed** values. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. 
- pub fn checked_slt(&self, rhs: &ApInt) -> Result { - let lhs = self; - lhs.zip_access_data(rhs).and_then(|zipped| { - match zipped { - ZipDataAccess::Inl(lhs, rhs) => { - let infate_abs = digit::BITS - self.width().to_usize(); - let lhs = (lhs.repr() << infate_abs) as i64; - let rhs = (rhs.repr() << infate_abs) as i64; - Ok(lhs < rhs) - } - ZipDataAccess::Ext(_, _) => { - match (lhs.sign_bit(), rhs.sign_bit()) { - (Bit::Unset, Bit::Unset) => lhs.checked_ult(rhs), - (Bit::Unset, Bit::Set ) => Ok(false), - (Bit::Set , Bit::Unset) => Ok(true), - (Bit::Set , Bit::Set ) => rhs.checked_ugt(lhs) - } - } - } - }) - .map_err(|err| err.with_annotation(format!( - "Error occured on signed less-than (slt) comparison with `lhs < rhs` where \ - \n\tlhs = {:?}\ - \n\trhs = {:?}", - self, rhs) - )) - } - - /// Signed less-equals (`sle`) comparison between `self` and `rhs`. - /// - /// # Note - /// - /// - `checked_` for this function means that it checks the bit widths - /// - Returns `Ok(true)` if `self <= rhs`. - /// - Interprets both `ApInt` instances as **signed** values. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. - #[inline] - pub fn checked_sle(&self, rhs: &ApInt) -> Result { - rhs.checked_slt(self).map(Not::not) - .map_err(|err| err.with_annotation(format!( - "Error occured on signed less-than or equals (ule) comparison with `lhs <= rhs` where \ - \n\tlhs = {:?}\ - \n\trhs = {:?}", - self, rhs) - )) - } - - /// Signed greater-than (`sgt`) comparison between `self` and `rhs`. - /// - /// # Note - /// - /// - `checked_` for this function means that it checks the bit widths - /// - Returns `Ok(true)` if `self > rhs`. - /// - Interprets both `ApInt` instances as **signed** values. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. 
- #[inline] - pub fn checked_sgt(&self, rhs: &ApInt) -> Result { - rhs.checked_slt(self) - .map_err(|err| err.with_annotation(format!( - "Error occured on signed greater-than (ugt) comparison with `lhs > rhs` where \ - \n\tlhs = {:?}\ - \n\trhs = {:?}", - self, rhs) - )) - } - - /// Signed greater-equals (`sge`) comparison between `self` and `rhs`. - /// - /// # Note - /// - /// - `checked_` for this function means that it checks the bit widths - /// - Returns `Ok(true)` if `self >= rhs`. - /// - Interprets both `ApInt` instances as **signed** values. - /// - /// # Errors - /// - /// - If `self` and `rhs` have unmatching bit widths. - #[inline] - pub fn checked_sge(&self, rhs: &ApInt) -> Result { - self.checked_slt(rhs).map(Not::not) - .map_err(|err| err.with_annotation(format!( - "Error occured on signed greater-than or equals (ule) comparison with `lhs >= rhs` where \ - \n\tlhs = {:?}\ - \n\trhs = {:?}", - self, rhs) - )) - } - -} - -#[cfg(test)] -mod tests { - use super::*; - - mod partial_eq { - use super::*; - - #[test] - fn simple_small() { - let a = ApInt::from_u8(42); - let b = ApInt::from_u8(42); - let c = ApInt::from_u8(77); - let d = ApInt::from_u16(42); - assert_eq!(a, b); - assert_ne!(a, c); - assert_ne!(a, d); - assert_ne!(b, c); - assert_ne!(b, d); - assert_ne!(c, d); - } - - #[test] - fn simple_large() { - let a = ApInt::from_u128(42); - let b = ApInt::from_u128(42); - let c = ApInt::from_u128(1337); - let d = ApInt::from_u64(42); - assert_eq!(a, b); - assert_ne!(a, c); - assert_ne!(a, d); - assert_ne!(b, c); - assert_ne!(b, d); - assert_ne!(c, d); - } - } -} diff --git a/src/apint/shift.rs b/src/apint/shift.rs deleted file mode 100644 index 71d1a18..0000000 --- a/src/apint/shift.rs +++ /dev/null @@ -1,579 +0,0 @@ -use apint::{ApInt}; -use apint::utils::{DataAccessMut}; -use errors::{Result}; -use checks; -use digit; -use digit::{Bit, Digit}; -use traits::{Width}; -use utils::{try_forward_bin_mut_impl}; - -/// Represents an amount of bits to shift 
an `ApInt`. -/// -/// The purpose of this type is to create a generic abstraction -/// over input types that may act as a `ShiftAmount` for shift -/// operations. -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct ShiftAmount(usize); - -impl ShiftAmount { - /// Returns the internal shift amount representation as `usize`. - #[inline] - pub fn to_usize(self) -> usize { - self.0 - } - - /// Returns the number of digits this `ShiftAmount` will leap over. - /// - /// # Examples - /// - /// - `ShiftAmount(50)` leaps over zero digits. - /// - `ShiftAmount(64)` leaps exactly over one digit. - /// - `ShiftAmount(100)` leaps over 1 digit. - /// - `ShiftAmount(150)` leaps over 2 digits. - #[inline] - pub(in apint) fn digit_steps(self) -> usize { - self.to_usize() / digit::BITS - } - - /// Returns the number of bits within a single digit this - /// `ShiftAmount` will leap over. - /// - /// # TODO - /// - /// Maybe adding `left_bit_steps` and `right_bit_steps` is better? - /// - /// # Examples - /// - /// - `ShiftAmount(50)` leaps over `50` bits. - /// - `ShiftAmount(64)` leaps exactly over `0` bits. - /// - `ShiftAmount(100)` leaps over `28` bits. - /// - `ShiftAmount(150)` leaps over `22` bits. - #[inline] - pub(in apint) fn bit_steps(self) -> usize { - self.to_usize() % digit::BITS - } -} - -impl From for ShiftAmount { - /// Returns a new `ShiftAmount` from the given `usize`. - #[inline] - fn from(val: usize) -> ShiftAmount { - ShiftAmount(val) - } -} - -/// # Shift Operations -impl ApInt { - - /// Shift this `ApInt` left by the given `shift_amount` bits. - /// - /// This operation is inplace and will **not** allocate memory. - /// - /// # Errors - /// - /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. 
- pub fn wrapping_shl_assign(&mut self, shift_amount: S) -> Result<()> - where S: Into - { - let shift_amount = shift_amount.into(); - checks::verify_shift_amount(self, shift_amount)?; - match self.access_data_mut() { - DataAccessMut::Inl(digit) => { - *digit.repr_mut() <<= shift_amount.to_usize(); - } - DataAccessMut::Ext(digits) => { - let digit_steps = shift_amount.digit_steps(); - if digit_steps != 0 { - let digits_len = digits.len(); - { - use std::ptr; - let src_ptr = digits.as_mut_ptr(); - unsafe { - let dst_ptr = src_ptr.offset(digit_steps as isize); - ptr::copy(src_ptr, dst_ptr, digits_len - digit_steps) - } - } - digits.iter_mut() - .take(digit_steps) - .for_each(|d| *d = Digit::zero()); - } - let bit_steps = shift_amount.bit_steps(); - if bit_steps != 0 { - let mut carry = 0; - for elem in digits[digit_steps..].iter_mut() { - let repr = elem.repr(); - let new_carry = repr >> (digit::BITS - bit_steps); - *elem = Digit((repr << bit_steps) | carry); - carry = new_carry; - } - } - } - } - self.clear_unused_bits(); - Ok(()) - } - - /// Shift this `ApInt` left by the given `shift_amount` bits and returns the result. - /// - /// This operation is inplace and will **not** allocate memory. - /// - /// # Errors - /// - /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. - pub fn into_wrapping_shl(self, shift_amount: S) -> Result - where S: Into - { - try_forward_bin_mut_impl(self, shift_amount, ApInt::wrapping_shl_assign) - } - - /// Logically right-shifts this `ApInt` by the given `shift_amount` bits. - /// - /// This operation is inplace and will **not** allocate memory. - /// - /// # Errors - /// - /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. 
- pub fn wrapping_lshr_assign(&mut self, shift_amount: S) -> Result<()> - where S: Into - { - let shift_amount = shift_amount.into(); - checks::verify_shift_amount(self, shift_amount)?; - match self.access_data_mut() { - DataAccessMut::Inl(digit) => { - *digit.repr_mut() >>= shift_amount.to_usize(); - } - DataAccessMut::Ext(digits) => { - let digit_steps = shift_amount.digit_steps(); - if digit_steps != 0 { - digits.rotate_left(digit_steps); - digits.iter_mut() - .rev() - .take(digit_steps) - .for_each(|d| *d = Digit::zero()); - } - let bit_steps = shift_amount.bit_steps(); - if bit_steps > 0 { - let mut borrow = 0; - for elem in digits.iter_mut().rev() { - let repr = elem.repr(); - let new_borrow = repr << (digit::BITS - bit_steps); - *elem = Digit((repr >> bit_steps) | borrow); - borrow = new_borrow; - } - } - } - } - Ok(()) - } - - /// Logically right-shifts this `ApInt` by the given `shift_amount` bits - /// and returns the result. - /// - /// This operation is inplace and will **not** allocate memory. - /// - /// # Errors - /// - /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. - pub fn into_wrapping_lshr(self, shift_amount: S) -> Result - where S: Into - { - try_forward_bin_mut_impl(self, shift_amount, ApInt::wrapping_lshr_assign) - } - - /// Arithmetically right-shifts this `ApInt` by the given `shift_amount` bits. - /// - /// This operation is inplace and will **not** allocate memory. - /// - /// # Note - /// - /// Arithmetic shifting copies the sign bit instead of filling up with zeros. - /// - /// # Errors - /// - /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. 
- pub fn wrapping_ashr_assign(&mut self, shift_amount: S) -> Result<()> - where S: Into - { - if self.sign_bit() == Bit::Unset { - return self.wrapping_lshr_assign(shift_amount) - } - let shift_amount = shift_amount.into(); - checks::verify_shift_amount(self, shift_amount)?; - let shift_amount = shift_amount.to_usize(); - //prevents shift overflow below - if shift_amount == 0 {return Ok(())} - let width = self.width(); - let width_bits = width.to_usize() % digit::BITS; - let (digits, bits) = (shift_amount / digit::BITS, shift_amount % digit::BITS); - let uns = digit::BITS - bits; - match self.access_data_mut() { - DataAccessMut::Inl(x) => { - *x = (*x >> bits) | (digit::ONES << (width.to_usize() - bits)); - } - DataAccessMut::Ext(x) => { - if width_bits != 0 { - x[x.len() - 1].sign_extend_from(width_bits).unwrap(); - } - let diff = x.len() - digits; - if digits == 0 { - //subdigit shift - for i in 0..(x.len() - 1) { - x[i] = (x[i] >> bits) | (x[i + 1] << uns); - } - x[x.len() - 1] = (x[x.len() - 1] >> bits) | (digit::ONES << uns); - } else if bits == 0 { - //digit shift - for i in digits..x.len() { - x[i - digits] = x[i]; - } - for i in 0..digits { - x[i + diff].set_all(); - } - } else { - //digit and subdigit shift - for i in digits..(x.len() - 1) { - x[i - digits] = (x[i] >> bits) | (x[i + 1] << uns); - } - x[diff - 1] = (x[x.len() - 1] >> bits) | (digit::ONES << uns); - for i in 0..digits { - x[i + diff].set_all(); - } - } - } - } - self.clear_unused_bits(); - Ok(()) - } - - /// Arithmetically right-shifts this `ApInt` by the given `shift_amount` bits - /// and returns the result. - /// - /// This operation is inplace and will **not** allocate memory. - /// - /// # Note - /// - /// Arithmetic shifting copies the sign bit instead of filling up with zeros. - /// - /// # Errors - /// - /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. 
- pub fn into_wrapping_ashr(self, shift_amount: S) -> Result - where S: Into - { - try_forward_bin_mut_impl(self, shift_amount, ApInt::wrapping_ashr_assign) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn test_reprs_w64() -> impl Iterator { - vec![ - 0x0123_4567_89AB_CDEF, - 0xFEDC_BA98_7654_3210, - 0x0000_0000_0000_0000, - 0x5555_5555_5555_5555, - 0xAAAA_AAAA_AAAA_AAAA, - 0xFFFF_FFFF_FFFF_FFFF, - ] - .into_iter() - } - - fn test_apints_w64() -> impl Iterator { - test_reprs_w64().map(ApInt::from_u64) - } - - fn test_reprs_w128() -> impl Iterator { - vec![ - 0x0123_4567_89AB_CDEF_0011_2233_4455_6677, - 0xFEDC_BA98_7654_3210_7766_5544_3322_1100, - 0x0000_0000_0000_0000_0000_0000_0000_0001, - 0x8000_0000_0000_0000_0000_0000_0000_0000, - 0x0000_0000_0000_0000_0000_0000_0000_0000, - 0x5555_5555_5555_5555_5555_5555_5555_5555, - 0xAAAA_AAAA_AAAA_AAAA_AAAA_AAAA_AAAA_AAAA, - 0xFFFF_FFFF_FFFF_FFFF_FFFF_FFFF_FFFF_FFFF, - ] - .into_iter() - } - - fn test_apints_w128() -> impl Iterator { - test_reprs_w128().map(ApInt::from_u128) - } - - mod shl { - use super::*; - - #[test] - fn assign_small_ok() { - for repr in test_reprs_w64() { - for shamt in 0..64 { - let mut result = ApInt::from_u64(repr); - result.wrapping_shl_assign(shamt).unwrap(); - let expected = ApInt::from_u64(repr << shamt); - assert_eq!(result, expected); - } - } - } - - #[test] - fn assign_large_ok() { - for repr in test_reprs_w128() { - for shamt in 0..128 { - let mut result = ApInt::from_u128(repr); - result.wrapping_shl_assign(shamt).unwrap(); - let expected = ApInt::from_u128(repr << shamt); - assert_eq!(result, expected); - } - } - } - - #[test] - fn assign_xtra_large_ok() { - use digit; - let d0 = 0xFEDC_BA98_7654_3210; - let d1 = 0x5555_5555_4444_4444; - let d2 = 0xAAAA_AAAA_CCCC_CCCC; - let d3 = 0xFFFF_7777_7777_FFFF; - let input: [u64; 4] = [d0, d1, d2, d3]; - { - let shamt = 100; - let digit_steps = shamt / 64; - let bit_steps = shamt % 64; - assert_eq!(digit_steps, 1); - 
assert_eq!(bit_steps, 36); - let result = ApInt::from(input) - .into_wrapping_shl(shamt) - .unwrap(); - let expected: [u64; 4] = [ - (d1 << bit_steps) | (d2 >> (digit::BITS - bit_steps)), - (d2 << bit_steps) | (d3 >> (digit::BITS - bit_steps)), - (d3 << bit_steps), - 0 - ]; - let expected = ApInt::from(expected); - assert_eq!(result, expected); - } - { - let shamt = 150; - let digit_steps = shamt / 64; - let bit_steps = shamt % 64; - assert_eq!(digit_steps, 2); - assert_eq!(bit_steps, 22); - let result = ApInt::from(input) - .into_wrapping_shl(shamt) - .unwrap(); - let expected: [u64; 4] = [ - (d2 << bit_steps) | (d3 >> (digit::BITS - bit_steps)), - (d3 << bit_steps), - 0, - 0 - ]; - let expected = ApInt::from(expected); - assert_eq!(result, expected); - } - { - let shamt = 200; - let digit_steps = shamt / 64; - let bit_steps = shamt % 64; - assert_eq!(digit_steps, 3); - assert_eq!(bit_steps, 8); - let result = ApInt::from(input) - .into_wrapping_shl(shamt) - .unwrap(); - let expected: [u64; 4] = [ - (d3 << bit_steps), - 0, - 0, - 0 - ]; - let expected = ApInt::from(expected); - assert_eq!(result, expected); - } - } - - #[test] - fn assign_small_fail() { - for mut apint in test_apints_w64() { - assert!(apint.wrapping_shl_assign(64).is_err()) - } - } - - #[test] - fn assign_large_fail() { - for mut apint in test_apints_w128() { - assert!(apint.wrapping_shl_assign(128).is_err()) - } - } - - #[test] - fn into_equivalent_small() { - for apint in test_apints_w64() { - for shamt in 0..64 { - let mut x = apint.clone(); - let y = apint.clone(); - x.wrapping_shl_assign(shamt).unwrap(); - let y = y.into_wrapping_shl(shamt).unwrap(); - assert_eq!(x, y); - } - } - } - - #[test] - fn into_equivalent_large() { - for apint in test_apints_w128() { - for shamt in 0..128 { - let mut x = apint.clone(); - let y = apint.clone(); - x.wrapping_shl_assign(shamt).unwrap(); - let y = y.into_wrapping_shl(shamt).unwrap(); - assert_eq!(x, y); - } - } - } - } - - mod lshr { - use super::*; - - 
#[test] - fn assign_small_ok() { - for repr in test_reprs_w64() { - for shamt in 0..64 { - let mut result = ApInt::from_u64(repr); - result.wrapping_lshr_assign(shamt).unwrap(); - let expected = ApInt::from_u64(repr >> shamt); - assert_eq!(result, expected); - } - } - } - - #[test] - fn assign_large_ok() { - for repr in test_reprs_w128() { - for shamt in 0..128 { - let mut result = ApInt::from_u128(repr); - result.wrapping_lshr_assign(shamt).unwrap(); - let expected = ApInt::from_u128(repr >> shamt); - assert_eq!(result, expected); - } - } - } - - #[test] - fn assign_small_fail() { - for mut apint in test_apints_w64() { - assert!(apint.wrapping_lshr_assign(64).is_err()) - } - } - - #[test] - fn assign_large_fail() { - for mut apint in test_apints_w128() { - assert!(apint.wrapping_lshr_assign(128).is_err()) - } - } - - #[test] - fn into_equivalent_small() { - for apint in test_apints_w64() { - for shamt in 0..64 { - let mut x = apint.clone(); - let y = apint.clone(); - x.wrapping_lshr_assign(shamt).unwrap(); - let y = y.into_wrapping_lshr(shamt).unwrap(); - assert_eq!(x, y); - } - } - } - - #[test] - fn into_equivalent_large() { - for apint in test_apints_w128() { - for shamt in 0..128 { - let mut x = apint.clone(); - let y = apint.clone(); - x.wrapping_lshr_assign(shamt).unwrap(); - let y = y.into_wrapping_lshr(shamt).unwrap(); - assert_eq!(x, y); - } - } - } - } - - mod ashr { - use super::*; - - #[test] - fn regression_stevia_01() { - let input = ApInt::from_i32(-8); - let expected = ApInt::from_u32(0x_FFFF_FFFE); - assert_eq!(input.into_wrapping_ashr(ShiftAmount::from(2)).unwrap(), expected); - } - - #[test] - fn assign_small_ok() { - for repr in test_reprs_w64() { - for shamt in 0..64 { - let mut result = ApInt::from_u64(repr); - result.wrapping_ashr_assign(shamt).unwrap(); - let expected = ApInt::from_i64((repr as i64) >> shamt); - assert_eq!(result, expected); - } - } - } - - #[test] - fn assign_large_ok() { - for repr in test_reprs_w128() { - for shamt in 
0..128 { - let mut result = ApInt::from_u128(repr); - result.wrapping_ashr_assign(shamt).unwrap(); - let expected = ApInt::from_i128((repr as i128) >> shamt); - assert_eq!(result, expected); - } - } - } - - #[test] - fn assign_small_fail() { - for mut apint in test_apints_w64() { - assert!(apint.wrapping_ashr_assign(64).is_err()) - } - } - - #[test] - fn assign_large_fail() { - for mut apint in test_apints_w128() { - assert!(apint.wrapping_ashr_assign(128).is_err()) - } - } - - #[test] - fn into_equivalent_small() { - for apint in test_apints_w64() { - for shamt in 0..64 { - let mut x = apint.clone(); - let y = apint.clone(); - x.wrapping_ashr_assign(shamt).unwrap(); - let y = y.into_wrapping_ashr(shamt).unwrap(); - assert_eq!(x, y); - } - } - } - - #[test] - fn into_equivalent_large() { - for apint in test_apints_w128() { - for shamt in 0..128 { - let mut x = apint.clone(); - let y = apint.clone(); - x.wrapping_ashr_assign(shamt).unwrap(); - let y = y.into_wrapping_ashr(shamt).unwrap(); - assert_eq!(x, y); - } - } - } - } -} diff --git a/src/apint/utils.rs b/src/apint/utils.rs deleted file mode 100644 index 8017911..0000000 --- a/src/apint/utils.rs +++ /dev/null @@ -1,468 +0,0 @@ - -use storage::{Storage}; -use digit::{Digit, Bit}; -use apint::{ApInt}; -use errors::{Error, Result}; -use traits::Width; -use bitwidth::BitWidth; -use digit_seq::{ - ContiguousDigitSeq, - ContiguousDigitSeqMut -}; - -use std::fmt; -use std::hash::{Hash, Hasher}; - -impl fmt::Debug for ApInt { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("ApInt") - .field("len", &self.width()) - .field("digits", &self.as_digit_slice()) - .finish() - } -} - -impl Hash for ApInt { - fn hash(&self, state: &mut H) { - self.len.hash(state); - self.as_digit_slice().hash(state); - } -} - -// ============================================================================ - -impl ApInt { - pub(in apint) fn digits(&self) -> ContiguousDigitSeq { - 
ContiguousDigitSeq::from(self.as_digit_slice()) - } - - pub(in apint) fn digits_mut(&mut self) -> ContiguousDigitSeqMut { - ContiguousDigitSeqMut::from(self.as_digit_slice_mut()) - } -} - -// ============================================================================ - -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub(crate) enum DataAccess<'a> { - Inl(Digit), - Ext(&'a [Digit]) -} - -#[derive(Debug, PartialEq, Eq)] -pub(crate) enum DataAccessMut<'a> { - Inl(&'a mut Digit), - Ext(&'a mut [Digit]) -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub(crate) enum ZipDataAccess<'a, 'b> { - Inl(Digit, Digit), - Ext(&'a [Digit], &'b [Digit]) -} - -#[derive(Debug, PartialEq, Eq)] -pub(crate) enum ZipDataAccessMutSelf<'a, 'b> { - Inl(&'a mut Digit, Digit), - Ext(&'a mut [Digit], &'b [Digit]) -} - -#[derive(Debug, PartialEq, Eq)] -pub(crate) enum ZipDataAccessMutBoth<'a, 'b> { - Inl(&'a mut Digit, &'b mut Digit), - Ext(&'a mut [Digit], &'b mut [Digit]) -} - -// ============================================================================ - -impl Width for ApInt { - /// Returns the `BitWidth` of this `ApInt`. - #[inline] - fn width(&self) -> BitWidth { - BitWidth::new(self.len_bits()).unwrap() - } -} - -/// # Utility & Helper Methods -impl ApInt { - /// Returns the number of bits of the bit width of this `ApInt`. - #[inline] - pub(in apint) fn len_bits(&self) -> usize { - self.len.to_usize() - } - - /// Returns the number of digits used internally for the value - /// representation of this `ApInt`. - #[inline] - pub(in apint) fn len_digits(&self) -> usize { - self.len.required_digits() - } - - /// Returns the storage specifier of this `ApInt`. - /// - /// This is `Storage::Inl` for `ApInt` instances that can be stored - /// entirely on the stack and `Storage::Ext` otherwise. - #[inline] - pub(in apint) fn storage(&self) -> Storage { - self.len.storage() - } - - /// Accesses the internal `Digit` data of this `ApInt` in a safe way. 
- #[inline] - pub(in apint) fn access_data(&self) -> DataAccess { - match self.storage() { - Storage::Inl => DataAccess::Inl(unsafe{self.data.inl}), - Storage::Ext => DataAccess::Ext(self.as_digit_slice()) - } - } - - /// Mutably accesses the internal `Digit` data of this `ApInt` in a safe way. - #[inline] - pub(in apint) fn access_data_mut(&mut self) -> DataAccessMut { - match self.storage() { - Storage::Inl => DataAccessMut::Inl(unsafe{&mut self.data.inl}), - Storage::Ext => DataAccessMut::Ext(self.as_digit_slice_mut()) - } - } - - /// Zips both given `ApInt` instances and tries to access their data in a safe way. - /// - /// # Errors - /// - /// - If both given `ApInt` instances have non-matching bit widths. - #[inline] - pub(in apint) fn zip_access_data<'a, 'b>(&'a self, other: &'b ApInt) -> Result> { - if self.width() != other.width() { - return Error::unmatching_bitwidths(self.width(), other.width()).into() - } - Ok(match self.storage() { - Storage::Inl => { - ZipDataAccess::Inl( - unsafe{ self.data.inl}, - unsafe{other.data.inl}) - }, - Storage::Ext => { - ZipDataAccess::Ext( - self.as_digit_slice(), - other.as_digit_slice()) - } - }) - } - - /// Zips both given `ApInt` instances and tries to mutably access `self` data and immutably - /// access `other` data in a safe way. - /// - /// # Errors - /// - /// - If both given `ApInt` instances have non-matching bit widths. - #[inline] - pub(in apint) fn zip_access_data_mut_self<'a, 'b>(&'a mut self, other: &'b ApInt) -> Result> { - if self.width() != other.width() { - return Error::unmatching_bitwidths(self.width(), other.width()).into() - } - Ok(match self.storage() { - Storage::Inl => { - ZipDataAccessMutSelf::Inl( - unsafe{&mut self.data.inl}, - unsafe{other.data.inl}) - }, - Storage::Ext => { - ZipDataAccessMutSelf::Ext( - self.as_digit_slice_mut(), - other.as_digit_slice()) - } - }) - } - - /// Zips both given `ApInt` instances and tries to mutably access `lhs` and `rhs` data - /// in a safe way. 
- /// - /// # Errors - /// - /// - If both given `ApInt` instances have non-matching bit widths. - #[inline] - pub(in apint) fn zip_access_data_mut_both<'a, 'b>(lhs: &'a mut ApInt, rhs: &'b mut ApInt) -> Result> { - if lhs.width() != rhs.width() { - return Error::unmatching_bitwidths(lhs.width(), rhs.width()).into() - } - Ok(match lhs.storage() { - Storage::Inl => { - ZipDataAccessMutBoth::Inl( - unsafe{&mut lhs.data.inl}, - unsafe{&mut rhs.data.inl}) - }, - Storage::Ext => { - ZipDataAccessMutBoth::Ext( - lhs.as_digit_slice_mut(), - rhs.as_digit_slice_mut()) - } - }) - } - - /// Computes the given operation on all digits of this `ApInt`. - /// - /// # Note - /// - /// Prefer this utility method if you want to perform the same - /// operation for all digits within this `ApInt` as this operation - /// uses the most efficient way to do so. - #[inline] - pub(in apint) fn modify_digits(&mut self, f: F) - where F: Fn(&mut Digit) - { - use self::DataAccessMut::*; - match self.access_data_mut() { - Inl(digit) => f(digit), - Ext(digits) => { - for digit in digits { - f(digit) - } - } - } - } - - /// Computes the given operation on all digits of this `ApInt` - /// zipped with the digits of `rhs`. - /// - /// # Note - /// - /// Prefer this utility method for these use cases since this operation - /// uses the most efficient way to perform the specified task. - #[inline] - pub(in apint) fn modify_zipped_digits(&mut self, rhs: &ApInt, f: F) -> Result<()> - where F: Fn(&mut Digit, Digit) - { - use self::ZipDataAccessMutSelf::*; - match self.zip_access_data_mut_self(rhs)? { - Inl(lhs, rhs) => f(lhs, rhs), - Ext(lhs, rhs) => { - for (l, &r) in lhs.into_iter().zip(rhs) { - f(l, r) - } - } - } - Ok(()) - } - - /// Returns a slice over the `Digit`s of this `ApInt` in little-endian order. 
- #[inline] - pub(in apint) fn as_digit_slice(&self) -> &[Digit] { - use std::slice; - match self.len.storage() { - Storage::Inl => unsafe { - slice::from_raw_parts(&self.data.inl, 1) - }, - Storage::Ext => unsafe { - slice::from_raw_parts(self.data.ext.as_ptr(), self.len_digits()) - } - } - } - - /// Returns a mutable slice over the `Digit`s of this `ApInt` in little-endian order. - #[inline] - pub(in apint) fn as_digit_slice_mut(&mut self) -> &mut [Digit] { - use std::slice; - match self.len.storage() { - Storage::Inl => unsafe { - slice::from_raw_parts_mut(&mut self.data.inl, 1) - }, - Storage::Ext => unsafe { - slice::from_raw_parts_mut(self.data.ext.as_ptr(), self.len_digits()) - } - } - } - - /// Returns the most significant `Digit` of this `ApInt`. - #[inline] - pub(in apint) fn most_significant_digit(&self) -> Digit { - match self.access_data() { - DataAccess::Inl(digit) => digit, - DataAccess::Ext(digits) => { - *digits.last().unwrap() - } - } - } - - /// Returns a mutable reference to the most significant `Digit` of this `ApInt`. - #[inline] - pub(in apint) fn most_significant_digit_mut(&mut self) -> &mut Digit { - match self.access_data_mut() { - DataAccessMut::Inl(digit) => digit, - DataAccessMut::Ext(digits) => { - digits.last_mut().unwrap() - } - } - } - - /// Returns the least significant `Digit` of this `ApInt`. - #[inline] - pub(in apint) fn least_significant_digit(&self) -> Digit { - match self.access_data() { - DataAccess::Inl(digit) => digit, - DataAccess::Ext(digits) => digits[0] - } - } - - /// Returns `Bit::Set` if the most significant bit of this `ApInt` is set - /// and `Bit::Unset` otherwise. 
- #[inline] - pub(in apint) fn most_significant_bit(&self) -> Bit { - let sign_bit_pos = self.width().sign_bit_pos(); - self.most_significant_digit() - .get(sign_bit_pos.to_pos_within_digit()) - .expect("`BitWidth::excess_bits` returns a number that \ - is always a valid `BitPos` for a `Digit` so this \ - operation cannot fail.") - } - - /// Returns `Bit::Set` if the least significant bit of this `ApInt` is set - /// and `Bit::Unset` otherwise. - #[inline] - pub(in apint) fn least_significant_bit(&self) -> Bit { - self.least_significant_digit().least_significant_bit() - } - - /// Clears unused bits of this `ApInt`. - /// - /// # Example - /// - /// An `ApInt` with a `BitWidth` of `100` bits requires - /// 2 `Digit`s for its internal value representation, - /// each having 64-bits which totals in `128` bits for the - /// `ApInt` instance. - /// So upon a call to `ApInt::clear_unused_bits` the upper - /// `128-100 = 28` bits are cleared (set to zero (`0`)). - #[inline] - pub(in apint) fn clear_unused_bits(&mut self) { - if let Some(bits) = self.width().excess_bits() { - self.most_significant_digit_mut() - .retain_last_n(bits) - .expect("`BitWidth::excess_bits` always returns a number of \ - bits that can safely forwarded to `Digit::retain_last_n`."); - } - } - - /// Returns `true` if this `ApInt` represents the value zero (`0`). - /// - /// # Note - /// - /// - Zero (`0`) is also called the additive neutral element. - /// - This operation is more efficient than comparing two instances - /// of `ApInt` for the same reason. - #[inline] - pub fn is_zero(&self) -> bool { - match self.access_data() { - DataAccess::Inl(digit) => digit.is_zero(), - DataAccess::Ext(digits) => { - digits.into_iter().all(|digit| digit.is_zero()) - } - } - } - - /// Returns `true` if this `ApInt` represents the value one (`1`). - /// - /// # Note - /// - /// - One (`1`) is also called the multiplicative neutral element. 
- /// - This operation is more efficient than comparing two instances - /// of `ApInt` for the same reason. - #[inline] - pub fn is_one(&self) -> bool { - match self.access_data() { - DataAccess::Inl(digit) => digit == Digit::one(), - DataAccess::Ext(digits) => { - let (last, rest) = digits.split_last().unwrap_or_else(|| unreachable!()); - last.is_one() && rest.into_iter().all(|digit| digit.is_zero()) - } - } - } - - /// Returns `true` if this `ApInt` represents an even number. - /// Equivalent to testing if the least significant bit is zero. - #[inline] - pub fn is_even(&self) -> bool { - self.least_significant_bit() == Bit::Unset - } - - /// Returns `true` if this `ApInt` represents an odd number. - /// Equivalent to testing if the least significant bit is one. - #[inline] - pub fn is_odd(&self) -> bool { - self.least_significant_bit() == Bit::Set - } - - /// Returns `true` if the **signed** representation of this `ApInt` is positive. - /// Equivalent to testing if the most significant bit is zero. - #[inline] - pub fn is_positive(&self) -> bool { - self.most_significant_bit() == Bit::Unset - } - - /// Returns `true` if the **signed** representation of this `ApInt` is negative. - /// Equivalent to testing if the most significant bit is one. - #[inline] - pub fn is_negative(&self) -> bool { - self.most_significant_bit() == Bit::Set - } - - /// Splits the least significant digits from the rest of the digit slice - /// and returns it as well as the remaining part of the digit slice. 
- #[inline] - pub(in apint) fn split_least_significant_digit(&self) -> (Digit, &[Digit]) { - match self.access_data() { - DataAccess::Inl(digit) => (digit, &[]), - DataAccess::Ext(digits) => { - let (lsd, rest) = digits.split_first() - .expect("An `ApInt` always has at least one digit so calling \ - `split_first` on a slice of its digits will never \ - return `None`."); - (*lsd, rest) - } - } - } - - /// Splits the most significant digits from the rest of the digit slice - /// and returns it as well as the remaining part of the digit slice. - #[inline] - pub(in apint) fn split_most_significant_digit(&self) -> (Digit, &[Digit]) { - match self.access_data() { - DataAccess::Inl(digit) => (digit, &[]), - DataAccess::Ext(digits) => { - let (lsd, rest) = digits.split_last() - .expect("An `ApInt` always has at least one digit so calling \ - `split_last` on a slice of its digits will never \ - return `None`."); - (*lsd, rest) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn most_significant_bit() { - assert_eq!(Bit::Unset, - ApInt::from_bit(false).most_significant_bit()); - assert_eq!(Bit::Set, - ApInt::from_bit(true).most_significant_bit()); - assert_eq!(Bit::Unset, - ApInt::from_u8(0b0101_0101).most_significant_bit()); - assert_eq!(Bit::Set, - ApInt::from_u8(0b1101_0101).most_significant_bit()); - assert_eq!(Bit::Unset, - ApInt::from_u16(0b0111_1000_1101_0101).most_significant_bit()); - assert_eq!(Bit::Set, - ApInt::from_u16(0b1011_0001_0101_0101).most_significant_bit()); - assert_eq!(Bit::Unset, - ApInt::from_u32(0x7000_0000).most_significant_bit()); - assert_eq!(Bit::Set, - ApInt::from_u32(0x8000_0000).most_significant_bit()); - assert_eq!(Bit::Unset, - ApInt::from_u64(0x70FC_A875_4321_1234).most_significant_bit()); - assert_eq!(Bit::Set, - ApInt::from_u64(0x8765_4321_5555_6666).most_significant_bit()); - } -} diff --git a/src/checks.rs b/src/checks.rs deleted file mode 100644 index ad3f210..0000000 --- a/src/checks.rs +++ /dev/null @@ -1,30 
+0,0 @@ -use errors::{Error, Result}; -use traits::{Width}; -use bitpos::{BitPos}; -use apint::{ShiftAmount}; - -#[inline] -pub(crate) fn verify_bit_access(a: &T, pos: P) -> Result<()> - where T: Width, - P: Into -{ - let pos = pos.into(); - let width = a.width(); - if !width.is_valid_pos(pos) { - return Err(Error::invalid_bit_access(pos, width)) - } - Ok(()) -} - -#[inline] -pub(crate) fn verify_shift_amount(a: &W, shift_amount: S) -> Result<()> - where W: Width, - S: Into -{ - let shift_amount = shift_amount.into(); - let width = a.width(); - if !width.is_valid_shift_amount(shift_amount) { - return Err(Error::invalid_shift_amount(shift_amount, width)) - } - Ok(()) -} diff --git a/src/construction.rs b/src/construction.rs new file mode 100644 index 0000000..f76a142 --- /dev/null +++ b/src/construction.rs @@ -0,0 +1,10 @@ +mod casting; +mod constructors; +#[cfg(feature = "rand_support")] +mod rand_impl; +#[cfg(feature = "serde_support")] +mod serde_impl; +mod serialization; +mod to_primitive; + +pub(crate) use self::{to_primitive::PrimitiveTy}; \ No newline at end of file diff --git a/src/apint/casting.rs b/src/construction/casting.rs similarity index 77% rename from src/apint/casting.rs rename to src/construction/casting.rs index dd3e597..0055de0 100644 --- a/src/apint/casting.rs +++ b/src/construction/casting.rs @@ -1,133 +1,16 @@ - -use apint::{ApInt}; -use errors::{Error, Result}; - -use bitwidth::{BitWidth}; -use storage::{Storage}; -use digit::{Bit}; -use traits::Width; -use utils::{try_forward_bin_mut_impl, forward_bin_mut_impl}; - -impl Clone for ApInt { - fn clone(&self) -> Self { - match self.storage() { - Storage::Inl => { - ApInt::new_inl(self.len, unsafe{ self.data.inl }) - } - Storage::Ext => { - use std::mem; - let req_digits = self.len_digits(); - let mut buffer = self.as_digit_slice() - .to_vec() - .into_boxed_slice(); - assert_eq!(buffer.len(), req_digits); - let ptr_buffer = buffer.as_mut_ptr(); - mem::forget(buffer); - unsafe{ 
ApInt::new_ext(self.len, ptr_buffer) } - } - } - } -} - -/// # Assignment Operations -impl ApInt { - /// Assigns `rhs` to this `ApInt`. - /// - /// This mutates digits and may affect the bitwidth of `self` - /// which **might result in an expensive operations**. - /// - /// After this operation `rhs` and `self` are equal to each other. - pub fn assign(&mut self, rhs: &ApInt) { - if self.len_digits() == rhs.len_digits() { - // If `self` and `rhs` require the same amount of digits - // for their representation we can simply utilize `ApInt` - // invariants and basically `memcpy` from `rhs` to `self`. - // Afterwards a simple adjustment of the length is sufficient. - // (At the end of this method.) - self.as_digit_slice_mut() - .copy_from_slice(rhs.as_digit_slice()); - } - else { - // In this case `rhs` and `self` require an unequal amount - // of digits for their representation which means that the - // digits that may be allocated by `self` must be dropped. - // - // Note that `ApInt::drop_digits` only deallocates if possible. - unsafe{ self.drop_digits(); } - - match rhs.storage() { - Storage::Inl => { - // If `rhs` is a small `ApInt` we can simply update - // the `digit` field of `self` and we are done. - self.data.inl = unsafe{ rhs.data.inl }; - } - Storage::Ext => { - // If `rhs` is a large heap-allocated `ApInt` we first - // need to expensively clone its buffer and feed it to `self`. - let cloned = rhs.clone(); - self.data.ext = unsafe{ cloned.data.ext }; - use std::mem; - mem::forget(cloned); - } - } - } - // Since all cases may need bit width adjustment we outsourced it - // to the end of this method. - self.len = rhs.len; - } - - /// Strictly assigns `rhs` to this `ApInt`. - /// - /// After this operation `rhs` and `self` are equal to each other. - /// - /// **Note:** Strict assigns protect against mutating the bit width - /// of `self` and thus return an error instead of executing a probably - /// expensive `assign` operation. 
- /// - /// # Errors - /// - /// - If `rhs` and `self` have unmatching bit widths. - pub fn strict_assign(&mut self, rhs: &ApInt) -> Result<()> { - if self.width() != rhs.width() { - return Error::unmatching_bitwidths(self.width(), rhs.width()) - .with_annotation(format!( - "Occured while trying to `strict_assign` {:?} to {:?}.", self, rhs)) - .into() - } - self.as_digit_slice_mut() - .copy_from_slice(rhs.as_digit_slice()); - Ok(()) - } -} +use crate::data::{ApInt, Digit}; +use crate::info::{BitWidth, Error, Result, Width}; +use crate::logic::{try_forward_bin_mut_impl, forward_bin_mut_impl}; /// # Casting: Truncation & Extension +/// +/// **Note**: unless otherwise noted in the function specific documentation, +/// +/// - These operations **will reallocate** if `self.width()` and `target_width` require different amounts of digits for their representation. +/// - These functions optimize to no-ops if `self.width()` and `target_width` are equal. impl ApInt { - /// Tries to truncate this `ApInt` inplace to the given `target_width` - /// and returns the result. - /// - /// # Note - /// - /// - This is useful for method chaining. - /// - For more details look into - /// [`truncate`](struct.ApInt.html#method.truncate). - /// - /// # Errors - /// - /// - If the `target_width` is greater than the current width. - pub fn into_truncate(self, target_width: W) -> Result - where W: Into - { - try_forward_bin_mut_impl(self, target_width, ApInt::truncate) - } - /// Tries to truncate this `ApInt` inplace to the given `target_width`. /// - /// # Note - /// - /// - This is a no-op if `self.width()` and `target_width` are equal. - /// - This operation is inplace as long as `self.width()` and `target_width` - /// require the same amount of digits for their representation. - /// /// # Errors /// /// - If the `target_width` is greater than the current width. 
@@ -199,16 +82,18 @@ impl ApInt { Ok(()) } - // ======================================================================== - - /// Tries to zero-extend this `ApInt` inplace to the given `target_width` - /// and returns the result. + /// Tries to truncate this `ApInt` inplace to the given `target_width` and returns the result. /// - /// # Note + /// # Errors /// - /// - This is useful for method chaining. - /// - For more details look into - /// [`zero_extend`](struct.ApInt.html#method.zero_extend). + /// - If the `target_width` is greater than the current width. + pub fn into_truncate(self, target_width: W) -> Result + where W: Into + { + try_forward_bin_mut_impl(self, target_width, ApInt::truncate) + } + + /// Tries to zero-extend this `ApInt` inplace to the given `target_width` and returns the result. /// /// # Errors /// @@ -221,12 +106,6 @@ impl ApInt { /// Tries to zero-extend this `ApInt` inplace to the given `target_width`. /// - /// # Note - /// - /// - This is a no-op if `self.width()` and `target_width` are equal. - /// - This operation is inplace as long as `self.width()` and `target_width` - /// require the same amount of digits for their representation. - /// /// # Errors /// /// - If the `target_width` is less than the current width. @@ -269,29 +148,19 @@ impl ApInt { // must allocate a new buffer that fits for the required amount of digits // for the target width. Also we need to `memcpy` the digits of the // extended `ApInt` to the newly allocated buffer. 
- use digit; use std::iter; assert!(target_req_digits > actual_req_digits); let additional_digits = target_req_digits - actual_req_digits; let extended_clone = ApInt::from_iter( self.digits() - .chain(iter::repeat(digit::ZERO).take(additional_digits))) + .chain(iter::repeat(Digit::ZERO).take(additional_digits))) .and_then(|apint| apint.into_truncate(target_width))?; *self = extended_clone; } Ok(()) } - // ======================================================================== - - /// Tries to sign-extend this `ApInt` inplace to the given `target_width` - /// and returns the result. - /// - /// # Note - /// - /// - This is useful for method chaining. - /// - For more details look into - /// [`sign_extend`](struct.ApInt.html#method.sign_extend). + /// Tries to sign-extend this `ApInt` inplace to the given `target_width` and returns the result. /// /// # Errors /// @@ -304,12 +173,6 @@ impl ApInt { /// Tries to sign-extend this `ApInt` inplace to the given `target_width`. /// - /// # Note - /// - /// - This is a no-op if `self.width()` and `target_width` are equal. - /// - This operation is inplace as long as `self.width()` and `target_width` - /// require the same amount of digits for their representation. - /// /// # Errors /// /// - If the `target_width` is less than the current width. @@ -335,7 +198,7 @@ impl ApInt { .into() } - if self.most_significant_bit() == Bit::Unset { + if !self.most_significant_bit() { return self.zero_extend(target_width) } @@ -367,7 +230,6 @@ impl ApInt { // must allocate a new buffer that fits for the required amount of digits // for the target width. Also we need to `memcpy` the digits of the // extended `ApInt` to the newly allocated buffer. 
- use digit; use std::iter; assert!(target_req_digits > actual_req_digits); let additional_digits = target_req_digits - actual_req_digits; @@ -379,7 +241,7 @@ impl ApInt { let extended_copy = ApInt::from_iter( self.digits() - .chain(iter::repeat(digit::ONES).take(additional_digits))) + .chain(iter::repeat(Digit::ONES).take(additional_digits))) .and_then(|apint| apint.into_truncate(target_width))?; self.clear_unused_bits(); @@ -389,30 +251,14 @@ impl ApInt { Ok(()) } - // ======================================================================== - - /// Zero-resizes this `ApInt` to the given `target_width` - /// and returns the result. - /// - /// # Note - /// - /// - This is useful for method chaining. - /// - For more details look into - /// [`zero_resize`](struct.ApInt.html#method.zero_resize). + /// Zero-resizes this `ApInt` to the given `target_width` and returns the result. pub fn into_zero_resize(self, target_width: W) -> ApInt where W: Into { forward_bin_mut_impl(self, target_width, ApInt::zero_resize) } - /// Sign-resizes this `ApInt` to the given `target_width` - /// and returns the result. - /// - /// # Note - /// - /// - This is useful for method chaining. - /// - For more details look into - /// [`sign_resize`](struct.ApInt.html#method.sign_resize). + /// Sign-resizes this `ApInt` to the given `target_width` and returns the result. 
pub fn into_sign_resize(self, target_width: W) -> ApInt where W: Into { diff --git a/src/apint/constructors.rs b/src/construction/constructors.rs similarity index 71% rename from src/apint/constructors.rs rename to src/construction/constructors.rs index 62481ad..3229895 100644 --- a/src/apint/constructors.rs +++ b/src/construction/constructors.rs @@ -1,87 +1,15 @@ - -use apint::{ApInt, ApIntData}; -use bitwidth::{BitWidth}; -use errors::{Error, Result}; -use storage::{Storage}; -use digit::{Bit, Digit}; -use digit; +use crate::data::{ApInt, Digit, DigitRepr}; +use crate::info::{BitWidth, Error, Result}; use smallvec::SmallVec; -use std::ptr::NonNull; - -impl ApInt { - /// Deallocates memory that may be allocated by this `ApInt`. - /// - /// `ApInt` instances with a bit width larger than `64` bits - /// allocate their digits on the heap. With `drop_digits` this - /// memory can be freed. - /// - /// **Note:** This is extremely unsafe, only use this if the - /// `ApInt` no longer needs its digits. - /// - /// **Note:** This is `unsafe` since it violates invariants - /// of the `ApInt`. - pub(in apint) unsafe fn drop_digits(&mut self) { - if self.len.storage() == Storage::Ext { - let len = self.len_digits(); - drop(Vec::from_raw_parts( - self.data.ext.as_ptr(), len, len)) - } - } -} - -impl Drop for ApInt { - fn drop(&mut self) { - unsafe{self.drop_digits()} - } -} - /// # Constructors impl ApInt { - - /// Creates a new small `ApInt` from the given `BitWidth` and `Digit`. - /// - /// Small `ApInt` instances are stored entirely on the stack. - /// - /// # Panics - /// - /// - If the given `width` represents a `BitWidth` larger than `64` bits. - #[inline] - pub(in apint) fn new_inl(width: BitWidth, digit: Digit) -> ApInt { - assert_eq!(width.storage(), Storage::Inl); - ApInt { - len: width, - data: ApIntData { inl: digit } - } - } - - /// Creates a new large `ApInt` from the given `BitWidth` and `Digit`. 
- /// - /// Large `ApInt` instances allocate their digits on the heap. - /// - /// **Note:** This operation is unsafe since the buffer length behind the - /// given `ext_ptr` must be trusted. - /// - /// # Panics - /// - /// - If the given `width` represents a `BitWidth` smaller than - /// or equal to `64` bits. - pub(in apint) unsafe fn new_ext(width: BitWidth, ext_ptr: *mut Digit) -> ApInt { - assert_eq!(width.storage(), Storage::Ext); - ApInt{ - len: width, - data: ApIntData{ ext: NonNull::new_unchecked(ext_ptr) } - } - } - - /// Creates a new `ApInt` from the given `Bit` value with a bit width of `1`. + /// Creates a new `ApInt` from the given boolean value with a bit width of `1`. /// - /// This function is generic over types that are convertible to `Bit` such as `bool`. - pub fn from_bit(bit: B) -> ApInt - where B: Into - { - ApInt::new_inl(BitWidth::w1(), Digit(bit.into().to_bool() as u64)) + /// When `bit` is `false`, the single bit in the `ApInt` is 0, otherwise it is 1. + pub fn from_bool(bit: bool) -> ApInt { + ApInt::new_inl(BitWidth::w1(), Digit(bit as DigitRepr)) } /// Creates a new `ApInt` from a given `i8` value with a bit-width of 8. @@ -140,7 +68,7 @@ impl ApInt { /// Creates a new `ApInt` from a given `u128` value with a bit-width of 128. pub fn from_u128(val: u128) -> ApInt { - let hi = (val >> digit::BITS) as u64; + let hi = (val >> Digit::BITS) as u64; let lo = (val & ((1u128 << 64) - 1)) as u64; ApInt::from([hi, lo]) } @@ -160,7 +88,7 @@ impl ApInt { /// # Errors /// /// - If the iterator yields no elements. 
- pub(in apint) fn from_iter(digits: I) -> Result + pub(crate) fn from_iter(digits: I) -> Result where I: IntoIterator, { let mut buffer = digits.into_iter().collect::>(); @@ -176,7 +104,7 @@ impl ApInt { } n => { use std::mem; - let bitwidth = BitWidth::new(n * digit::BITS) + let bitwidth = BitWidth::new(n * Digit::BITS) .expect("We have already asserted that the number of items the given Iterator \ iterates over is greater than `1` and thus non-zero and thus a valid `BitWidth`."); let req_digits = bitwidth.required_digits(); @@ -191,8 +119,9 @@ impl ApInt { } // TODO: convert this to take from a slice or IntoIterator + #[cfg(test)] pub(crate) fn from_vec_u64(val: Vec) -> Option { - if val.len() == 0 { + if val.is_empty() { None } else { let buffer = val.into_iter() @@ -206,9 +135,9 @@ impl ApInt { /// Creates a new `ApInt` that represents the repetition of the given digit /// up to the given target bitwidth. /// - /// Note: The last digit in the generated sequence is truncated to make the `ApInt`'s + /// Note: The last and thus the most-significant digit in the generated sequence is truncated to make the `ApInt`'s /// value representation fit the given bit-width. - pub(in apint) fn repeat_digit(target_width: BitWidth, digit: D) -> ApInt + pub(crate) fn repeat_digit(target_width: BitWidth, digit: D) -> ApInt where D: Into { use std::iter; @@ -229,7 +158,7 @@ impl ApInt { /// Creates a new `ApInt` with the given bit width that represents zero. pub fn zero(width: BitWidth) -> ApInt { - ApInt::repeat_digit(width, digit::ZERO) + ApInt::repeat_digit(width, Digit::ZERO) } /// Creates a new `ApInt` with the given bit width that represents one. @@ -246,7 +175,7 @@ impl ApInt { /// Creates a new `ApInt` with the given bit width that has all bits set. pub fn all_set(width: BitWidth) -> ApInt { - ApInt::repeat_digit(width, digit::ONES) + ApInt::repeat_digit(width, Digit::ONES) } /// Returns the smallest unsigned `ApInt` that can be represented by the given `BitWidth`. 
@@ -274,12 +203,10 @@ impl ApInt { } } -impl From for ApInt - where B: Into -{ +impl From for ApInt { #[inline] - fn from(bit: B) -> ApInt { - ApInt::from_bit(bit) + fn from(bit: bool) -> ApInt { + ApInt::from_bool(bit) } } @@ -388,6 +315,8 @@ impl_from_array_for_apint!(8); // 512 bits impl_from_array_for_apint!(16); // 1024 bits impl_from_array_for_apint!(32); // 2048 bits +// Note: There are some tests for this file that are located in `apint.rs` + #[cfg(test)] mod tests { use super::*; @@ -402,38 +331,34 @@ mod tests { powers().skip(range.start).take(range.end - range.start) } - mod tests { - use super::{powers, powers_from_to}; + #[test] + fn test_powers() { + let mut pows = powers(); + assert_eq!(pows.next(), Some(1 << 0)); + assert_eq!(pows.next(), Some(1 << 1)); + assert_eq!(pows.next(), Some(1 << 2)); + assert_eq!(pows.next(), Some(1 << 3)); + assert_eq!(pows.next(), Some(1 << 4)); + assert_eq!(pows.next(), Some(1 << 5)); + assert_eq!(pows.last(), Some(1 << 127)); + } - #[test] - fn test_powers() { - let mut pows = powers(); - assert_eq!(pows.next(), Some(1 << 0)); - assert_eq!(pows.next(), Some(1 << 1)); - assert_eq!(pows.next(), Some(1 << 2)); - assert_eq!(pows.next(), Some(1 << 3)); - assert_eq!(pows.next(), Some(1 << 4)); - assert_eq!(pows.next(), Some(1 << 5)); - assert_eq!(pows.last(), Some(1 << 127)); + #[test] + fn test_powers_from_to() { + { + let mut powsft = powers_from_to(0..4); + assert_eq!(powsft.next(), Some(1 << 0)); + assert_eq!(powsft.next(), Some(1 << 1)); + assert_eq!(powsft.next(), Some(1 << 2)); + assert_eq!(powsft.next(), Some(1 << 3)); + assert_eq!(powsft.next(), None); } - - #[test] - fn test_powers_from_to() { - { - let mut powsft = powers_from_to(0..4); - assert_eq!(powsft.next(), Some(1 << 0)); - assert_eq!(powsft.next(), Some(1 << 1)); - assert_eq!(powsft.next(), Some(1 << 2)); - assert_eq!(powsft.next(), Some(1 << 3)); - assert_eq!(powsft.next(), None); - } - { - let mut powsft = powers_from_to(4..7); - 
assert_eq!(powsft.next(), Some(1 << 4)); - assert_eq!(powsft.next(), Some(1 << 5)); - assert_eq!(powsft.next(), Some(1 << 6)); - assert_eq!(powsft.next(), None); - } + { + let mut powsft = powers_from_to(4..7); + assert_eq!(powsft.next(), Some(1 << 4)); + assert_eq!(powsft.next(), Some(1 << 5)); + assert_eq!(powsft.next(), Some(1 << 6)); + assert_eq!(powsft.next(), None); } } @@ -451,41 +376,23 @@ mod tests { } #[test] - fn from_bit() { + fn from_bool() { { - let explicit = ApInt::from_bit(Bit::Set); - let implicit = ApInt::from(Bit::Set); + let explicit = ApInt::from_bool(true); + let implicit = ApInt::from(true); let expected = ApInt::new_inl(BitWidth::w1(), Digit::one()); assert_eq!(explicit, implicit); assert_eq!(explicit, expected); } { - let explicit = ApInt::from_bit(Bit::Unset); - let implicit = ApInt::from(Bit::Unset); + let explicit = ApInt::from_bool(false); + let implicit = ApInt::from(false); let expected = ApInt::new_inl(BitWidth::w1(), Digit::zero()); assert_eq!(explicit, implicit); assert_eq!(explicit, expected); } } - #[test] - fn from_w8() { - for val in test_values_u8() { - let explicit_u8 = ApInt::from_u8(val); - let explicit_i8 = ApInt::from_i8(val as i8); - let implicit_u8 = ApInt::from(val); - let implicit_i8 = ApInt::from(val as i8); - let expected = ApInt{ - len : BitWidth::w8(), - data: ApIntData{inl: Digit(u64::from(val))} - }; - assert_eq!(explicit_u8, explicit_i8); - assert_eq!(explicit_u8, implicit_i8); - assert_eq!(explicit_u8, implicit_u8); - assert_eq!(explicit_u8, expected); - } - } - fn test_values_u16() -> impl Iterator { test_values_u8() .map(u16::from) @@ -501,24 +408,6 @@ mod tests { ].into_iter().map(|v| *v)) } - #[test] - fn from_w16() { - for val in test_values_u16() { - let explicit_u16 = ApInt::from_u16(val); - let explicit_i16 = ApInt::from_i16(val as i16); - let implicit_u16 = ApInt::from(val); - let implicit_i16 = ApInt::from(val as i16); - let expected = ApInt{ - len : BitWidth::w16(), - data: ApIntData{inl: 
Digit(u64::from(val))} - }; - assert_eq!(explicit_u16, explicit_i16); - assert_eq!(explicit_u16, implicit_i16); - assert_eq!(explicit_u16, implicit_u16); - assert_eq!(explicit_u16, expected); - } - } - fn test_values_u32() -> impl Iterator { test_values_u16() .map(u32::from) @@ -532,24 +421,6 @@ mod tests { ].into_iter().map(|v| *v)) } - #[test] - fn from_w32() { - for val in test_values_u32() { - let explicit_u32 = ApInt::from_u32(val); - let explicit_i32 = ApInt::from_i32(val as i32); - let implicit_u32 = ApInt::from(val); - let implicit_i32 = ApInt::from(val as i32); - let expected = ApInt{ - len : BitWidth::w32(), - data: ApIntData{inl: Digit(u64::from(val))} - }; - assert_eq!(explicit_u32, explicit_i32); - assert_eq!(explicit_u32, implicit_i32); - assert_eq!(explicit_u32, implicit_u32); - assert_eq!(explicit_u32, expected); - } - } - fn test_values_u64() -> impl Iterator { test_values_u32() .map(u64::from) @@ -563,24 +434,6 @@ mod tests { ].into_iter().map(|v| *v)) } - #[test] - fn from_w64() { - for val in test_values_u64() { - let explicit_u64 = ApInt::from_u64(val); - let explicit_i64 = ApInt::from_i64(val as i64); - let implicit_u64 = ApInt::from(val); - let implicit_i64 = ApInt::from(val as i64); - let expected = ApInt{ - len : BitWidth::w64(), - data: ApIntData{inl: Digit(u64::from(val))} - }; - assert_eq!(explicit_u64, explicit_i64); - assert_eq!(explicit_u64, implicit_i64); - assert_eq!(explicit_u64, implicit_u64); - assert_eq!(explicit_u64, expected); - } - } - fn test_values_u128() -> impl Iterator { test_values_u64() .map(u128::from) @@ -596,7 +449,6 @@ mod tests { #[test] fn from_w128() { - use digit::{Digit, DigitRepr}; for val in test_values_u128() { let explicit_u128 = ApInt::from_u128(val); let explicit_i128 = ApInt::from_i128(val as i128); @@ -616,7 +468,7 @@ mod tests { #[test] fn zero() { - assert_eq!(ApInt::zero(BitWidth::w1()), ApInt::from_bit(false)); + assert_eq!(ApInt::zero(BitWidth::w1()), ApInt::from_bool(false)); 
assert_eq!(ApInt::zero(BitWidth::w8()), ApInt::from_u8(0)); assert_eq!(ApInt::zero(BitWidth::w16()), ApInt::from_u16(0)); assert_eq!(ApInt::zero(BitWidth::w32()), ApInt::from_u32(0)); @@ -628,7 +480,7 @@ mod tests { #[test] fn one() { - assert_eq!(ApInt::one(BitWidth::w1()), ApInt::from_bit(true)); + assert_eq!(ApInt::one(BitWidth::w1()), ApInt::from_bool(true)); assert_eq!(ApInt::one(BitWidth::w8()), ApInt::from_u8(1)); assert_eq!(ApInt::one(BitWidth::w16()), ApInt::from_u16(1)); assert_eq!(ApInt::one(BitWidth::w32()), ApInt::from_u32(1)); @@ -663,7 +515,7 @@ mod tests { #[test] fn all_set() { - assert_eq!(ApInt::all_set(BitWidth::w1()), ApInt::from_bit(true)); + assert_eq!(ApInt::all_set(BitWidth::w1()), ApInt::from_bool(true)); assert_eq!(ApInt::all_set(BitWidth::w8()), ApInt::from_i8(-1)); assert_eq!(ApInt::all_set(BitWidth::w16()), ApInt::from_i16(-1)); assert_eq!(ApInt::all_set(BitWidth::w32()), ApInt::from_i32(-1)); @@ -709,7 +561,7 @@ mod tests { #[test] fn signed_min_value() { - assert_eq!(ApInt::signed_min_value(BitWidth::w1()), ApInt::from_bit(true)); + assert_eq!(ApInt::signed_min_value(BitWidth::w1()), ApInt::from_bool(true)); assert_eq!(ApInt::signed_min_value(BitWidth::w8()), ApInt::from_i8(i8::min_value())); assert_eq!(ApInt::signed_min_value(BitWidth::w16()), ApInt::from_i16(i16::min_value())); assert_eq!(ApInt::signed_min_value(BitWidth::w32()), ApInt::from_i32(i32::min_value())); @@ -739,7 +591,7 @@ mod tests { #[test] fn signed_max_value() { - assert_eq!(ApInt::signed_max_value(BitWidth::w1()), ApInt::from_bit(false)); + assert_eq!(ApInt::signed_max_value(BitWidth::w1()), ApInt::from_bool(false)); assert_eq!(ApInt::signed_max_value(BitWidth::w8()), ApInt::from_i8(i8::max_value())); assert_eq!(ApInt::signed_max_value(BitWidth::w16()), ApInt::from_i16(i16::max_value())); assert_eq!(ApInt::signed_max_value(BitWidth::w32()), ApInt::from_i32(i32::max_value())); diff --git a/src/apint/rand_impl.rs b/src/construction/rand_impl.rs similarity index 54% 
rename from src/apint/rand_impl.rs rename to src/construction/rand_impl.rs index 8505dbe..c794511 100644 --- a/src/apint/rand_impl.rs +++ b/src/construction/rand_impl.rs @@ -1,16 +1,14 @@ -use apint::{ApInt}; -use bitwidth::{BitWidth}; -use digit::{Digit}; +use crate::data::{ApInt, Digit}; +use crate::info::{BitWidth}; use rand; -use rand::{FromEntropy}; +use rand::{Rng, SeedableRng}; +use rand::distributions::{Distribution, Standard}; +use rand::rngs::SmallRng; -impl rand::distributions::Distribution for rand::distributions::Standard { +impl Distribution for Standard { /// Creates a random `Digit` using the given random number generator. - fn sample(&self, rng: &mut R) -> Digit - where - R: rand::Rng + ?Sized - { + fn sample(&self, rng: &mut R) -> Digit { Digit(rng.next_u64()) } } @@ -19,20 +17,15 @@ impl rand::distributions::Distribution for rand::distributions::Standard impl ApInt { /// Creates a new `ApInt` with the given `BitWidth` and random `Digit`s. pub fn random_with_width(width: BitWidth) -> ApInt { - ApInt::random_with_width_using(width, &mut rand::rngs::SmallRng::from_entropy()) + ApInt::random_with_width_using(width, &mut SmallRng::from_entropy()) } - /// Creates a new `ApInt` with the given `BitWidth` and random `Digit`s - /// using the given random number generator. - /// - /// **Note:** This is useful for cryptographic or testing purposes. - pub fn random_with_width_using(width: BitWidth, rng: &mut R) -> ApInt - where R: rand::Rng - { - use rand::distributions::Standard; + /// Creates a new `ApInt` with the given `BitWidth` and random `Digit`s using the given random + /// number generator. This is useful for cryptographic or testing purposes. 
+ pub fn random_with_width_using(width: BitWidth, rng: &mut R) -> ApInt { let required_digits = width.required_digits(); assert!(required_digits >= 1); - let random_digits = rng.sample_iter::(&Standard).take(required_digits); + let random_digits = rng.sample_iter::(Standard).take(required_digits); ApInt::from_iter(random_digits) .expect("We asserted that `required_digits` is at least `1` or greater so it is safe to assume that `ApInt::from_iter` won't fail.") @@ -42,22 +35,14 @@ impl ApInt { } /// Randomizes the digits of this `ApInt` inplace. - /// - /// This won't change its `BitWidth`. pub fn randomize(&mut self) { - self.randomize_using(&mut rand::rngs::SmallRng::from_entropy()) + self.randomize_using(&mut SmallRng::from_entropy()) } - /// Randomizes the digits of this `ApInt` inplace using the given - /// random number generator. - /// - /// This won't change its `BitWidth`. - pub fn randomize_using(&mut self, rng: &mut R) - where R: rand::Rng - { - use rand::distributions::Standard; + /// Randomizes the digits of this `ApInt` inplace using the given random number generator. 
+ pub fn randomize_using(&mut self, rng: &mut R) { self.digits_mut() - .zip(rng.sample_iter::(&Standard)) + .zip(rng.sample_iter::(Standard)) .for_each(|(d, r)| *d = r); self.clear_unused_bits(); } @@ -66,58 +51,52 @@ impl ApInt { #[cfg(test)] mod tests { use super::*; - use rand::{SeedableRng}; + use rand_xoshiro::Xoshiro256StarStar; #[test] fn random_with_width_using() { - let default_seed = ::Seed::default(); - let mut rng = rand::XorShiftRng::from_seed(default_seed); - let r = &mut rng; - assert_eq!(ApInt::random_with_width_using(BitWidth::w1(), r), ApInt::from_bit(true)); - assert_eq!(ApInt::random_with_width_using(BitWidth::w8(), r), ApInt::from_u8(100)); - assert_eq!(ApInt::random_with_width_using(BitWidth::w16(), r), ApInt::from_u16(30960)); - assert_eq!(ApInt::random_with_width_using(BitWidth::w32(), r), ApInt::from_u32(1788231528)); - assert_eq!(ApInt::random_with_width_using(BitWidth::w64(), r), ApInt::from_u64(13499822775494449820)); - assert_eq!(ApInt::random_with_width_using(BitWidth::w128(), r), ApInt::from([16330942765510900160_u64, 131735358788273206])); + let r = &mut Xoshiro256StarStar::seed_from_u64(0); + assert_eq!(ApInt::random_with_width_using(BitWidth::w1(), r), ApInt::from_bool(false)); + assert_eq!(ApInt::random_with_width_using(BitWidth::w8(), r), ApInt::from_u8(42)); + assert_eq!(ApInt::random_with_width_using(BitWidth::w16(), r), ApInt::from_u16(59104)); + assert_eq!(ApInt::random_with_width_using(BitWidth::w32(), r), ApInt::from_u32(640494892)); + assert_eq!(ApInt::random_with_width_using(BitWidth::w64(), r), ApInt::from_u64(13521403990117723737)); + assert_eq!(ApInt::random_with_width_using(BitWidth::w128(), r), ApInt::from([7788427924976520344u64, 18442103541295991498])); } #[test] fn randomize_using() { - let default_seed = ::Seed::default(); - let mut rng1 = rand::XorShiftRng::from_seed(default_seed); - let mut rng2 = rand::XorShiftRng::from_seed(default_seed); - let r1 = &mut rng1; - let r2 = &mut rng2; - + let r0 = &mut 
Xoshiro256StarStar::seed_from_u64(0); + let r1 = &mut Xoshiro256StarStar::seed_from_u64(0); { - let mut randomized = ApInt::from_bit(false); - randomized.randomize_using(r1); - let new_random = ApInt::random_with_width_using(BitWidth::w1(), r2); + let mut randomized = ApInt::from_bool(false); + randomized.randomize_using(r0); + let new_random = ApInt::random_with_width_using(BitWidth::w1(), r1); assert_eq!(randomized, new_random); }{ let mut randomized = ApInt::from_u8(0); - randomized.randomize_using(r1); - let new_random = ApInt::random_with_width_using(BitWidth::w8(), r2); + randomized.randomize_using(r0); + let new_random = ApInt::random_with_width_using(BitWidth::w8(), r1); assert_eq!(randomized, new_random); }{ let mut randomized = ApInt::from_u16(0); - randomized.randomize_using(r1); - let new_random = ApInt::random_with_width_using(BitWidth::w16(), r2); + randomized.randomize_using(r0); + let new_random = ApInt::random_with_width_using(BitWidth::w16(), r1); assert_eq!(randomized, new_random); }{ let mut randomized = ApInt::from_u32(0); - randomized.randomize_using(r1); - let new_random = ApInt::random_with_width_using(BitWidth::w32(), r2); + randomized.randomize_using(r0); + let new_random = ApInt::random_with_width_using(BitWidth::w32(), r1); assert_eq!(randomized, new_random); }{ let mut randomized = ApInt::from_u64(0); - randomized.randomize_using(r1); - let new_random = ApInt::random_with_width_using(BitWidth::w64(), r2); + randomized.randomize_using(r0); + let new_random = ApInt::random_with_width_using(BitWidth::w64(), r1); assert_eq!(randomized, new_random); }{ let mut randomized = ApInt::from_u128(0); - randomized.randomize_using(r1); - let new_random = ApInt::random_with_width_using(BitWidth::w128(), r2); + randomized.randomize_using(r0); + let new_random = ApInt::random_with_width_using(BitWidth::w128(), r1); assert_eq!(randomized, new_random); } } diff --git a/src/apint/serde_impl.rs b/src/construction/serde_impl.rs similarity index 99% rename 
from src/apint/serde_impl.rs rename to src/construction/serde_impl.rs index bba753a..997a3c9 100644 --- a/src/apint/serde_impl.rs +++ b/src/construction/serde_impl.rs @@ -1,6 +1,5 @@ -use apint::{ApInt}; -use digit::{Digit}; -use bitwidth::{BitWidth}; +use crate::data::{ApInt, Digit}; +use crate::info::BitWidth; use serde::{ Serialize, diff --git a/src/apint/serialization.rs b/src/construction/serialization.rs similarity index 93% rename from src/apint/serialization.rs rename to src/construction/serialization.rs index 42a535f..a209cac 100644 --- a/src/apint/serialization.rs +++ b/src/construction/serialization.rs @@ -1,16 +1,23 @@ -use radix::{Radix}; -use apint::{ApInt}; -use errors::{Error, Result}; -use digit; +use crate::data::{ApInt, Digit, DigitRepr}; +use crate::info::{Radix, Error, Result, Width}; use std::fmt; +impl fmt::Debug for ApInt { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("ApInt") + .field("len", &self.width()) + .field("digits", &self.as_digit_slice()) + .finish() + } +} + impl fmt::Binary for ApInt { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.is_zero() { return write!(f, "0") } - let mut ds = self.as_digit_slice().into_iter().rev(); + let mut ds = self.as_digit_slice().iter().rev(); while let Some(digit) = ds.next() { if digit.is_zero() { continue; @@ -40,7 +47,7 @@ impl fmt::LowerHex for ApInt { if self.is_zero() { return write!(f, "0") } - let mut ds = self.as_digit_slice().into_iter().rev(); + let mut ds = self.as_digit_slice().iter().rev(); while let Some(digit) = ds.next() { if digit.is_zero() { continue; @@ -60,7 +67,7 @@ impl fmt::UpperHex for ApInt { if self.is_zero() { return write!(f, "0") } - let mut ds = self.as_digit_slice().into_iter().rev(); + let mut ds = self.as_digit_slice().iter().rev(); while let Some(digit) = ds.next() { if digit.is_zero() { continue; @@ -147,7 +154,7 @@ impl ApInt { let result = match radix.exact_bits_per_digit() { Some(bits) => { v.reverse(); - if 
digit::BITS % bits == 0 { + if Digit::BITS % bits == 0 { ApInt::from_bitwise_digits(&v, bits) } else { @@ -170,13 +177,10 @@ impl ApInt { // // TODO: Better document what happens here and why. fn from_bitwise_digits(v: &[u8], bits: usize) -> ApInt { - use digit; - use digit::{DigitRepr, Digit}; - - debug_assert!(!v.is_empty() && bits <= 8 && digit::BITS % bits == 0); + debug_assert!(!v.is_empty() && bits <= 8 && Digit::BITS % bits == 0); debug_assert!(v.iter().all(|&c| DigitRepr::from(c) < (1 << bits))); - let radix_digits_per_digit = digit::BITS / bits; + let radix_digits_per_digit = Digit::BITS / bits; let data = v.chunks(radix_digits_per_digit) .map(|chunk| chunk.iter() @@ -194,34 +198,31 @@ impl ApInt { // // TODO: Better document what happens here and why. fn from_inexact_bitwise_digits(v: &[u8], bits: usize) -> ApInt { - use digit; - use digit::{DigitRepr, Digit}; - - debug_assert!(!v.is_empty() && bits <= 8 && digit::BITS % bits != 0); + debug_assert!(!v.is_empty() && bits <= 8 && Digit::BITS % bits != 0); debug_assert!(v.iter().all(|&c| (DigitRepr::from(c)) < (1 << bits))); - let len_digits = (v.len() * bits + digit::BITS - 1) / digit::BITS; + let len_digits = (v.len() * bits + Digit::BITS - 1) / Digit::BITS; let mut data = Vec::with_capacity(len_digits); let mut d = 0; let mut dbits = 0; // Number of bits we currently have in d. 
- // Walk v accumulating bits in d; whenever we accumulate digit::BITS in d, spit out a digit: + // Walk v accumulating bits in d; whenever we accumulate Digit::BITS in d, spit out a digit: for &c in v { d |= (DigitRepr::from(c)) << dbits; dbits += bits; - if dbits >= digit::BITS { + if dbits >= Digit::BITS { data.push(Digit(d)); - dbits -= digit::BITS; - // If `dbits` was greater than `digit::BITS`, we dropped some of the bits in c + dbits -= Digit::BITS; + // If `dbits` was greater than `Digit::BITS`, we dropped some of the bits in c // (they couldn't fit in d) - grab the bits we lost here: d = (DigitRepr::from(c)) >> (bits - dbits); } } if dbits > 0 { - debug_assert!(dbits < digit::BITS); + debug_assert!(dbits < Digit::BITS); data.push(Digit(d)); } @@ -235,15 +236,12 @@ impl ApInt { // TODO: This does not work, yet. Some parts of the algorithm are // commented-out since the required functionality does not exist, yet. fn from_radix_digits(v: &[u8], radix: Radix) -> ApInt { - use digit; - use digit::{DigitRepr, Digit}; - debug_assert!(!v.is_empty() && !radix.is_power_of_two()); debug_assert!(v.iter().all(|&c| radix.is_valid_byte(c))); // Estimate how big the result will be, so we can pre-allocate it. let bits = (f64::from(radix.to_u8())).log2() * v.len() as f64; - let big_digits = (bits / digit::BITS as f64).ceil(); + let big_digits = (bits / Digit::BITS as f64).ceil(); let mut data = Vec::with_capacity(big_digits as usize); let (_base, power) = radix.get_radix_base(); @@ -266,7 +264,7 @@ impl ApInt { data.push(0); } - let mut carry = 0; + let carry = 0; for _d in &mut data { // *d = mac_with_carry(0, *d, base, &mut carry); // TODO! This was commented out. @@ -285,9 +283,7 @@ impl ApInt { } } -// ======================================================================= /// Serialization -/// ======================================================================= impl ApInt { /// Returns a `String` representation of the binary encoded `ApInt` for the given `Radix`. 
pub fn to_string_radix(&self, radix: R) -> String @@ -302,8 +298,7 @@ impl ApInt { #[cfg(test)] mod tests { use super::*; - - use bitwidth::{BitWidth}; + use crate::info::BitWidth; mod binary { use super::*; diff --git a/src/apint/to_primitive.rs b/src/construction/to_primitive.rs similarity index 97% rename from src/apint/to_primitive.rs rename to src/construction/to_primitive.rs index e64bb5e..d09eb5f 100644 --- a/src/apint/to_primitive.rs +++ b/src/construction/to_primitive.rs @@ -1,9 +1,5 @@ -use apint::{ApInt}; -use digit; -use digit::{Digit}; -use bitwidth::{BitWidth}; -use errors::{Result, Error}; -use traits::{Width}; +use crate::data::{ApInt, Digit}; +use crate::info::{BitWidth, Error, Result, Width}; /// Represents a primitive data type. /// @@ -135,13 +131,11 @@ impl ApInt { let mut lsd = self.least_significant_digit(); let actual_width = self.width(); let target_width = prim_ty.associated_width(); - if prim_ty.is_signed() { - if actual_width < target_width { - lsd.sign_extend_from(actual_width) - .expect("We already asserted that `actual_width` < `target_width` \ - and since `target_width` is always less than or equal to \ - `64` bits calling `Digit::sign_extend_from` is safe for it."); - } + if prim_ty.is_signed() && (actual_width < target_width) { + lsd.sign_extend_from(actual_width) + .expect("We already asserted that `actual_width` < `target_width` \ + and since `target_width` is always less than or equal to \ + `64` bits calling `Digit::sign_extend_from` is safe for it."); } if target_width < BitWidth::w64() { lsd.truncate_to(target_width) @@ -158,8 +152,8 @@ impl ApInt { /// /// # Note /// - /// - Basically this returns `true` if the least significant - /// bit of this `ApInt` is `1` and `false` otherwise. + /// In this context, a `bool` is interpreted as a single bit, where `false` + /// is 0 and `true` is 1. 
pub fn resize_to_bool(&self) -> bool { match self.resize_to_primitive_ty(PrimitiveTy::Bool) { Digit(0) => false, @@ -287,7 +281,7 @@ impl ApInt { let ( lsd_0, rest) = self.split_least_significant_digit(); let (&lsd_1, _) = rest.split_first().unwrap_or((&Digit(0), &[])); let mut result: i128 = - (i128::from(lsd_1.repr()) << digit::BITS) + i128::from(lsd_0.repr()); + (i128::from(lsd_1.repr()) << Digit::BITS) + i128::from(lsd_0.repr()); let actual_width = self.width(); let target_width = BitWidth::w128(); @@ -313,7 +307,7 @@ impl ApInt { let ( lsd_0, rest) = self.split_least_significant_digit(); let (&lsd_1, _) = rest.split_first().unwrap_or((&Digit(0), &[])); let result: u128 = - (u128::from(lsd_1.repr()) << digit::BITS) + u128::from(lsd_0.repr()); + (u128::from(lsd_1.repr()) << Digit::BITS) + u128::from(lsd_0.repr()); result } } @@ -339,7 +333,7 @@ impl ApInt { debug_assert_ne!(prim_ty, PrimitiveTy::I128); let (mut lsd, rest) = self.split_least_significant_digit(); if !prim_ty.is_valid_repr(lsd.repr()) - || rest.into_iter().any(|d| d.repr() != 0) + || rest.iter().any(|d| d.repr() != 0) { return Error::encountered_unrepresentable_value( self.clone(), prim_ty).into() @@ -536,12 +530,12 @@ impl ApInt { pub fn try_to_i128(&self) -> Result { let ( lsd_0, rest) = self.split_least_significant_digit(); let (&lsd_1, rest) = rest.split_first().unwrap_or((&Digit(0), &[])); - if rest.into_iter().any(|d| d.repr() != 0) { + if rest.iter().any(|d| d.repr() != 0) { return Error::encountered_unrepresentable_value( self.clone(), PrimitiveTy::I128).into() } let mut result: i128 = - (i128::from(lsd_1.repr()) << digit::BITS) + i128::from(lsd_0.repr()); + (i128::from(lsd_1.repr()) << Digit::BITS) + i128::from(lsd_0.repr()); let actual_width = self.width(); let target_width = BitWidth::w128(); @@ -574,12 +568,12 @@ impl ApInt { pub fn try_to_u128(&self) -> Result { let ( lsd_0, rest) = self.split_least_significant_digit(); let (&lsd_1, rest) = rest.split_first().unwrap_or((&Digit(0), 
&[])); - if rest.into_iter().any(|d| d.repr() != 0) { + if rest.iter().any(|d| d.repr() != 0) { return Error::encountered_unrepresentable_value( self.clone(), PrimitiveTy::U128).into() } let result: u128 = - (u128::from(lsd_1.repr()) << digit::BITS) + u128::from(lsd_0.repr()); + (u128::from(lsd_1.repr()) << Digit::BITS) + u128::from(lsd_0.repr()); Ok(result) } } @@ -644,7 +638,7 @@ mod tests { match prim_ty { Bool => { let val = val != 0; - (val as u128, ApInt::from_bit(val)) + (val as u128, ApInt::from_bool(val)) } I8 => { let val = val as i8; @@ -853,7 +847,7 @@ mod tests { } } - mod try { + mod try_to { use super::*; #[test] diff --git a/src/data.rs b/src/data.rs new file mode 100644 index 0000000..942e159 --- /dev/null +++ b/src/data.rs @@ -0,0 +1,11 @@ +mod access; +mod apint; +mod digit_seq; +mod digit; +mod int; +mod storage; +mod uint; +mod utils; + +pub(crate) use self::{digit::{DigitRepr, Digit, DoubleDigit}, digit_seq::ContiguousDigitSeq, digit_seq::ContiguousDigitSeqMut, storage::Storage, access::{DataAccess, DataAccessMut, ZipDataAccess,ZipDataAccessMutSelf,ZipDataAccessMutBoth}}; +pub use self::{apint::ApInt,uint::UInt, int::Int}; \ No newline at end of file diff --git a/src/data/access.rs b/src/data/access.rs new file mode 100644 index 0000000..091c769 --- /dev/null +++ b/src/data/access.rs @@ -0,0 +1,134 @@ +use crate::data::{ApInt, Digit, Storage, ContiguousDigitSeq, ContiguousDigitSeqMut}; +use crate::info::{Error, Width, Result}; + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum DataAccess<'a> { + Inl(Digit), + Ext(&'a [Digit]) +} + +#[derive(Debug, PartialEq, Eq)] +pub(crate) enum DataAccessMut<'a> { + Inl(&'a mut Digit), + Ext(&'a mut [Digit]) +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(crate) enum ZipDataAccess<'a, 'b> { + Inl(Digit, Digit), + Ext(&'a [Digit], &'b [Digit]) +} + +#[derive(Debug, PartialEq, Eq)] +pub(crate) enum ZipDataAccessMutSelf<'a, 'b> { + Inl(&'a mut Digit, Digit), + Ext(&'a mut [Digit], &'b 
[Digit]) +} + +#[derive(Debug, PartialEq, Eq)] +pub(crate) enum ZipDataAccessMutBoth<'a, 'b> { + Inl(&'a mut Digit, &'b mut Digit), + Ext(&'a mut [Digit], &'b mut [Digit]) +} + +impl ApInt { + pub(crate) fn digits(&self) -> ContiguousDigitSeq { + ContiguousDigitSeq::from(self.as_digit_slice()) + } + + pub(crate) fn digits_mut(&mut self) -> ContiguousDigitSeqMut { + ContiguousDigitSeqMut::from(self.as_digit_slice_mut()) + } + + /// Accesses the internal `Digit` data of this `ApInt` in a safe way. + #[inline] + pub(crate) fn access_data(&self) -> DataAccess { + match self.storage() { + Storage::Inl => DataAccess::Inl(unsafe{self.data.inl}), + Storage::Ext => DataAccess::Ext(self.as_digit_slice()) + } + } + + /// Mutably accesses the internal `Digit` data of this `ApInt` in a safe way. + #[inline] + pub(crate) fn access_data_mut(&mut self) -> DataAccessMut { + match self.storage() { + Storage::Inl => DataAccessMut::Inl(unsafe{&mut self.data.inl}), + Storage::Ext => DataAccessMut::Ext(self.as_digit_slice_mut()) + } + } + + /// Zips both given `ApInt` instances and tries to access their data in a safe way. + /// + /// # Errors + /// + /// - If both given `ApInt` instances have non-matching bit widths. + #[inline] + pub(crate) fn zip_access_data<'a, 'b>(&'a self, other: &'b ApInt) -> Result> { + if self.width() != other.width() { + return Error::unmatching_bitwidths(self.width(), other.width()).into() + } + Ok(match self.storage() { + Storage::Inl => { + ZipDataAccess::Inl( + unsafe{ self.data.inl}, + unsafe{other.data.inl}) + }, + Storage::Ext => { + ZipDataAccess::Ext( + self.as_digit_slice(), + other.as_digit_slice()) + } + }) + } + + /// Zips both given `ApInt` instances and tries to mutably access `self` data and immutably + /// access `other` data in a safe way. + /// + /// # Errors + /// + /// - If both given `ApInt` instances have non-matching bit widths. 
+ #[inline] + pub(crate) fn zip_access_data_mut_self<'a, 'b>(&'a mut self, other: &'b ApInt) -> Result> { + if self.width() != other.width() { + return Error::unmatching_bitwidths(self.width(), other.width()).into() + } + Ok(match self.storage() { + Storage::Inl => { + ZipDataAccessMutSelf::Inl( + unsafe{&mut self.data.inl}, + unsafe{other.data.inl}) + }, + Storage::Ext => { + ZipDataAccessMutSelf::Ext( + self.as_digit_slice_mut(), + other.as_digit_slice()) + } + }) + } + + /// Zips both given `ApInt` instances and tries to mutably access `lhs` and `rhs` data + /// in a safe way. + /// + /// # Errors + /// + /// - If both given `ApInt` instances have non-matching bit widths. + #[inline] + pub(crate) fn zip_access_data_mut_both<'a, 'b>(lhs: &'a mut ApInt, rhs: &'b mut ApInt) -> Result> { + if lhs.width() != rhs.width() { + return Error::unmatching_bitwidths(lhs.width(), rhs.width()).into() + } + Ok(match lhs.storage() { + Storage::Inl => { + ZipDataAccessMutBoth::Inl( + unsafe{&mut lhs.data.inl}, + unsafe{&mut rhs.data.inl}) + }, + Storage::Ext => { + ZipDataAccessMutBoth::Ext( + lhs.as_digit_slice_mut(), + rhs.as_digit_slice_mut()) + } + }) + } +} \ No newline at end of file diff --git a/src/data/apint.rs b/src/data/apint.rs new file mode 100644 index 0000000..455678d --- /dev/null +++ b/src/data/apint.rs @@ -0,0 +1,383 @@ +use crate::data::{Storage, Digit}; +use crate::info::{BitWidth, Width, Result, Error}; + +use std::ptr::NonNull; + +/// An arbitrary precision integer with modulo arithmetics similar to machine integers. +/// +/// Note: The width of the `ApInt` can be found at runtime through importing the [`Width`](trait.Width.html) trait. +pub struct ApInt { + /// The width in bits of this `ApInt`. + pub(crate) len : BitWidth, + /// The actual data (bits) of this `ApInt`. + pub(in crate::data) data: ApIntData +} + +pub(in crate::data) union ApIntData { + /// Inline storage (up to 64 bits) for small-space optimization. 
+ pub(in crate::data) inl: Digit, + /// Extern storage (>64 bits) for larger `ApInt`s. + pub(in crate::data) ext: NonNull +} + +/// `ApInt` is safe to send between threads since it does not own +/// aliasing memory and has no reference counting mechanism like `Rc`. +unsafe impl Send for ApInt {} + +/// `ApInt` is safe to share between threads since it does not own +/// aliasing memory and has no mutable internal state like `Cell` or `RefCell`. +unsafe impl Sync for ApInt {} + +impl ApInt { + /// Deallocates memory that may be allocated by this `ApInt`. + /// + /// `ApInt` instances with a bit width larger than `64` bits + /// allocate their digits on the heap. With `drop_digits` this + /// memory can be freed. + /// + /// # Safety + /// + /// This is extremely unsafe, only use this if the `ApInt` no longer needs its digits. + /// This is `unsafe` since it violates invariants of the `ApInt`. + unsafe fn drop_digits(&mut self) { + if self.len.storage() == Storage::Ext { + let len = self.len_digits(); + // TODO: Is there a more direct way to do this? + drop(Vec::from_raw_parts( + self.data.ext.as_ptr(), len, len)) + } + } + + /// Creates a new small `ApInt` from the given `BitWidth` and `Digit`. + /// + /// Small `ApInt` instances are stored entirely on the stack. + /// + /// # Panics + /// + /// - If the given `width` represents a `BitWidth` larger than `64` bits. + #[inline] + pub(crate) fn new_inl(width: BitWidth, digit: Digit) -> ApInt { + assert_eq!(width.storage(), Storage::Inl); + ApInt { + len: width, + data: ApIntData { inl: digit } + } + } + + /// Creates a new large `ApInt` from the given `BitWidth` and `Digit`. + /// + /// Large `ApInt` instances allocate their digits on the heap. + /// + /// # Safety + /// + /// This operation is unsafe since the buffer length behind the given `ext_ptr` must be trusted. + /// + /// # Panics + /// + /// - If the given `width` represents a `BitWidth` smaller than + /// or equal to `64` bits. 
+ pub(crate) unsafe fn new_ext(width: BitWidth, ext_ptr: *mut Digit) -> ApInt { + assert_eq!(width.storage(), Storage::Ext); + ApInt{ + len: width, + data: ApIntData{ ext: NonNull::new_unchecked(ext_ptr) } + } + } + + /// Returns the number of bits of the bit width of this `ApInt`. + #[inline] + pub(crate) fn len_bits(&self) -> usize { + self.len.to_usize() + } + + /// Returns the number of digits used internally for the value + /// representation of this `ApInt`. + #[inline] + pub(crate) fn len_digits(&self) -> usize { + self.len.required_digits() + } + + /// Returns the storage specifier of this `ApInt`. + /// + /// This is `Storage::Inl` for `ApInt` instances that can be stored + /// entirely on the stack and `Storage::Ext` otherwise. + #[inline] + pub(crate) fn storage(&self) -> Storage { + self.len.storage() + } + + /// Returns a slice over the `Digit`s of this `ApInt` in little-endian order. + #[inline] + pub(crate) fn as_digit_slice(&self) -> &[Digit] { + use std::slice; + match self.len.storage() { + Storage::Inl => unsafe { + slice::from_raw_parts(&self.data.inl, 1) + }, + Storage::Ext => unsafe { + slice::from_raw_parts(self.data.ext.as_ptr(), self.len_digits()) + } + } + } + + /// Returns a mutable slice over the `Digit`s of this `ApInt` in little-endian order. + #[inline] + pub(crate) fn as_digit_slice_mut(&mut self) -> &mut [Digit] { + use std::slice; + match self.len.storage() { + Storage::Inl => unsafe { + slice::from_raw_parts_mut(&mut self.data.inl, 1) + }, + Storage::Ext => unsafe { + slice::from_raw_parts_mut(self.data.ext.as_ptr(), self.len_digits()) + } + } + } + + /// Assigns `rhs` to this `ApInt`. + /// + /// This mutates digits and may affect the bitwidth of `self` + /// which **may cause allocations**. + /// + /// After this operation `rhs` and `self` are equal to each other. 
+ pub fn assign(&mut self, rhs: &ApInt) { + if self.len_digits() == rhs.len_digits() { + // If `self` and `rhs` require the same amount of digits + // for their representation we can simply utilize `ApInt` + // invariants and basically `memcpy` from `rhs` to `self`. + // Afterwards a simple adjustment of the length is sufficient. + // (At the end of this method.) + self.as_digit_slice_mut() + .copy_from_slice(rhs.as_digit_slice()); + } + else { + // In this case `rhs` and `self` require an unequal amount + // of digits for their representation which means that the + // digits that may be allocated by `self` must be dropped. + // + // Note that `ApInt::drop_digits` only deallocates if possible. + unsafe{ self.drop_digits(); } + + match rhs.storage() { + Storage::Inl => { + // If `rhs` is a small `ApInt` we can simply update + // the `digit` field of `self` and we are done. + self.data.inl = unsafe{ rhs.data.inl }; + } + Storage::Ext => { + // If `rhs` is a large heap-allocated `ApInt` we first + // need to expensively clone its buffer and feed it to `self`. + let cloned = rhs.clone(); + self.data.ext = unsafe{ cloned.data.ext }; + use std::mem; + mem::forget(cloned); + } + } + } + // Since all cases may need bit width adjustment we outsourced it + // to the end of this method. + self.len = rhs.len; + } + + /// Strictly assigns `rhs` to this `ApInt`. + /// + /// After this operation `rhs` and `self` are equal to each other. + /// + /// **Note:** Strict assigns protect against mutating the bit width + /// of `self` and thus return an error instead of executing a probably + /// expensive `assign` operation. + /// + /// # Errors + /// + /// - If `rhs` and `self` have unmatching bit widths. 
+ pub fn strict_assign(&mut self, rhs: &ApInt) -> Result<()> { + if self.width() != rhs.width() { + return Error::unmatching_bitwidths(self.width(), rhs.width()) + .with_annotation(format!( + "Occured while trying to `strict_assign` {:?} to {:?}.", self, rhs)) + .into() + } + self.as_digit_slice_mut() + .copy_from_slice(rhs.as_digit_slice()); + Ok(()) + } +} + +impl Drop for ApInt { + fn drop(&mut self) { + unsafe{self.drop_digits()} + } +} + +impl Clone for ApInt { + fn clone(&self) -> Self { + match self.storage() { + Storage::Inl => { + ApInt::new_inl(self.len, unsafe{ self.data.inl }) + } + Storage::Ext => { + use std::mem; + let req_digits = self.len_digits(); + let mut buffer = self.as_digit_slice() + .to_vec() + .into_boxed_slice(); + assert_eq!(buffer.len(), req_digits); + let ptr_buffer = buffer.as_mut_ptr(); + mem::forget(buffer); + unsafe{ ApInt::new_ext(self.len, ptr_buffer) } + } + } + } +} + +impl Width for ApInt { + /// Returns the `BitWidth` of this `ApInt`. + #[inline] + fn width(&self) -> BitWidth { + BitWidth::new(self.len_bits()).unwrap() + } +} + +// These are tests that would normally be in `constructors.rs`. +// This is put here to be able to constrict the visibility modifiers for +// `ApIntData`. 
+#[cfg(test)] +mod raw_construction { + use super::*; + + use std::ops::Range; + + fn powers() -> impl Iterator { + (0..128).map(|p| 1 << p) + } + + fn powers_from_to(range: Range) -> impl Iterator { + powers().skip(range.start).take(range.end - range.start) + } + + fn test_values_u8() -> impl Iterator { + powers_from_to(0..8) + .map(|v| v as u8) + .chain([ + u8::max_value(), + 10, + 42, + 99, + 123 + ].into_iter() + .map(|v| *v)) + } + + fn test_values_u16() -> impl Iterator { + test_values_u8() + .map(u16::from) + .chain(powers_from_to(8..16) + .map(|v| v as u16)) + .chain([ + u16::max_value(), + 500, + 1000, + 1337, + 7777, + 42_000 + ].into_iter().map(|v| *v)) + } + + fn test_values_u32() -> impl Iterator { + test_values_u16() + .map(u32::from) + .chain(powers_from_to(16..32) + .map(|v| v as u32)) + .chain([ + u32::max_value(), + 1_000_000, + 999_999_999, + 1_234_567_890 + ].into_iter().map(|v| *v)) + } + + fn test_values_u64() -> impl Iterator { + test_values_u32() + .map(u64::from) + .chain(powers_from_to(32..64) + .map(|v| v as u64)) + .chain([ + u64::max_value(), + 1_000_000_000_000, + 999_999_999_999_999_999, + 0x0123_4567_89AB_CDEF + ].into_iter().map(|v| *v)) + } + + #[test] + fn from_w8() { + for val in test_values_u8() { + let explicit_u8 = ApInt::from_u8(val); + let explicit_i8 = ApInt::from_i8(val as i8); + let implicit_u8 = ApInt::from(val); + let implicit_i8 = ApInt::from(val as i8); + let expected = ApInt{ + len : BitWidth::w8(), + data: ApIntData{inl: Digit(u64::from(val))} + }; + assert_eq!(explicit_u8, explicit_i8); + assert_eq!(explicit_u8, implicit_i8); + assert_eq!(explicit_u8, implicit_u8); + assert_eq!(explicit_u8, expected); + } + } + + #[test] + fn from_w16() { + for val in test_values_u16() { + let explicit_u16 = ApInt::from_u16(val); + let explicit_i16 = ApInt::from_i16(val as i16); + let implicit_u16 = ApInt::from(val); + let implicit_i16 = ApInt::from(val as i16); + let expected = ApInt{ + len : BitWidth::w16(), + data: 
ApIntData{inl: Digit(u64::from(val))} + }; + assert_eq!(explicit_u16, explicit_i16); + assert_eq!(explicit_u16, implicit_i16); + assert_eq!(explicit_u16, implicit_u16); + assert_eq!(explicit_u16, expected); + } + } + + #[test] + fn from_w32() { + for val in test_values_u32() { + let explicit_u32 = ApInt::from_u32(val); + let explicit_i32 = ApInt::from_i32(val as i32); + let implicit_u32 = ApInt::from(val); + let implicit_i32 = ApInt::from(val as i32); + let expected = ApInt{ + len : BitWidth::w32(), + data: ApIntData{inl: Digit(u64::from(val))} + }; + assert_eq!(explicit_u32, explicit_i32); + assert_eq!(explicit_u32, implicit_i32); + assert_eq!(explicit_u32, implicit_u32); + assert_eq!(explicit_u32, expected); + } + } + + #[test] + fn from_w64() { + for val in test_values_u64() { + let explicit_u64 = ApInt::from_u64(val); + let explicit_i64 = ApInt::from_i64(val as i64); + let implicit_u64 = ApInt::from(val); + let implicit_i64 = ApInt::from(val as i64); + let expected = ApInt{ + len : BitWidth::w64(), + data: ApIntData{inl: Digit(u64::from(val))} + }; + assert_eq!(explicit_u64, explicit_i64); + assert_eq!(explicit_u64, implicit_i64); + assert_eq!(explicit_u64, implicit_u64); + assert_eq!(explicit_u64, expected); + } + } +} \ No newline at end of file diff --git a/src/digit.rs b/src/data/digit.rs similarity index 83% rename from src/digit.rs rename to src/data/digit.rs index 4f6df30..9d0eb02 100644 --- a/src/digit.rs +++ b/src/data/digit.rs @@ -1,8 +1,4 @@ -use bitpos::BitPos; -use bitwidth::BitWidth; -use errors::{Error, Result}; -use traits::{Width}; -use checks; +use crate::info::{BitPos,BitWidth,Error,Result,Width}; use std::ops::{ BitAnd, @@ -14,6 +10,8 @@ use std::ops::{ Shl, Shr, + ShlAssign, + ShrAssign, Not, Add, Sub, @@ -22,9 +20,13 @@ use std::ops::{ Rem, }; +use std::fmt; + /// The type for the internal `Digit` representation. /// /// Must be exactly half the size of `DoubleDigitRepr`. 
+/// +/// Note: When changing this to other built in integers, be sure to change `DoubleDigitRepr` and `Digit::BITS`. pub(crate) type DigitRepr = u64; /// The type for the internal `DoubleDigit` representation. @@ -32,251 +34,42 @@ pub(crate) type DigitRepr = u64; /// Must be exactly double the size of `DigitRepr`. pub(crate) type DoubleDigitRepr = u128; -/// The amount of bits within a single `Digit`. -pub(crate) const BITS: usize = 64; - const REPR_ONE : DigitRepr = 0x1; const REPR_ZERO: DigitRepr = 0x0; const REPR_ONES: DigitRepr = !REPR_ZERO; -pub(crate) const ONE : Digit = Digit(REPR_ONE); -pub(crate) const ZERO: Digit = Digit(REPR_ZERO); -pub(crate) const ONES: Digit = Digit(REPR_ONES); - -/// Represents the set or unset state of a bit within an `ApInt`. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum Bit { - /// Unset, or `false` or `off` state. - Unset = 0, - /// Set, or `true` or `on` state. - Set = 1 -} - -impl Bit { - /// Converts this `Bit` into a `bool`. - /// - /// - `Unset` to `false` - /// - `Set` to `true` - #[inline] - pub fn to_bool(self) -> bool { - match self { - Bit::Unset => false, - Bit::Set => true - } - } -} - -impl From for Bit { - #[inline] - fn from(flag: bool) -> Bit { - if flag { Bit::Set } else { Bit::Unset } - } -} - -impl From for bool { - #[inline] - fn from(bit: Bit) -> bool { - bit.to_bool() - } -} - -/// A (big) digit within an `ApInt` or similar representations. +/// A (big) digit within an `ApInt` or similar representations. The `DigitRepr` is public only to +/// help with construction and mapping. It is preferred to use the `repr` function to get access to +/// the internal representation. /// /// It uses the `DoubleDigit` as computation unit. 
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub(crate) struct Digit(pub DigitRepr); -use std::fmt; - -impl fmt::Binary for Digit { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.repr().fmt(f) - } -} - -impl fmt::Octal for Digit { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.repr().fmt(f) - } -} - -impl fmt::LowerHex for Digit { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.repr().fmt(f) - } -} - -impl fmt::UpperHex for Digit { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.repr().fmt(f) - } -} - -/// A doubled digit. -/// -/// This is used as a compute unit for `Digit`'s since many `Digit` arithmetic operations -/// may overflow or have carries this is required in order to not lose those overflow- and underflow values. -/// -/// Has wrapping arithmetics for better machine emulation and improved performance. -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub(crate) struct DoubleDigit(pub DoubleDigitRepr); - -impl BitOr for DoubleDigit { - type Output = Self; - - fn bitor(self, rhs: Self) -> Self { - DoubleDigit(self.repr() | rhs.repr()) - } -} - -impl BitAnd for DoubleDigit { - type Output = Self; - - fn bitand(self, rhs: Self) -> Self { - DoubleDigit(self.repr() & rhs.repr()) - } -} - -impl Shl for DoubleDigit { - type Output = DoubleDigit; - - fn shl(self, rhs: usize) -> Self::Output { - assert!(rhs < (BITS * 2)); - DoubleDigit(self.repr().wrapping_shl(rhs as u32)) - } -} - -impl Shr for DoubleDigit { - type Output = DoubleDigit; - - fn shr(self, rhs: usize) -> Self::Output { - assert!(rhs < (BITS * 2)); - DoubleDigit(self.repr().wrapping_shr(rhs as u32)) - } -} - -impl Not for DoubleDigit { - type Output = Self; - - fn not(self) -> Self::Output { - DoubleDigit(!self.repr()) - } -} - -impl Add for DoubleDigit { - type Output = DoubleDigit; - - fn add(self, rhs: DoubleDigit) -> Self::Output { - DoubleDigit(self.repr().wrapping_add(rhs.repr())) - } 
-} - -impl Sub for DoubleDigit { - type Output = DoubleDigit; - - fn sub(self, rhs: DoubleDigit) -> Self::Output { - DoubleDigit(self.repr().wrapping_sub(rhs.repr())) - } -} - -impl Mul for DoubleDigit { - type Output = DoubleDigit; - - fn mul(self, rhs: DoubleDigit) -> Self::Output { - DoubleDigit(self.repr().wrapping_mul(rhs.repr())) - } -} - -impl Div for DoubleDigit { - type Output = DoubleDigit; - - fn div(self, rhs: DoubleDigit) -> Self::Output { - DoubleDigit(self.repr().wrapping_div(rhs.repr())) - } -} - -impl Rem for DoubleDigit { - type Output = DoubleDigit; - - fn rem(self, rhs: DoubleDigit) -> Self::Output { - DoubleDigit(self.repr().wrapping_rem(rhs.repr())) - } -} - -impl DoubleDigit { - /// Returns the value as its internal representation. - pub(crate) fn repr(self) -> DoubleDigitRepr { - self.0 - } - - /// Returns the hi part of this `DoubleDigit` as `Digit`. - pub(crate) fn hi(self) -> Digit { - Digit((self.0 >> BITS) as DigitRepr) - } - - /// Returns the hi part of this `DoubleDigit` as `Digit`. - pub(crate) fn lo(self) -> Digit { - Digit(self.0 as DigitRepr) - } - - /// Returns the lo and hi parts of this `DoubleDigit` as `Digit` each. - pub(crate) fn lo_hi(self) -> (Digit, Digit) { - (self.lo(), self.hi()) - } - - /// Returns a `DoubleDigit` from the given lo and hi raw `Digit` parts. 
- pub(crate) fn from_lo_hi(lo: Digit, hi: Digit) -> DoubleDigit { - DoubleDigit(DoubleDigitRepr::from(lo.repr()) | (DoubleDigitRepr::from(hi.repr()) << BITS)) - } - - pub(crate) fn wrapping_add(self, other: DoubleDigit) -> Self { - DoubleDigit(self.repr().wrapping_add(other.repr())) - } - - pub(crate) fn wrapping_mul(self, other: DoubleDigit) -> Self { - DoubleDigit(self.repr().wrapping_mul(other.repr())) - } - - pub(crate) fn wrapping_div(self, other: DoubleDigit) -> Self { - self.wrapping_divrem(other).0 - } - - #[cfg(not(feature = "specialized_div_rem"))] - pub(crate) fn wrapping_divrem(self,other: DoubleDigit) -> (DoubleDigit,DoubleDigit) { - ( - DoubleDigit(self.repr().wrapping_div(other.repr())), - DoubleDigit(self.repr().wrapping_rem(other.repr())) - ) - } - - #[cfg(feature = "specialized_div_rem")] - pub(crate) fn wrapping_divrem(self,other: DoubleDigit) -> (DoubleDigit,DoubleDigit) { - let temp = specialized_div_rem::u128_div_rem(self.repr(),other.repr()); - (DoubleDigit(temp.0),DoubleDigit(temp.1)) - } -} - /// # Constructors impl Digit { + /// The amount of bits within a single `Digit`. + pub(crate) const BITS: usize = 64; + pub(crate) const ONE : Digit = Digit(REPR_ONE); + pub(crate) const ZERO: Digit = Digit(REPR_ZERO); + pub(crate) const ONES: Digit = Digit(REPR_ONES); + /// Creates a digit that represents the value `0`. /// /// **Note:** In twos-complement this means that all bits are `0`. - pub fn zero() -> Digit { ZERO } + pub fn zero() -> Digit { Digit::ZERO } /// Creates a digit that represents the value `1`. - pub fn one() -> Digit { ONE } + pub fn one() -> Digit { Digit::ONE } /// Returns `true` if this `Digit` is zero (`0`). - pub fn is_zero(self) -> bool { self == ZERO } + pub fn is_zero(self) -> bool { self == Digit::ZERO } /// Returns `true` if this `Digit` is one (`1`). - pub fn is_one(self) -> bool { self == ONE } + pub fn is_one(self) -> bool { self == Digit::ONE } /// Returns `true` if this `Digit` has all bits set. 
- pub fn is_all_set(self) -> bool { self == ONES } - - /// Creates a digit where all bits are initialized to `1`. - pub fn all_set() -> Digit { ONES } + pub fn is_all_set(self) -> bool { self == Digit::ONES } } /// # Utility & helper methods. @@ -286,18 +79,12 @@ impl Digit { self.0 } - /// Returns a mutable reference to the underlying representation - /// of this `Digit`. - pub fn repr_mut(&mut self) -> &mut DigitRepr { - &mut self.0 - } - /// Returns the `DoubleDigit` representation of this `Digit`. pub(crate) fn dd(self) -> DoubleDigit { DoubleDigit(DoubleDigitRepr::from(self.repr())) } - pub(crate) fn leading_zeros(&self) -> u32 { + pub(crate) fn leading_zeros(self) -> u32 { self.repr().leading_zeros() } @@ -335,6 +122,14 @@ impl Digit { Digit(self.repr().wrapping_mul(other.repr())) } + #[cfg(test)] + pub(crate) fn overflowing_mul(self, other: Digit) -> (Digit, bool) { + match self.repr().overflowing_mul(other.repr()) { + (x,false) => (Digit(x), false), + (x,true) => (Digit(x), true), + } + } + //TODO if and when `carrying_mul` (rust-lang rfc #2417) is stabilized, this function and others in this crate should use `carrying_mul` as the operation pub(crate) fn carrying_mul(self, other: Digit) -> (Digit, Digit) { let temp = self.dd().wrapping_mul(other.dd()); @@ -374,10 +169,10 @@ impl Digit { where W: Into { let width = width.into(); - if width.to_usize() > BITS { + if width.to_usize() > Digit::BITS { return Err(Error::invalid_bitwidth(width.to_usize()) - .with_annotation(format!("Encountered invalid `BitWidth` for operating \ - on a `Digit`."))) + .with_annotation("Encountered invalid `BitWidth` for operating \ + on a `Digit`.")) } Ok(()) } @@ -433,27 +228,17 @@ impl Digit { } } -impl Width for Digit { - fn width(&self) -> BitWidth { - BitWidth::w64() - } -} - -impl Width for DoubleDigit { - fn width(&self) -> BitWidth { - BitWidth::w128() - } -} - /// # Bitwise access impl Digit { - /// Returns the least significant `Bit` of this `Digit`. 
+ /// Returns the least significant bit, as a `bool`, of this `Digit`. + /// + /// # Note /// - /// Note: This may be useful to determine if a `Digit` - /// represents an even or an uneven number for example. + /// In this context, a `bool` is interpreted as a single bit, where `false` + /// is 0 and `true` is 1. #[inline] - pub fn least_significant_bit(self) -> Bit { - Bit::from((self.repr() & 0x1) != 0) + pub fn least_significant_bit(self) -> bool { + (self.repr() & 0x1) != 0 } /// Returns `true` if the `n`th bit is set to `1`, else returns `false`. @@ -462,12 +247,12 @@ impl Digit { /// /// If the given `n` is greater than the digit size. #[inline] - pub fn get

(self, pos: P) -> Result + pub fn get

(self, pos: P) -> Result where P: Into { let pos = pos.into(); - checks::verify_bit_access(&self, pos)?; - Ok(Bit::from(((self.repr() >> pos.to_usize()) & 0x01) == 1)) + pos.verify_bit_access(&self)?; + Ok(((self.repr() >> pos.to_usize()) & 0x1) == 1) } /// Sets the bit at position `pos` to `1`. @@ -480,8 +265,9 @@ impl Digit { where P: Into { let pos = pos.into(); - checks::verify_bit_access(self, pos)?; - Ok(self.0 |= 0x01 << pos.to_usize()) + pos.verify_bit_access(self)?; + self.0 |= 0x1 << pos.to_usize(); + Ok(()) } /// Sets the bit at position `pos` to `0`. @@ -494,8 +280,9 @@ impl Digit { where P: Into { let pos = pos.into(); - checks::verify_bit_access(self, pos)?; - Ok(self.0 &= !(0x01 << pos.to_usize())) + pos.verify_bit_access(self)?; + self.0 &= !(0x1 << pos.to_usize()); + Ok(()) } /// Flips the bit at position `pos`. @@ -508,8 +295,9 @@ impl Digit { where P: Into { let pos = pos.into(); - checks::verify_bit_access(self, pos)?; - Ok(self.0 ^= 0x01 << pos.to_usize()) + pos.verify_bit_access(self)?; + self.0 ^= 0x1 << pos.to_usize(); + Ok(()) } /// Sets all bits in this digit to `1`. @@ -529,28 +317,37 @@ impl Digit { pub fn flip_all(&mut self) { self.0 ^= REPR_ONES } +} - /// Unsets all bits but the last `n` ones. - /// - /// # Note - /// - /// This is equal to calling `Digit::truncate_to`. - /// - /// # Errors - /// - /// If the given `n` is greater than the digit size. 
- #[inline] - pub fn retain_last_n(&mut self, n: usize) -> Result<()> { - checks::verify_bit_access(self, n)?; - Ok(self.0 &= !(REPR_ONES << n)) +impl fmt::Binary for Digit { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.repr().fmt(f) } } -/// # Bitwise operations +impl fmt::Octal for Digit { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.repr().fmt(f) + } +} -impl Digit { - pub fn not_inplace(&mut self) { - self.0 = !self.repr() +impl fmt::LowerHex for Digit { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.repr().fmt(f) + } +} + +impl fmt::UpperHex for Digit { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.repr().fmt(f) + } +} + +impl Not for Digit { + type Output = Self; + + fn not(self) -> Self::Output { + Digit(!self.repr()) } } @@ -584,7 +381,6 @@ impl BitOrAssign for Digit { } } -// # Bitwise assign operations impl BitAndAssign for Digit { fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.repr() @@ -613,36 +409,181 @@ impl Shr for Digit { } } -impl Not for Digit { +impl ShlAssign for Digit { + fn shl_assign(&mut self, rhs: usize) { + *self = *self << rhs; + } +} + +impl ShrAssign for Digit { + fn shr_assign(&mut self, rhs: usize) { + *self = *self >> rhs; + } +} + +impl Width for Digit { + fn width(&self) -> BitWidth { + BitWidth::from(Digit::BITS) + } +} + +/// A doubled digit. +/// +/// This is used as a compute unit for `Digit`'s since many `Digit` arithmetic operations +/// may overflow or have carries this is required in order to not lose those overflow- and underflow values. +/// +/// Has wrapping arithmetics for better machine emulation and improved performance. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub(crate) struct DoubleDigit(pub DoubleDigitRepr); + +impl DoubleDigit { + /// Returns the value as its internal representation. + pub(crate) fn repr(self) -> DoubleDigitRepr { + self.0 + } + + /// Returns the hi part of this `DoubleDigit` as `Digit`. 
+ pub(crate) fn hi(self) -> Digit { + Digit((self.0 >> Digit::BITS) as DigitRepr) + } + + /// Returns the hi part of this `DoubleDigit` as `Digit`. + pub(crate) fn lo(self) -> Digit { + Digit(self.0 as DigitRepr) + } + + /// Returns the lo and hi parts of this `DoubleDigit` as `Digit` each. + pub(crate) fn lo_hi(self) -> (Digit, Digit) { + (self.lo(), self.hi()) + } + + /// Returns a `DoubleDigit` from the given lo and hi raw `Digit` parts. + pub(crate) fn from_lo_hi(lo: Digit, hi: Digit) -> DoubleDigit { + DoubleDigit(DoubleDigitRepr::from(lo.repr()) | (DoubleDigitRepr::from(hi.repr()) << Digit::BITS)) + } + + pub(crate) fn wrapping_add(self, other: DoubleDigit) -> Self { + DoubleDigit(self.repr().wrapping_add(other.repr())) + } + + pub(crate) fn wrapping_mul(self, other: DoubleDigit) -> Self { + DoubleDigit(self.repr().wrapping_mul(other.repr())) + } + + pub(crate) fn wrapping_div(self, other: DoubleDigit) -> Self { + self.wrapping_divrem(other).0 + } + + #[cfg(not(feature = "specialized_div_rem"))] + pub(crate) fn wrapping_divrem(self,other: DoubleDigit) -> (DoubleDigit,DoubleDigit) { + ( + DoubleDigit(self.repr().wrapping_div(other.repr())), + DoubleDigit(self.repr().wrapping_rem(other.repr())) + ) + } + + #[cfg(feature = "specialized_div_rem")] + pub(crate) fn wrapping_divrem(self,other: DoubleDigit) -> (DoubleDigit,DoubleDigit) { + let temp = specialized_div_rem::u128_div_rem(self.repr(),other.repr()); + (DoubleDigit(temp.0),DoubleDigit(temp.1)) + } +} + +impl Not for DoubleDigit { type Output = Self; fn not(self) -> Self::Output { - Digit(!self.repr()) + DoubleDigit(!self.repr()) } } -#[cfg(test)] -mod tests { - use super::*; +impl BitOr for DoubleDigit { + type Output = Self; - mod bit { - use super::*; + fn bitor(self, rhs: Self) -> Self { + DoubleDigit(self.repr() | rhs.repr()) + } +} - #[test] - fn from_bool() { - assert_eq!(Bit::from(true) , Bit::Set); - assert_eq!(Bit::from(false), Bit::Unset); - } +impl BitAnd for DoubleDigit { + type Output = Self; - 
#[test] - fn from_bit() { - assert_eq!(bool::from(Bit::Set) , true); - assert_eq!(bool::from(Bit::Unset), false); - } + fn bitand(self, rhs: Self) -> Self { + DoubleDigit(self.repr() & rhs.repr()) } +} + +impl Shl for DoubleDigit { + type Output = DoubleDigit; + + fn shl(self, rhs: usize) -> Self::Output { + assert!(rhs < (Digit::BITS * 2)); + DoubleDigit(self.repr().wrapping_shl(rhs as u32)) + } +} + +impl Shr for DoubleDigit { + type Output = DoubleDigit; + + fn shr(self, rhs: usize) -> Self::Output { + assert!(rhs < (Digit::BITS * 2)); + DoubleDigit(self.repr().wrapping_shr(rhs as u32)) + } +} + +impl Add for DoubleDigit { + type Output = DoubleDigit; + + fn add(self, rhs: DoubleDigit) -> Self::Output { + DoubleDigit(self.repr().wrapping_add(rhs.repr())) + } +} + +impl Sub for DoubleDigit { + type Output = DoubleDigit; + + fn sub(self, rhs: DoubleDigit) -> Self::Output { + DoubleDigit(self.repr().wrapping_sub(rhs.repr())) + } +} + +impl Mul for DoubleDigit { + type Output = DoubleDigit; + + fn mul(self, rhs: DoubleDigit) -> Self::Output { + DoubleDigit(self.repr().wrapping_mul(rhs.repr())) + } +} + +impl Div for DoubleDigit { + type Output = DoubleDigit; + + fn div(self, rhs: DoubleDigit) -> Self::Output { + DoubleDigit(self.repr().wrapping_div(rhs.repr())) + } +} + +impl Rem for DoubleDigit { + type Output = DoubleDigit; + + fn rem(self, rhs: DoubleDigit) -> Self::Output { + DoubleDigit(self.repr().wrapping_rem(rhs.repr())) + } +} + +impl Width for DoubleDigit { + fn width(&self) -> BitWidth { + BitWidth::from(Digit::BITS * 2) + } +} + +#[cfg(test)] +mod tests { + use super::*; mod double_digit { use super::*; + use crate::info::Width; static TEST_VALUES: &[DoubleDigitRepr] = &[0, 1, 2, 10, 42, 1337]; @@ -831,6 +772,7 @@ mod tests { mod digit { use super::*; + use crate::info::Width; use std::usize; @@ -883,8 +825,8 @@ mod tests { #[test] fn width() { - assert_eq!(digit::ONES.width(), BitWidth::w64()); - assert_eq!(digit::ZERO.width(), BitWidth::w64()); + 
assert_eq!(Digit::ONES.width(), BitWidth::w64()); + assert_eq!(Digit::ZERO.width(), BitWidth::w64()); assert_eq!(even_digit().width(), BitWidth::w64()); assert_eq!(odd_digit().width(), BitWidth::w64()); } @@ -892,10 +834,10 @@ mod tests { #[test] fn get_ok() { for &pos in VALID_TEST_POS_VALUES { - assert_eq!(digit::ONES.get(pos), Ok(Bit::Set)); - assert_eq!(digit::ZERO.get(pos), Ok(Bit::Unset)); - assert_eq!(even_digit().get(pos), Ok(if pos % 2 == 0 { Bit::Set } else { Bit::Unset })); - assert_eq!(odd_digit().get(pos), Ok(if pos % 2 == 1 { Bit::Set } else { Bit::Unset })); + assert_eq!(Digit::ONES.get(pos), Ok(true)); + assert_eq!(Digit::ZERO.get(pos), Ok(false)); + assert_eq!(even_digit().get(pos), Ok(pos % 2 == 0)); + assert_eq!(odd_digit().get(pos), Ok(pos % 2 == 1)); } } @@ -903,8 +845,8 @@ mod tests { fn get_fail() { for &pos in INVALID_TEST_POS_VALUES { let expected_err = Err(Error::invalid_bit_access(pos, BitWidth::w64())); - assert_eq!(digit::ONES.get(pos), expected_err); - assert_eq!(digit::ZERO.get(pos), expected_err); + assert_eq!(Digit::ONES.get(pos), expected_err); + assert_eq!(Digit::ZERO.get(pos), expected_err); assert_eq!(digit::even_digit().get(pos), expected_err); assert_eq!(digit::odd_digit().get(pos), expected_err); } @@ -916,7 +858,7 @@ mod tests { let mut digit = Digit(val); for &pos in VALID_TEST_POS_VALUES { digit.set(pos).unwrap(); - assert_eq!(digit.get(pos), Ok(Bit::Set)); + assert_eq!(digit.get(pos), Ok(true)); } } } @@ -925,8 +867,8 @@ mod tests { fn set_fail() { for &pos in INVALID_TEST_POS_VALUES { let expected_err = Err(Error::invalid_bit_access(pos, BitWidth::w64())); - assert_eq!(digit::ONES.set(pos), expected_err); - assert_eq!(digit::ZERO.set(pos), expected_err); + assert_eq!(Digit::ONES.set(pos), expected_err); + assert_eq!(Digit::ZERO.set(pos), expected_err); assert_eq!(digit::even_digit().set(pos), expected_err); assert_eq!(digit::odd_digit().set(pos), expected_err); } @@ -941,12 +883,5 @@ mod tests { // pub fn 
set_first_n(&mut self, n: usize) -> Result<()> { // pub fn unset_first_n(&mut self, n: usize) -> Result<()> { // pub fn retain_last_n(&mut self, n: usize) -> Result<()> { - - #[test] - fn retain_last_n() { - let mut d = ONES; - d.retain_last_n(32).unwrap(); - assert_eq!(d, Digit(0x0000_0000_FFFF_FFFF)); - } } } diff --git a/src/digit_seq.rs b/src/data/digit_seq.rs similarity index 98% rename from src/digit_seq.rs rename to src/data/digit_seq.rs index e474493..b1c1e6d 100644 --- a/src/digit_seq.rs +++ b/src/data/digit_seq.rs @@ -1,4 +1,4 @@ -use digit::Digit; +use crate::data::Digit; use std::slice; diff --git a/src/int.rs b/src/data/int.rs similarity index 97% rename from src/int.rs rename to src/data/int.rs index f52066a..7c48ecb 100644 --- a/src/int.rs +++ b/src/data/int.rs @@ -1,12 +1,6 @@ -use apint::ApInt; -use traits::Width; -use digit::Bit; -use bitwidth::BitWidth; -use errors::Result; -use apint::{ShiftAmount}; -use bitpos::{BitPos}; -use uint::UInt; -use utils::{try_forward_bin_mut_impl, forward_mut_impl, forward_bin_mut_impl}; +use crate::data::{ApInt, UInt}; +use crate::info::{BitWidth, Result, ShiftAmount, BitPos, Width}; +use crate::logic::{try_forward_bin_mut_impl, forward_mut_impl, forward_bin_mut_impl}; #[cfg(feature = "rand_support")] use rand; @@ -70,14 +64,11 @@ impl Int { /// # Constructors impl Int { - /// Creates a new `Int` from the given `Bit` value with a bit width of `1`. - /// - /// This function is generic over types that are convertible to `Bit` such as `bool`. - pub fn from_bit(bit: B) -> Int - where - B: Into, - { - Int::from(ApInt::from_bit(bit)) + /// Creates a new `Int` from the given boolean value with a bit width of `1`. + /// + /// When `bit` is `false`, the single bit in the `ApInt` is 0, otherwise it is 1. + pub fn from_bool(bit: bool) -> Int { + Int::from(ApInt::from_bool(bit)) } /// Creates a new `Int` from a given `i8` value with a bit-width of 8. 
@@ -146,12 +137,9 @@ impl Int { } } -impl From for Int - where B: Into -{ - #[inline] - fn from(bit: B) -> Int { - Int::from_bit(bit) +impl From for Int { + fn from(bit: bool) -> Int { + Int::from_bool(bit) } } @@ -241,7 +229,7 @@ impl Int { /// Returns `true` if the value of this `Int` is positive. pub fn is_positive(&self) -> bool { - self.sign_bit() == Bit::Unset + !self.sign_bit() } /// Returns `true` if the value of this `Int` is negative. @@ -391,9 +379,9 @@ impl Int { /// of the `bool` are being ignored. /// /// # Note - /// - /// - Basically this returns `true` if the least significant - /// bit of this `Int` is `1` and `false` otherwise. + /// + /// In this context, a `bool` is interpreted as a single bit, where `false` + /// is 0 and `true` is 1. pub fn resize_to_bool(&self) -> bool { self.value.resize_to_bool() } @@ -903,7 +891,7 @@ impl Int { /// # Errors /// /// - If `pos` is not a valid bit position for the width of this `Int`. - pub fn get_bit_at

(&self, pos: P) -> Result + pub fn get_bit_at

(&self, pos: P) -> Result where P: Into { self.value.get_bit_at(pos) @@ -975,7 +963,7 @@ impl Int { /// Returns the sign bit of this `Int`. /// /// **Note:** This is equal to the most significant bit of this `Int`. - pub fn sign_bit(&self) -> Bit { + pub fn sign_bit(&self) -> bool { self.value.sign_bit() } @@ -1024,6 +1012,14 @@ impl Int { } } +impl Width for Int { + /// Returns the `BitWidth` of this `ApInt`. + #[inline] + fn width(&self) -> BitWidth { + self.value.width() + } +} + // =========================================================================== // `Not` (bitwise) impls // =========================================================================== diff --git a/src/storage.rs b/src/data/storage.rs similarity index 88% rename from src/storage.rs rename to src/data/storage.rs index 9b31400..3fb1361 100644 --- a/src/storage.rs +++ b/src/data/storage.rs @@ -1,5 +1,5 @@ -use digit; -use bitwidth::BitWidth; +use crate::data::Digit; +use crate::info::BitWidth; #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub(crate) enum Storage { Inl, Ext } @@ -26,6 +26,6 @@ impl Storage { /// similar to the well-known small-string optimization. 
#[inline] fn is_inline(width: BitWidth) -> bool { - width.to_usize() <= digit::BITS + width.to_usize() <= Digit::BITS } } diff --git a/src/uint.rs b/src/data/uint.rs similarity index 97% rename from src/uint.rs rename to src/data/uint.rs index 19aea5e..6cfc3a4 100644 --- a/src/uint.rs +++ b/src/data/uint.rs @@ -1,12 +1,6 @@ -use apint::ApInt; -use traits::Width; -use digit::Bit; -use bitwidth::BitWidth; -use errors::Result; -use apint::{ShiftAmount}; -use bitpos::{BitPos}; -use int::Int; -use utils::{try_forward_bin_mut_impl, forward_mut_impl, forward_bin_mut_impl}; +use crate::data::{ApInt, Int}; +use crate::info::{Width, BitWidth, Result, ShiftAmount, BitPos}; +use crate::logic::{try_forward_bin_mut_impl, forward_mut_impl, forward_bin_mut_impl}; #[cfg(feature = "rand_support")] use rand; @@ -65,14 +59,11 @@ impl UInt { /// # Constructors impl UInt { - /// Creates a new `UInt` from the given `Bit` value with a bit width of `1`. - /// - /// This function is generic over types that are convertible to `Bit` such as `bool`. - pub fn from_bit(bit: B) -> UInt - where - B: Into, - { - UInt::from(ApInt::from_bit(bit)) + /// Creates a new `UInt` from the given boolean value with a bit width of `1`. + /// + /// When `bit` is `false`, the single bit in the `ApInt` is 0, otherwise it is 1. + pub fn from_bool(bit: bool) -> UInt { + UInt::from(ApInt::from_bool(bit)) } /// Creates a new `UInt` from a given `u8` value with a bit-width of 8. @@ -141,12 +132,9 @@ impl UInt { } } -impl From for UInt - where B: Into -{ - #[inline] - fn from(bit: B) -> UInt { - UInt::from_bit(bit) +impl From for UInt { + fn from(bit: bool) -> UInt { + UInt::from_bool(bit) } } @@ -848,15 +836,10 @@ impl UInt { impl UInt { /// Returns the bit at the given bit position `pos`. /// - /// This returns - /// - /// - `Bit::Set` if the bit at `pos` is `1` - /// - `Bit::Unset` otherwise - /// /// # Errors /// /// - If `pos` is not a valid bit position for the width of this `UInt`. - pub fn get_bit_at

(&self, pos: P) -> Result + pub fn get_bit_at

(&self, pos: P) -> Result where P: Into { self.value.get_bit_at(pos) @@ -961,6 +944,14 @@ impl Not for UInt { } } +impl Width for UInt { + /// Returns the `BitWidth` of this `ApInt`. + #[inline] + fn width(&self) -> BitWidth { + self.value.width() + } +} + // =========================================================================== // `BitAnd` impls // =========================================================================== diff --git a/src/data/utils.rs b/src/data/utils.rs new file mode 100644 index 0000000..969df74 --- /dev/null +++ b/src/data/utils.rs @@ -0,0 +1,177 @@ +use crate::data::{Digit, ApInt}; +use crate::info::{Result, Width}; + +/// # Utility & Helper Methods +impl ApInt { + /// Computes the given operation on all digits of this `ApInt`. + /// + /// # Note + /// + /// Prefer this utility method if you want to perform the same + /// operation for all digits within this `ApInt` as this operation + /// uses the most efficient way to do so. + #[inline] + pub(crate) fn modify_digits(&mut self, f: F) + where F: Fn(&mut Digit) + { + use crate::data::DataAccessMut::*; + match self.access_data_mut() { + Inl(digit) => f(digit), + Ext(digits) => { + for digit in digits { + f(digit) + } + } + } + } + + /// Computes the given operation on all digits of this `ApInt` + /// zipped with the digits of `rhs`. + /// + /// # Note + /// + /// Prefer this utility method for these use cases since this operation + /// uses the most efficient way to perform the specified task. + #[inline] + pub(crate) fn modify_zipped_digits(&mut self, rhs: &ApInt, f: F) -> Result<()> + where F: Fn(&mut Digit, Digit) + { + use crate::data::ZipDataAccessMutSelf::*; + match self.zip_access_data_mut_self(rhs)? { + Inl(lhs, rhs) => f(lhs, rhs), + Ext(lhs, rhs) => { + for (l, &r) in lhs.iter_mut().zip(rhs) { + f(l, r) + } + } + } + Ok(()) + } + + /// Returns the most significant `Digit` of this `ApInt`. 
+ #[inline] + pub(crate) fn most_significant_digit(&self) -> Digit { + use crate::data::DataAccess::*; + match self.access_data() { + Inl(digit) => digit, + Ext(digits) => { + *digits.last().expect("apint instances do always have at least one digit; qed") + } + } + } + + /// Returns a mutable reference to the most significant `Digit` of this `ApInt`. + #[inline] + pub(crate) fn most_significant_digit_mut(&mut self) -> &mut Digit { + use crate::data::DataAccessMut::*; + match self.access_data_mut() { + Inl(digit) => digit, + Ext(digits) => { + digits.last_mut().expect("apint instances do always have at least one digit; qed") + } + } + } + + /// Returns the least significant `Digit` of this `ApInt`. + #[inline] + pub(crate) fn least_significant_digit(&self) -> Digit { + use crate::data::DataAccess::*; + match self.access_data() { + Inl(digit) => digit, + Ext(digits) => digits[0] + } + } + + #[inline] + pub(crate) fn most_significant_bit(&self) -> bool { + let sign_bit_pos = self.width().sign_bit_pos(); + self.most_significant_digit() + .get(sign_bit_pos.to_pos_within_digit()) + .expect("`BitWidth::excess_bits` returns a number that \ + is always a valid `BitPos` for a `Digit` so this \ + operation cannot fail.") + } + + #[inline] + pub(crate) fn least_significant_bit(&self) -> bool { + self.least_significant_digit().least_significant_bit() + } + + /// Clears unused bits of this `ApInt`. + /// + /// # Example + /// + /// An `ApInt` with a `BitWidth` of `100` bits requires 2 `Digit`s for its internal value + /// representation, each having 64-bits which totals in `128` bits for the `ApInt` instance. + /// So upon a call to `ApInt::clear_unused_bits` the upper + /// `128-100 = 28` bits are cleared (set to zero (`0`)). 
+ #[inline] + pub(crate) fn clear_unused_bits(&mut self) { + if let Some(bits) = self.width().excess_bits() { + *self.most_significant_digit_mut() &= !(Digit::ONES << bits); + } + } + + /// Splits the least significant digits from the rest of the digit slice + /// and returns it as well as the remaining part of the digit slice. + #[inline] + pub(crate) fn split_least_significant_digit(&self) -> (Digit, &[Digit]) { + use crate::data::DataAccess::*; + match self.access_data() { + Inl(digit) => (digit, &[]), + Ext(digits) => { + let (lsd, rest) = digits.split_first() + .expect("An `ApInt` always has at least one digit so calling \ + `split_first` on a slice of its digits will never \ + return `None`."); + (*lsd, rest) + } + } + } + + /// Splits the most significant digits from the rest of the digit slice + /// and returns it as well as the remaining part of the digit slice. + #[inline] + pub(crate) fn split_most_significant_digit(&self) -> (Digit, &[Digit]) { + use crate::data::DataAccess::*; + match self.access_data() { + Inl(digit) => (digit, &[]), + Ext(digits) => { + let (lsd, rest) = digits.split_last() + .expect("An `ApInt` always has at least one digit so calling \ + `split_last` on a slice of its digits will never \ + return `None`."); + (*lsd, rest) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn most_significant_bit() { + assert_eq!(false, + ApInt::from_bool(false).most_significant_bit()); + assert_eq!(true, + ApInt::from_bool(true).most_significant_bit()); + assert_eq!(false, + ApInt::from_u8(0b0101_0101).most_significant_bit()); + assert_eq!(true, + ApInt::from_u8(0b1101_0101).most_significant_bit()); + assert_eq!(false, + ApInt::from_u16(0b0111_1000_1101_0101).most_significant_bit()); + assert_eq!(true, + ApInt::from_u16(0b1011_0001_0101_0101).most_significant_bit()); + assert_eq!(false, + ApInt::from_u32(0x7000_0000).most_significant_bit()); + assert_eq!(true, + ApInt::from_u32(0x8000_0000).most_significant_bit()); + 
assert_eq!(false, + ApInt::from_u64(0x70FC_A875_4321_1234).most_significant_bit()); + assert_eq!(true, + ApInt::from_u64(0x8765_4321_5555_6666).most_significant_bit()); + } +} diff --git a/src/info.rs b/src/info.rs new file mode 100644 index 0000000..46475e7 --- /dev/null +++ b/src/info.rs @@ -0,0 +1,9 @@ +mod bitpos; +mod bitwidth; +mod errors; +mod radix; +mod shiftamount; +mod width; + +pub(crate) use self::{errors::DivOp}; +pub use self::{radix::Radix, bitpos::BitPos, bitwidth::BitWidth, shiftamount::ShiftAmount, width::Width, errors::{Result, Error, ErrorKind}}; \ No newline at end of file diff --git a/src/bitpos.rs b/src/info/bitpos.rs similarity index 84% rename from src/bitpos.rs rename to src/info/bitpos.rs index 56cae70..b0d1829 100644 --- a/src/bitpos.rs +++ b/src/info/bitpos.rs @@ -1,5 +1,5 @@ -use errors::{Result}; -use digit; +use crate::data::Digit; +use crate::info::{Result, Error, Width}; /// Represents a bit position within an `ApInt`. /// @@ -34,17 +34,28 @@ impl BitPos { /// on `Digit` instances. #[inline] pub(crate) fn to_pos_within_digit(self) -> BitPos { - BitPos(self.0 % digit::BITS) + BitPos(self.0 % Digit::BITS) } /// Splits this `BitPos` that may range over several `Digit`s within an `ApInt` /// into the associated `Digit` offset and its `Digit`-relative bit position. 
#[inline] pub(crate) fn to_digit_and_bit_pos(self) -> (DigitPos, BitPos) { - let digit_pos = DigitPos::from(self.0 / digit::BITS); - let bit_pos = BitPos::from(self.0 % digit::BITS); + let digit_pos = self.0 / Digit::BITS; + let bit_pos = BitPos::from(self.0 % Digit::BITS); (digit_pos, bit_pos) } + + #[inline] + pub(crate) fn verify_bit_access(self, a: &T) -> Result<()> + where T: Width, + { + let width = a.width(); + if !width.is_valid_pos(self) { + return Err(Error::invalid_bit_access(self, width)) + } + Ok(()) + } } impl From for BitPos { @@ -54,6 +65,7 @@ impl From for BitPos { } } + #[cfg(test)] mod tests { use super::*; diff --git a/src/bitwidth.rs b/src/info/bitwidth.rs similarity index 84% rename from src/bitwidth.rs rename to src/info/bitwidth.rs index edd9e8a..6f17d2c 100644 --- a/src/bitwidth.rs +++ b/src/info/bitwidth.rs @@ -1,8 +1,5 @@ -use digit; -use storage::Storage; -use bitpos::BitPos; -use apint::{ShiftAmount}; -use errors::{Result, Error}; +use crate::data::{Digit,Storage}; +use crate::info::{BitPos,ShiftAmount,Result,Error}; /// The `BitWidth` represents the length of an `ApInt`. /// @@ -11,9 +8,6 @@ use errors::{Result, Error}; #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct BitWidth(usize); -// =========================================================================== -/// Constructors -/// =========================================================================== impl BitWidth { /// Creates a `BitWidth` that represents a bit-width of `1` bit. #[inline] @@ -80,9 +74,6 @@ impl From for BitWidth { } } -// =========================================================================== -/// API -/// =========================================================================== impl BitWidth { /// Converts this `BitWidth` into a `usize`. #[inline] @@ -90,8 +81,10 @@ impl BitWidth { self.0 } - /// Returns the number of exceeding bits that is implied for `ApInt` - /// instances with this `BitWidth`. 
+ /// Returns the number of excess bits that is implied for `ApInt` instances with this + /// `BitWidth`. These bits arise when the `BitWidth` is not a multiple of `Digit::BITS`, and + /// the most significant `bitwidth % Digit::BITS` bits must be placed in a whole digit. + /// `None` is returned if there are no excess bits. /// /// For example for an `ApInt` with a `BitWidth` of `140` bits requires /// exactly `3` digits (each with its `64` bits). The third however, @@ -101,7 +94,7 @@ impl BitWidth { /// /// *Note:* A better name for this method has yet to be found! pub(crate) fn excess_bits(self) -> Option { - match self.to_usize() % digit::BITS { + match self.to_usize() % Digit::BITS { 0 => None, n => Some(n) } @@ -116,6 +109,15 @@ impl BitWidth { self.excess_bits().map(BitWidth::from) } + /// Returns the number of unused bits in the most significant digit. This number can be in the + /// range `0..Digit::BITS`. This is different from `ApInt::excess_bits`. + pub(crate) fn unused_bits(self) -> usize { + match Digit::BITS - (self.to_usize() % Digit::BITS) { + Digit::BITS => 0, + n => n + } + } + /// Returns a storage specifier that tells the caller if `ApInt`'s /// associated with this bitwidth require an external memory (`Ext`) to store /// their digits or may use inplace memory (`Inl`). @@ -132,8 +134,8 @@ impl BitWidth { /// /// *Note:* Maybe we should move this method somewhere else? 
#[inline] - pub(crate) fn required_digits(&self) -> usize { - ((self.to_usize() - 1) / digit::BITS) + 1 + pub(crate) fn required_digits(self) -> usize { + ((self.to_usize() - 1) / Digit::BITS) + 1 } } diff --git a/src/errors.rs b/src/info/errors.rs similarity index 93% rename from src/errors.rs rename to src/info/errors.rs index 816e034..8165b32 100644 --- a/src/errors.rs +++ b/src/info/errors.rs @@ -1,13 +1,14 @@ -use bitwidth::BitWidth; -use bitpos::BitPos; -use radix::Radix; -use apint::{ApInt, ShiftAmount}; -use apint::{PrimitiveTy}; +use crate::info::{Radix, BitPos, BitWidth, ShiftAmount}; +use crate::data::{ApInt}; +use crate::construction::PrimitiveTy; use std::result; use std::error; use std::fmt; +/// The `Result` type used in `ApInt`. +pub type Result = result::Result; + /// Represents the kind of an `Error`. /// /// This also stores the unique information tied to the error report. @@ -106,7 +107,7 @@ pub enum DivOp { SignedRem, } -/// Represents an error that may occure upon using the `ApInt` library. +/// Represents an error that may occur upon using the `ApInt` library. /// /// All errors have a unique kind which also stores extra information for error reporting. /// Besides that an `Error` also stores a message and an optional additional annotation. @@ -117,9 +118,7 @@ pub struct Error { annotation: Option } -// =========================================================================== /// Public getters for `Error`. -/// =========================================================================== impl Error { /// Returns a reference to the kind of this `Error`. #[inline] @@ -143,9 +142,7 @@ impl Error { } } -// =========================================================================== /// Extending constructors for `Error`. 
-/// =========================================================================== impl Error { #[inline] pub(crate) fn with_annotation(mut self, annotation: A) -> Error @@ -156,9 +153,7 @@ impl Error { } } -// =========================================================================== /// Default constructors for `Error`. -/// =========================================================================== impl Error { pub(crate) fn invalid_radix(val: u8) -> Error { Error{ @@ -327,7 +322,4 @@ impl error::Error for Error { fn description(&self) -> &str { self.message.as_str() } -} - -/// The `Result` type used in `ApInt`. -pub type Result = result::Result; +} \ No newline at end of file diff --git a/src/radix.rs b/src/info/radix.rs similarity index 97% rename from src/radix.rs rename to src/info/radix.rs index 1712a26..bd9e0a8 100644 --- a/src/radix.rs +++ b/src/info/radix.rs @@ -1,6 +1,5 @@ - -use digit::{Digit, DigitRepr}; -use errors::{Error, Result}; +use crate::data::{Digit, DigitRepr}; +use crate::info::{Error, Result}; /// A radix for parsing strings as `ApInt`s. /// @@ -112,7 +111,7 @@ impl Radix { // To generate this table: // ``` // for radix in 2u64..37 { - // let mut power = digit::BITS / find_last_bit_set(radix.to_u8() as u64); + // let mut power = Digit::BITS / find_last_bit_set(radix.to_u8() as u64); // let mut base = (radix.to_u8() as u32).pow(power as u32); // while let Some(b) = base.checked_mul(radix) { // base = b; diff --git a/src/info/shiftamount.rs b/src/info/shiftamount.rs new file mode 100644 index 0000000..0455918 --- /dev/null +++ b/src/info/shiftamount.rs @@ -0,0 +1,98 @@ +use crate::data::Digit; +use crate::info::{Error, Result, Width}; + +/// Represents an amount of bits to shift an `ApInt`. +/// +/// The purpose of this type is to create a generic abstraction over input types that may act as a +/// `ShiftAmount` for shift operations. 
+/// +/// Shift amounts can only be in the range [0, bit width of the `ApInt`) when used as arguments to +/// the shifting functions in this crate, otherwise they return errors. There is a good reason for +/// not allowing the shift amount to be an arbitrary amount and, for example, making a left shift +/// by 64 or more on a `ApInt` of width 64 simply make it zero. +/// +/// One reason is that internally in all shift functions, there naturally arise expressions such as +/// `digit_val.wrapping_shl(shift_amount)` which will panic when that shift is equal to or greater +/// than the bitwidth of whatever `Digit` type the library is using. +/// +/// Another reason is that shifts could be defined to be like +/// `apint.wrapping_shift(shift % apint.width())`, which is in fact how some CPUs shift their +/// machine integers. This is why Rust makes `1u32.wrapping_shl(32)` panic, since different CPUs +/// use different conventions on what happens when the shift is equal to or greater than the +/// bitwidth of a type. 
+/// +/// Instead, it is almost always more performant and clearer on the User's side to prevent the shift +/// amount from going outside the range by using expressions like the following, depending on the +/// functionality desired: +/// +/// if the shifting should go to zero when `shift >= apint.width()`: +/// ``` +/// use apint::{ApInt, Width, BitWidth}; +/// let w = BitWidth::new(42).unwrap(); +/// let zero = ApInt::zero(w); +/// let shift = 50; +/// let mut lhs = ApInt::one(w); +/// if shift >= lhs.width().to_usize() { +/// lhs.strict_assign(&zero).unwrap(); +/// } else { +/// lhs.wrapping_shl_assign(shift); +/// } +/// assert_eq!(lhs, zero); +/// ``` +/// +/// if the shifting should act like it wraps around (not like `rotate_left_assign`, but a wrapping +/// around of the `ShiftAmount`): +/// ``` +/// use apint::{ApInt, Width, BitWidth}; +/// let w = BitWidth::new(42).unwrap(); +/// let shift = 50; +/// let mut lhs = ApInt::one(w); +/// lhs.wrapping_shl_assign(shift % 42); +/// assert_eq!(lhs, ApInt::from(256u64).into_truncate(w).unwrap()); +/// ``` +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ShiftAmount(usize); + +impl From for ShiftAmount { + /// Returns a new `ShiftAmount` from the given `usize`. + #[inline] + fn from(val: usize) -> ShiftAmount { + ShiftAmount(val) + } +} + +impl ShiftAmount { + /// Returns the internal shift amount representation as `usize`. + pub fn to_usize(self) -> usize { + self.0 + } + + /// Returns if the shift amount is zero + pub(crate) fn is_zero(self) -> bool { + self.0 == 0 + } + + /// Returns a tuple of the number of digits this `ShiftAmount` will leap over and the number of + /// bits within a single digit this `ShiftAmount` will leap over. + /// + /// # Examples + /// + /// - `ShiftAmount(50)` leaps over 0 digits and 50 bits. + /// - `ShiftAmount(64)` leaps over 1 digits and 0 bits. + /// - `ShiftAmount(100)` leaps over 1 digits and 28 bits. 
+ /// - `ShiftAmount(150)` leaps over 2 digits and 22 bits. + pub(crate) fn digit_bit_steps(self) -> (usize, usize) { + (self.to_usize() / Digit::BITS, self.to_usize() % Digit::BITS) + } + + #[inline] + pub(crate) fn verify_shift_amount(self, a: &W) -> Result<()> + where W: Width, + { + let width = a.width(); + if !width.is_valid_shift_amount(self) { + return Err(Error::invalid_shift_amount(self, width)) + } + Ok(()) + } +} \ No newline at end of file diff --git a/src/traits.rs b/src/info/width.rs similarity index 84% rename from src/traits.rs rename to src/info/width.rs index 3df8f97..4433f9d 100644 --- a/src/traits.rs +++ b/src/info/width.rs @@ -1,7 +1,7 @@ -use bitwidth::{BitWidth}; +use crate::info::{BitWidth}; /// Types that have an associated bit width may implement `Width`. pub trait Width { /// Returns the bit width of `self`. fn width(&self) -> BitWidth; -} +} \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs index 612882b..ac61954 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,76 +3,56 @@ //! This library mainly features the **A**rbitrary **p**recision **Int**eger (`ApInt`) type //! which is an `n-bit` integer type acting like a machine integer working in the twos-complement. //! -//! This is useful for emulating machine integers for example in constant evaluation of compilers -//! or for solving bitvector formulas of SMT solvers. -//! -//! Internally `ApInt` uses small-value optimization for values with a bit-width less than or -//! equal to `64` bits. It uses `64` bit digits and thus its algorithms computes within the base -//! of 264. +//! This is useful for emulating machine integers and anything requiring large bitvectors. This +//! crate can be used like a bigint crate, except that most operations are completely inline with no +//! reallocations, resizing is manual, and arithmetic can be purposely overflowed. //! +//! The crate was designed for correctness of emulation and performance in mind, the interface +//! 
of `ApInt` is very comprehensive, and functions that allocate are clearly documented. +//! //! The `ApInt` data structure does **not** know signedness. Instead, the operations defined on it //! (methods) do so. This makes it the perfect building block for higher-level primitives later on. //! -//! The crate was designed for correctness of emulation and performance in mind and the interface -//! of `ApInt` is very comprehensive. - -// #![allow(dead_code)] -// #![deny(missing_docs)] -// #![deny(warnings)] - -#![doc(html_root_url = "https://docs.rs/crate/apint/0.2.0")] - -extern crate smallvec; +//! Internally `ApInt` uses small-value optimization for values with a bit-width less than or +//! equal to a `Digit`. It uses `64` bit `Digit`s by default, however it can potentially be +//! configured to use other types. -#[cfg(feature = "specialized_div_rem")] -extern crate specialized_div_rem; +#![doc(html_root_url = "https://docs.rs/crate/apint/0.3.0")] -#[cfg(feature = "rand_support")] -extern crate rand; +// NOTE: The file structure used in this library has less to do with the actual dependencies between +// files and more about organizing files in a way that helps with programmers finding where implementations +// are. -#[cfg(feature = "serde_support")] -extern crate serde; +// The `ApInt` definition and most of the extremely unsafe function impls on `ApInt`s are located in +// `apint.rs`. The other bulk of unsafe functions is found in `access.rs` and `constructors.rs`. -#[cfg(feature = "serde_support")] -#[cfg_attr(feature = "serde_support", macro_use)] -extern crate serde_derive; +// Contains a variety of helper structs used throughout the crate. +pub(crate) mod info; -#[cfg(all(test, feature = "serde_support"))] -extern crate serde_test; +// Contains large allocating data and data access abstractions. +pub(crate) mod data; -#[cfg(test)] -extern crate itertools; +// Contains a variety of construction functions, casting, and implementations of external traits. 
+pub(crate) mod construction; -mod errors; -mod traits; -mod digit; -mod bitwidth; -mod bitpos; -mod storage; -mod radix; -mod apint; -mod digit_seq; -mod checks; -mod uint; -mod int; -mod utils; +// Contains the big integer logical and arithmetic operations +pub(crate) mod logic; -pub use apint::{ - ApInt, - ShiftAmount -}; -pub use errors::{ +pub use crate::info::{ + Radix, + BitWidth, + BitPos, + Width, + ShiftAmount, Result, Error, ErrorKind }; -pub use bitwidth::BitWidth; -pub use digit::{Bit}; -pub use radix::{Radix}; -pub use bitpos::{BitPos}; -pub use traits::{Width}; -pub use uint::{UInt}; -pub use int::{Int}; +pub use crate::data::{ + ApInt, + UInt, + Int +}; /// Re-exports some commonly used items of this crate. pub mod prelude { diff --git a/src/ll.rs b/src/ll.rs deleted file mode 100644 index c276a82..0000000 --- a/src/ll.rs +++ /dev/null @@ -1,49 +0,0 @@ -use digit::{Digit}; -use digit; - -/// Returns the result of a carry-add between `a` and `b` with -/// the given `carry`. -/// -/// # Computes & Returns -/// -/// result = a + b + carry -/// -/// # Note -/// -/// - The carry acts as input and will also store the -/// carry of this addition after the carry-add computation. -#[inline] -pub(crate) fn carry_add(a: Digit, b: Digit, carry: &mut Digit) -> Digit { - let (hi, lo) = (a.dd() + b.dd() + carry.dd()).hi_lo(); - *carry = hi; - lo -} - -/// Returns the result of a borrow-sub between `a` and `b` with -/// the given `borrow`. -/// -/// # Computes & Returns -/// -/// result = a - b - borrow -/// -/// # Note -/// -/// - Do not be confused: In subtraction the "carry" actually is called "borrow". -/// - The borrow acts as input and will also store the borrow of this subtraction -/// after the borrow-sub computation. 
-#[inline] -pub(crate) fn borrow_sub(a: Digit, b: Digit, borrow: &mut Digit) -> Digit { - let (hi, lo) = (digit::BASE + a.dd() - b.dd() - borrow.dd()).hi_lo(); - - // This is the actual computation: - // - // We subtract from the Digit's base which is equal to 2^64. - // The hi part then is the borrow for the next pair of Digits - // whereas the lo part is the actual wrapped result. - // - // hi * (base) + lo == 1 * (base) + ai - bi - borrow - // => a_i - b_i - borrow < 0 <==> hi == 0 - - *borrow = if hi == Digit::zero() { Digit::one() } else { Digit::zero() }; - lo -} diff --git a/src/logic.rs b/src/logic.rs new file mode 100644 index 0000000..437bc31 --- /dev/null +++ b/src/logic.rs @@ -0,0 +1,14 @@ +#![allow(clippy::needless_range_loop)] // Using iterators convolutes some code +#![allow(clippy::too_many_arguments)] + +mod add_sub; +mod bitwise; +mod div; +mod fuzz; +mod mul; +mod cmp; +mod shift; +mod traits; +mod utils; + +pub(crate) use self::utils::{forward_mut_impl, forward_bin_mut_impl, try_forward_bin_mut_impl}; \ No newline at end of file diff --git a/src/logic/add_sub.rs b/src/logic/add_sub.rs new file mode 100644 index 0000000..dcf7a8e --- /dev/null +++ b/src/logic/add_sub.rs @@ -0,0 +1,339 @@ +use crate::data::{ApInt, DataAccessMut, ZipDataAccessMutSelf::{Inl, Ext}, Digit}; +use crate::info::Result; +#[cfg(test)] +use crate::info::Width; +use crate::logic::{try_forward_bin_mut_impl,forward_mut_impl}; + +/// # Addition and Subtraction Operations +/// +/// **Note**: Unless otherwise noted in the function specific documentation, +/// +/// - **An Error is returned** if function arguments have unmatching bitwidths. +/// - The functions do **not** allocate memory. +/// - The function works for both signed and unsigned interpretations of an `ApInt`. In other words, in the low-level bit-wise representation there is no difference between a signed and unsigned operation by a certain function on fixed bit-width integers. 
(Cite: LLVM) +impl ApInt { + /// Increments this `ApInt` by one inplace. + pub fn wrapping_inc(&mut self) { + match self.access_data_mut() { + DataAccessMut::Inl(x) => { + *x = x.wrapping_add(Digit::one()); + } + DataAccessMut::Ext(x) => { + for i in 0..x.len() { + match x[i].overflowing_add(Digit::one()) { + (v,false) => { + x[i] = v; + break; + } + (v,true) => { + // This case is expected to match very rarely, unless `Digit::MAX` is + // common among the digits. + x[i] = v; + } + } + } + } + } + self.clear_unused_bits(); + } + + /// Increments this `ApInt` by one and returns the result. + pub fn into_wrapping_inc(self) -> ApInt { + forward_mut_impl(self, ApInt::wrapping_inc) + } + + /// Decrements this `ApInt` by one inplace. + pub fn wrapping_dec(&mut self) { + match self.access_data_mut() { + DataAccessMut::Inl(x) => { + *x = x.wrapping_sub(Digit::one()); + } + DataAccessMut::Ext(x) => { + for i in 0..x.len() { + match x[i].overflowing_sub(Digit::one()) { + (v,false) => { + x[i] = v; + break; + } + (v,true) => { + x[i] = v; + } + } + } + } + } + self.clear_unused_bits(); + } + + /// Decrements this `ApInt` by one and returns the result. + pub fn into_wrapping_dec(self) -> ApInt { + forward_mut_impl(self, ApInt::wrapping_dec) + } + + /// Negates this `ApInt` inplace. + pub fn wrapping_neg(&mut self) { + self.bitnot(); + self.wrapping_inc(); + //`wrapping_inc` handles clearing the unused bits + } + + /// Negates this `ApInt` and returns the result. + pub fn into_wrapping_neg(self) -> ApInt { + forward_mut_impl(self, ApInt::wrapping_neg) + } + + /// Add-assigns `rhs` to `self` inplace. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn wrapping_add_assign(&mut self, rhs: &ApInt) -> Result<()> { + match self.zip_access_data_mut_self(rhs)? 
{ + Inl(lhs, rhs) => { + *lhs = lhs.wrapping_add(rhs); + } + Ext(lhs, rhs) => { + let (temp, mut carry) = lhs[0].carrying_add(rhs[0]); + lhs[0] = temp; + for i in 1..rhs.len() { + let temp = lhs[i].dd() + .wrapping_add(rhs[i].dd()) + .wrapping_add(carry.dd()); + lhs[i] = temp.lo(); + carry = temp.hi(); + } + } + } + self.clear_unused_bits(); + Ok(()) + } + + /// Adds `rhs` to `self` and returns the result. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn into_wrapping_add(self, rhs: &ApInt) -> Result { + try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_add_assign) + } + + /// Add-assigns `rhs` to `self` inplace, and returns a boolean indicating if overflow occured, + /// according to the **unsigned** interpretation of overflow. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. + #[cfg(test)] + pub(crate) fn overflowing_uadd_assign(&mut self, rhs: &ApInt) -> Result { + match rhs.width().unused_bits() { + 0 => { + match self.zip_access_data_mut_self(rhs)? { + Inl(lhs, rhs) => { + let tmp = lhs.overflowing_add(rhs); + *lhs = tmp.0; + Ok(tmp.1) + } + Ext(lhs, rhs) => { + let (tmp, mut carry) = lhs[0].carrying_add(rhs[0]); + lhs[0] = tmp; + for i in 1..rhs.len() { + let tmp = lhs[i].dd() + .wrapping_add(rhs[i].dd()) + .wrapping_add(carry.dd()); + lhs[i] = tmp.lo(); + carry = tmp.hi(); + } + Ok(carry != Digit::zero()) + } + } + } + unused => { + let mask = Digit::ONES >> unused; + match self.zip_access_data_mut_self(rhs)? { + Inl(lhs, rhs) => { + let tmp = lhs.wrapping_add(rhs); + // We do not need to use `overflowing_add` here, because `unused` must be + // at least 1 and no overflow of a `Digit` can actually happen + *lhs = tmp & mask; + // Excess bits are cleared by the mask. 
+ Ok((tmp & mask) != tmp) + } + Ext(lhs, rhs) => { + let len = rhs.len(); + let (tmp, mut carry) = lhs[0].carrying_add(rhs[0]); + lhs[0] = tmp; + for i in 1..(len - 1) { + let tmp = lhs[i].dd() + .wrapping_add(rhs[i].dd()) + .wrapping_add(carry.dd()); + lhs[i] = tmp.lo(); + carry = tmp.hi(); + } + let tmp = lhs[len - 1] + .wrapping_add(rhs[len - 1]) + .wrapping_add(carry); + lhs[len - 1] = tmp & mask; + // Excess bits are cleared by the mask. + Ok((tmp & mask) != tmp) + } + } + } + } + } + + /// Add-assigns `rhs` to `self` inplace, and returns a boolean indicating if overflow occured, + /// according to the **signed** interpretation of overflow. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. + #[cfg(test)] + pub(crate) fn overflowing_sadd_assign(&mut self, rhs: &ApInt) -> Result { + let self_sign = self.is_negative(); + let rhs_sign = rhs.is_negative(); + self.wrapping_add_assign(rhs)?; + Ok((self_sign == rhs_sign) && (self_sign != self.is_negative())) + } + + /// Subtract-assigns `rhs` from `self` inplace. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn wrapping_sub_assign(&mut self, rhs: &ApInt) -> Result<()> { + match self.zip_access_data_mut_self(rhs)? { + Inl(lhs, rhs) => { + *lhs = lhs.wrapping_sub(rhs); + } + Ext(lhs, rhs) => { + let (temp, mut carry) = lhs[0].dd() + .wrapping_add((!rhs[0]).dd()) + .wrapping_add(Digit::one().dd()).lo_hi(); + lhs[0] = temp; + for i in 1..rhs.len() { + let temp = lhs[i].dd() + .wrapping_add((!rhs[i]).dd()) + .wrapping_add(carry.dd()); + lhs[i] = temp.lo(); + carry = temp.hi(); + } + } + } + self.clear_unused_bits(); + Ok(()) + } + + /// Subtracts `rhs` from `self` and returns the result. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. 
+ pub fn into_wrapping_sub(self, rhs: &ApInt) -> Result { + try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_sub_assign) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::info::BitWidth; + + mod inc { + use super::*; + use std::u64; + + #[test] + fn test() { + assert_eq!(ApInt::from(14u8).into_wrapping_inc(),ApInt::from(15u8)); + assert_eq!(ApInt::from(15u8).into_wrapping_inc(),ApInt::from(16u8)); + assert_eq!(ApInt::from(16u8).into_wrapping_inc(),ApInt::from(17u8)); + assert_eq!(ApInt::from(17u8).into_wrapping_inc(),ApInt::from(18u8)); + assert_eq!(ApInt::from([0u64,0,0]).into_wrapping_inc(),ApInt::from([0u64,0,1])); + assert_eq!(ApInt::from([0,7,u64::MAX]).into_wrapping_inc(),ApInt::from([0u64,8,0])); + assert_eq!(ApInt::from([u64::MAX,u64::MAX]).into_wrapping_inc(),ApInt::from([0u64,0])); + assert_eq!(ApInt::from([0,u64::MAX,u64::MAX - 1]) + .into_wrapping_inc(),ApInt::from([0,u64::MAX,u64::MAX])); + assert_eq!(ApInt::from([0,u64::MAX,0]).into_wrapping_inc(),ApInt::from([0,u64::MAX,1])); + } + } + + mod wrapping_neg { + use super::*; + + fn assert_symmetry(input: ApInt, expected: ApInt) { + assert_eq!(input.clone().into_wrapping_neg(), expected.clone()); + assert_eq!(expected.into_wrapping_neg(), input); + } + + fn test_vals() -> impl Iterator { + [0_i128, 1, 2, 4, 5, 7, 10, 42, 50, 100, 128, 150, + 1337, 123123, 999999, 987432, 77216417].into_iter().map(|v| *v) + } + + #[test] + fn simple() { + assert_symmetry(ApInt::zero(BitWidth::w1()), ApInt::zero(BitWidth::w1())); + assert_symmetry(ApInt::one(BitWidth::w1()), ApInt::all_set(BitWidth::w1())); + } + + #[test] + fn range() { + for v in test_vals() { + assert_symmetry(ApInt::from_i8(v as i8), ApInt::from_i8(-v as i8)); + assert_symmetry(ApInt::from_i16(v as i16), ApInt::from_i16(-v as i16)); + assert_symmetry(ApInt::from_i32(v as i32), ApInt::from_i32(-v as i32)); + assert_symmetry(ApInt::from_i64(v as i64), ApInt::from_i64(-v as i64)); + assert_symmetry(ApInt::from_i128(v), 
ApInt::from_i128(-v)); + } + } + } + + mod overflowing { + use super::*; + use std::u64; + + #[test] + fn simple() { + let mut x0 = ApInt::from(u64::MAX); + let b0 = x0.overflowing_uadd_assign(&ApInt::from(1u64)).unwrap(); + assert!(b0); + assert_eq!(x0, ApInt::from(0u64)); + + let mut x1 = ApInt::from([u64::MAX,u64::MAX]); + let b1 = x1.overflowing_uadd_assign(&ApInt::from([u64::MAX,u64::MAX])).unwrap(); + assert!(b1); + assert_eq!(x1, ApInt::from([u64::MAX,u64::MAX - 1])); + + let mut x2 = ApInt::from(u64::MAX - 1); + let b2 = x2.overflowing_uadd_assign(&ApInt::from(1u64)).unwrap(); + assert!(!b2); + assert_eq!(x2, ApInt::from(u64::MAX)); + + let bw = BitWidth::new(111).unwrap(); + let mut x3 = ApInt::from([u64::MAX,0]).into_truncate(bw).unwrap(); + let b3 = x3.overflowing_uadd_assign( + &ApInt::from([0,u64::MAX]).into_truncate(bw).unwrap() + ).unwrap(); + assert!(!b3); + assert_eq!(x3, ApInt::from([u64::MAX,u64::MAX]).into_truncate(bw).unwrap()); + + let bw = BitWidth::new(7).unwrap(); + let mut x3 = ApInt::from(31u8).into_truncate(bw).unwrap(); + let b3 = x3.overflowing_uadd_assign( + &ApInt::from(3u8 << 5).into_truncate(bw).unwrap() + ).unwrap(); + assert!(!b3); + assert_eq!(x3, ApInt::from(127u8).into_truncate(bw).unwrap()); + + let bw = BitWidth::new(2).unwrap(); + let mut x4 = ApInt::from(1u8).into_truncate(bw).unwrap(); + assert!(!x4.is_negative()); + let b4 = x4.overflowing_sadd_assign( + &ApInt::from(1u8).into_truncate(bw).unwrap() + ).unwrap(); + assert!(x4.is_negative()); + assert!(b4); + assert_eq!(x4, ApInt::from(2u8).into_truncate(bw).unwrap()); + } + } +} diff --git a/src/apint/bitwise.rs b/src/logic/bitwise.rs similarity index 73% rename from src/apint/bitwise.rs rename to src/logic/bitwise.rs index e696e7f..489d82e 100644 --- a/src/apint/bitwise.rs +++ b/src/logic/bitwise.rs @@ -1,120 +1,97 @@ -use apint::{ApInt}; -use digit::{Bit}; -use digit; -use errors::{Result}; -use apint::utils::{ - DataAccess, - DataAccessMut -}; -use bitpos::{BitPos}; 
-use traits::{Width}; -use checks; -use utils::{try_forward_bin_mut_impl, forward_mut_impl}; - -use std::ops::{ - Not, - BitAnd, - BitOr, - BitXor, - BitAndAssign, - BitOrAssign, - BitXorAssign -}; +use crate::data::{ApInt, Digit, DataAccess, DataAccessMut}; +use crate::info::{Result, BitPos, Width}; +use crate::logic::{try_forward_bin_mut_impl, forward_mut_impl}; /// # Bitwise Operations +/// +/// **Note**: unless otherwise noted in the function specific documentation, +/// +/// - **An Error is returned** if function arguments have unmatching bitwidths. +/// - The functions do **not** allocate memory. impl ApInt { - /// Flips all bits of `self` and returns the result. - pub fn into_bitnot(self) -> Self { - forward_mut_impl(self, ApInt::bitnot) - } - /// Flip all bits of this `ApInt` inplace. pub fn bitnot(&mut self) { - self.modify_digits(|digit| digit.not_inplace()); + self.modify_digits(Digit::flip_all); self.clear_unused_bits(); } - /// Tries to bit-and assign this `ApInt` inplace to `rhs` - /// and returns the result. - /// - /// # Errors - /// - /// If `self` and `rhs` have unmatching bit widths. - pub fn into_bitand(self, rhs: &ApInt) -> Result { - try_forward_bin_mut_impl(self, rhs, ApInt::bitand_assign) + /// Flips all bits of `self` and returns the result. + pub fn into_bitnot(self) -> Self { + forward_mut_impl(self, ApInt::bitnot) } - /// Bit-and assigns all bits of this `ApInt` with the bits of `rhs`. - /// - /// **Note:** This operation is inplace of `self` and won't allocate memory. + /// Bitwise and-assigns `rhs` to `self` inplace. /// /// # Errors /// - /// If `self` and `rhs` have unmatching bit widths. + /// - If `self` and `rhs` have unmatching bitwidths. pub fn bitand_assign(&mut self, rhs: &ApInt) -> Result<()> { self.modify_zipped_digits(rhs, |l, r| *l &= r) } - /// Tries to bit-and assign this `ApInt` inplace to `rhs` - /// and returns the result. + /// Bitwise ands `rhs` to `self` and returns the result. 
/// /// # Errors /// - /// If `self` and `rhs` have unmatching bit widths. - pub fn into_bitor(self, rhs: &ApInt) -> Result { - try_forward_bin_mut_impl(self, rhs, ApInt::bitor_assign) + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn into_bitand(self, rhs: &ApInt) -> Result { + try_forward_bin_mut_impl(self, rhs, ApInt::bitand_assign) } - /// Bit-or assigns all bits of this `ApInt` with the bits of `rhs`. - /// - /// **Note:** This operation is inplace of `self` and won't allocate memory. + + /// Bitwise or-assigns `rhs` to `self` inplace. /// /// # Errors /// - /// If `self` and `rhs` have unmatching bit widths. + /// - If `self` and `rhs` have unmatching bitwidths. pub fn bitor_assign(&mut self, rhs: &ApInt) -> Result<()> { self.modify_zipped_digits(rhs, |l, r| *l |= r) } - /// Tries to bit-xor assign this `ApInt` inplace to `rhs` - /// and returns the result. + /// Bitwise ors `rhs` to `self` and returns the result. /// /// # Errors /// - /// If `self` and `rhs` have unmatching bit widths. - pub fn into_bitxor(self, rhs: &ApInt) -> Result { - try_forward_bin_mut_impl(self, rhs, ApInt::bitxor_assign) + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn into_bitor(self, rhs: &ApInt) -> Result { + try_forward_bin_mut_impl(self, rhs, ApInt::bitor_assign) } - /// Bit-xor assigns all bits of this `ApInt` with the bits of `rhs`. - /// - /// **Note:** This operation is inplace of `self` and won't allocate memory. + /// Bitwise xor-assigns `rhs` to `self` inplace. /// /// # Errors /// - /// If `self` and `rhs` have unmatching bit widths. + /// - If `self` and `rhs` have unmatching bitwidths. pub fn bitxor_assign(&mut self, rhs: &ApInt) -> Result<()> { self.modify_zipped_digits(rhs, |l, r| *l ^= r) } + + /// Bitwise xors `rhs` to `self` and returns the result. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. 
+ pub fn into_bitxor(self, rhs: &ApInt) -> Result { + try_forward_bin_mut_impl(self, rhs, ApInt::bitxor_assign) + } } /// # Bitwise Access +/// +/// **Note**: unless otherwise noted in the function specific documentation, +/// +/// - The functions do **not** allocate memory. impl ApInt { /// Returns the bit at the given bit position `pos`. /// - /// This returns - /// - /// - `Bit::Set` if the bit at `pos` is `1` - /// - `Bit::Unset` otherwise - /// /// # Errors /// /// - If `pos` is not a valid bit position for the width of this `ApInt`. - pub fn get_bit_at

(&self, pos: P) -> Result + pub fn get_bit_at

(&self, pos: P) -> Result where P: Into { let pos = pos.into(); - checks::verify_bit_access(self, pos)?; + pos.verify_bit_access(self)?; match self.access_data() { DataAccess::Inl(digit) => digit.get(pos), DataAccess::Ext(digits) => { @@ -133,7 +110,7 @@ impl ApInt { where P: Into { let pos = pos.into(); - checks::verify_bit_access(self, pos)?; + pos.verify_bit_access(self)?; match self.access_data_mut() { DataAccessMut::Inl(digit) => digit.set(pos), DataAccessMut::Ext(digits) => { @@ -152,7 +129,7 @@ impl ApInt { where P: Into { let pos = pos.into(); - checks::verify_bit_access(self, pos)?; + pos.verify_bit_access(self)?; match self.access_data_mut() { DataAccessMut::Inl(digit) => digit.unset(pos), DataAccessMut::Ext(digits) => { @@ -176,7 +153,7 @@ impl ApInt { where P: Into { let pos = pos.into(); - checks::verify_bit_access(self, pos)?; + pos.verify_bit_access(self)?; match self.access_data_mut() { DataAccessMut::Inl(digit) => digit.flip(pos), DataAccessMut::Ext(digits) => { @@ -188,11 +165,11 @@ impl ApInt { /// Sets all bits of this `ApInt` to one (`1`). pub fn set_all(&mut self) { - self.modify_digits(|digit| digit.set_all()); + self.modify_digits(Digit::set_all); self.clear_unused_bits(); } - /// Returns``true` if all bits in the `ApInt` are set. + /// Returns `true` if all bits in the `ApInt` are set. pub fn is_all_set(&self) -> bool { let (msb, rest) = self.split_most_significant_digit(); if let Some(excess_bits) = self.width().excess_bits() { @@ -205,7 +182,7 @@ impl ApInt { /// Sets all bits of this `ApInt` to zero (`0`). pub fn unset_all(&mut self) { - self.modify_digits(|digit| digit.unset_all()); + self.modify_digits(Digit::unset_all); } /// Returns `true` if all bits in the `ApInt` are unset. @@ -215,15 +192,13 @@ impl ApInt { /// Flips all bits of this `ApInt`. 
pub fn flip_all(&mut self) { - // TODO: remove since equal to ApInt::bitnot_assign - self.modify_digits(|digit| digit.flip_all()); - self.clear_unused_bits(); + self.bitnot(); } /// Returns the sign bit of this `ApInt`. /// /// **Note:** This is equal to the most significant bit of this `ApInt`. - pub fn sign_bit(&self) -> Bit { + pub fn sign_bit(&self) -> bool { self.most_significant_bit() } @@ -263,7 +238,7 @@ impl ApInt { /// Returns the number of ones in the binary representation of this `ApInt`. pub fn count_ones(&self) -> usize { self.as_digit_slice() - .into_iter() + .iter() .map(|d| d.repr().count_ones() as usize) .sum::() } @@ -271,26 +246,26 @@ impl ApInt { /// Returns the number of zeros in the binary representation of this `ApInt`. pub fn count_zeros(&self) -> usize { let zeros = self.as_digit_slice() - .into_iter() + .iter() .map(|d| d.repr().count_zeros() as usize) .sum::(); // Since `ApInt` instances with width's that are no powers of two - // have unused excess bits that are always zero we need to cut them off + // have unused bits that are always zero we need to cut them off // for a correct implementation of this operation. - zeros - (digit::BITS - self.width().excess_bits().unwrap_or(digit::BITS)) + zeros - self.width().unused_bits() } /// Returns the number of leading zeros in the binary representation of this `ApInt`. pub fn leading_zeros(&self) -> usize { let mut zeros = 0; - for d in self.as_digit_slice().into_iter().rev() { + for d in self.as_digit_slice().iter().rev() { let leading_zeros = d.repr().leading_zeros() as usize; zeros += leading_zeros; - if leading_zeros != digit::BITS { + if leading_zeros != Digit::BITS { break; } } - zeros - (digit::BITS - self.width().excess_bits().unwrap_or(digit::BITS)) + zeros - self.width().unused_bits() } /// Returns the number of trailing zeros in the binary representation of this `ApInt`. 
@@ -299,140 +274,21 @@ impl ApInt { for d in self.as_digit_slice() { let trailing_zeros = d.repr().trailing_zeros() as usize; zeros += trailing_zeros; - if trailing_zeros != digit::BITS { + if trailing_zeros != Digit::BITS { break; } } if zeros >= self.width().to_usize() { - zeros -= digit::BITS - self.width().excess_bits().unwrap_or(digit::BITS); + zeros -= self.width().unused_bits(); } zeros } } -// =========================================================================== -// `Not` (bitwise) impls -// =========================================================================== - -impl Not for ApInt { - type Output = ApInt; - - fn not(self) -> Self::Output { - forward_mut_impl(self, ApInt::bitnot) - } -} - -// =========================================================================== -// `BitAnd` impls -// =========================================================================== - -impl<'a> BitAnd<&'a ApInt> for ApInt { - type Output = ApInt; - - fn bitand(self, rhs: &'a ApInt) -> Self::Output { - self.into_bitand(rhs).unwrap() - } -} - -impl<'a, 'b> BitAnd<&'a ApInt> for &'b ApInt { - type Output = ApInt; - - fn bitand(self, rhs: &'a ApInt) -> Self::Output { - self.clone().into_bitand(rhs).unwrap() - } -} - -impl<'a, 'b> BitAnd<&'a ApInt> for &'b mut ApInt { - type Output = ApInt; - - fn bitand(self, rhs: &'a ApInt) -> Self::Output { - self.clone().into_bitand(rhs).unwrap() - } -} - -// =========================================================================== -// `BitOr` impls -// =========================================================================== - -impl<'a> BitOr<&'a ApInt> for ApInt { - type Output = ApInt; - - fn bitor(self, rhs: &'a ApInt) -> Self::Output { - self.into_bitor(rhs).unwrap() - } -} - -impl<'a, 'b> BitOr<&'a ApInt> for &'b ApInt { - type Output = ApInt; - - fn bitor(self, rhs: &'a ApInt) -> Self::Output { - self.clone().into_bitor(rhs).unwrap() - } -} - -impl<'a, 'b> BitOr<&'a ApInt> for &'b mut ApInt { - type Output = ApInt; - - 
fn bitor(self, rhs: &'a ApInt) -> Self::Output { - self.clone().into_bitor(rhs).unwrap() - } -} - -// =========================================================================== -// `BitXor` impls -// =========================================================================== - -impl<'a> BitXor<&'a ApInt> for ApInt { - type Output = ApInt; - - fn bitxor(self, rhs: &'a ApInt) -> Self::Output { - self.into_bitxor(rhs).unwrap() - } -} - -impl<'a, 'b> BitXor<&'a ApInt> for &'b ApInt { - type Output = ApInt; - - fn bitxor(self, rhs: &'a ApInt) -> Self::Output { - self.clone().into_bitxor(rhs).unwrap() - } -} - -impl<'a, 'b> BitXor<&'a ApInt> for &'b mut ApInt { - type Output = ApInt; - - fn bitxor(self, rhs: &'a ApInt) -> Self::Output { - self.clone().into_bitxor(rhs).unwrap() - } -} - -// =========================================================================== -// `BitAndAssign`, `BitOrAssign` and `BitXorAssign` impls -// =========================================================================== - -impl<'a> BitAndAssign<&'a ApInt> for ApInt { - fn bitand_assign(&mut self, rhs: &'a ApInt) { - self.bitand_assign(rhs).unwrap(); - } -} - -impl<'a> BitOrAssign<&'a ApInt> for ApInt { - fn bitor_assign(&mut self, rhs: &'a ApInt) { - self.bitor_assign(rhs).unwrap(); - } -} - -impl<'a> BitXorAssign<&'a ApInt> for ApInt { - fn bitxor_assign(&mut self, rhs: &'a ApInt) { - self.bitxor_assign(rhs).unwrap(); - } -} - #[cfg(test)] mod tests { use super::*; - - use bitwidth::{BitWidth}; + use crate::info::BitWidth; #[test] fn count_ones() { diff --git a/src/logic/cmp.rs b/src/logic/cmp.rs new file mode 100644 index 0000000..ac61852 --- /dev/null +++ b/src/logic/cmp.rs @@ -0,0 +1,338 @@ +use crate::data::{ApInt, Digit, DataAccess, ZipDataAccess}; +use crate::info::{Result, Width}; + +use std::cmp::Ordering; +use std::ops::Not; + +/// **Note:** If `self` and `other` have unmatching bit widths, `false` will be returned. 
+impl PartialEq for ApInt { + fn eq(&self, other: &ApInt) -> bool { + if self.len_bits() != other.len_bits() { + return false + } + self.as_digit_slice() == other.as_digit_slice() + } +} + +/// **Note:** If `self` and `other` have unmatching bit widths, `false` will be returned. +impl Eq for ApInt {} + +/// # Comparison Operations +/// +/// **Note**: Unless otherwise noted in the function specific documentation, +/// +/// - **An Error is returned** if function arguments have unmatching bitwidths. +/// - The functions do **not** allocate memory. +/// - The function works for both signed and unsigned interpretations of an `ApInt`. In other words, in the low-level bit-wise representation there is no difference between a signed and unsigned operation by a certain function on fixed bit-width integers. (Cite: LLVM) +impl ApInt { + /// Returns `true` if this `ApInt` represents the value zero (`0`). + /// + /// # Note + /// + /// - Zero (`0`) is also called the additive neutral element. + /// - This operation is more efficient than comparing two instances of `ApInt` + #[inline] + pub fn is_zero(&self) -> bool { + match self.access_data() { + DataAccess::Inl(digit) => digit.is_zero(), + DataAccess::Ext(digits) => { + digits.iter().all(|digit| digit.is_zero()) + } + } + } + + /// Returns `true` if the **unsigned** interpretation `ApInt` represents the value one (`1`). + /// + /// # Corner Case + /// + /// Normally, both signed and unsigned `ApInt`s have the same representation for the value one. + /// For the signed interpretation of `ApInt`s with a bitwidth of 1, the sign bit is the only + /// bit, meaning that it can only represent negative one and zero. This function treats the + /// `ApInt` as unsigned in this corner case. + /// + /// # Note + /// + /// - One (`1`) is also called the multiplicative neutral element. 
+ /// - This operation is more efficient than comparing two instances of `ApInt` + #[inline] + pub fn is_one(&self) -> bool { + match self.access_data() { + DataAccess::Inl(digit) => digit == Digit::one(), + DataAccess::Ext(digits) => { + let (last, rest) = digits.split_last().unwrap(); + last.is_one() && rest.iter().all(|digit| digit.is_zero()) + } + } + } + + pub fn is_unsigned_max_value(&self) -> bool { + let mask = Digit::ONES >> self.width().unused_bits(); + match self.access_data() { + DataAccess::Inl(digit) => digit == mask, + DataAccess::Ext(digits) => { + let (first, rest) = digits.split_first().unwrap(); + *first == mask && rest.iter().all(|digit| digit.is_all_set()) + } + } + } + + pub fn is_signed_max_value(&self) -> bool { + let unused = self.width().unused_bits(); + let mask = if unused == Digit::BITS - 1 { + Digit::ZERO + } else { + Digit::ONES >> unused + }; + match self.access_data() { + DataAccess::Inl(digit) => digit == mask, + DataAccess::Ext(digits) => { + let (first, rest) = digits.split_first().unwrap(); + *first == mask && rest.iter().all(|digit| digit.is_all_set()) + } + } + } + + pub fn is_unsigned_min_value(&self) -> bool { + self.is_zero() + } + + pub fn is_signed_min_value(&self) -> bool { + let mask = Digit::ONE << (self.width().excess_bits().unwrap_or(Digit::BITS) - 1); + match self.access_data() { + DataAccess::Inl(digit) => digit == mask, + DataAccess::Ext(digits) => { + let (first, rest) = digits.split_first().unwrap(); + *first == mask && rest.iter().all(|digit| digit.is_all_set()) + } + } + } + + /// Returns `true` if this `ApInt` represents an even number. + /// Equivalent to testing if the least significant bit is zero. + #[inline] + pub fn is_even(&self) -> bool { + !self.least_significant_bit() + } + + /// Returns `true` if this `ApInt` represents an odd number. + /// Equivalent to testing if the least significant bit is one. 
+ #[inline] + pub fn is_odd(&self) -> bool { + self.least_significant_bit() + } + + /// Returns `true` if the **signed** representation of this `ApInt` is positive. + /// Equivalent to testing if the most significant bit is zero. + #[inline] + pub fn is_positive(&self) -> bool { + !self.most_significant_bit() + } + + /// Returns `true` if the **signed** representation of this `ApInt` is negative. + /// Equivalent to testing if the most significant bit is one. + #[inline] + pub fn is_negative(&self) -> bool { + self.most_significant_bit() + } + + /// Unsigned less-than (`ult`) comparison between `self` and `rhs`, meaning the returned boolean + /// indicates if `self < rhs` for the **unsigned** interpretation of `ApInt`s. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn checked_ult(&self, rhs: &ApInt) -> Result { + match self + .zip_access_data(rhs) + .map_err(|err| err.with_annotation(format!( + "Error occured on unsigned less-than (slt) comparison with `lhs < rhs` where \ + \n\tlhs = {:?}\ + \n\trhs = {:?}", + self, rhs) + ))? + { + ZipDataAccess::Inl(lhs, rhs) => { + Ok(lhs.repr() < rhs.repr()) + } + ZipDataAccess::Ext(lhs, rhs) => { + for (l, r) in lhs.iter().rev() + .zip(rhs.iter().rev()) + { + match l.cmp(r) { + Ordering::Less => return Ok(true), + Ordering::Greater => return Ok(false), + Ordering::Equal => () + } + } + Ok(false) + } + } + } + + /// Unsigned less-equals (`ule`) comparison between `self` and `rhs`, meaning the returned + /// boolean indicates if `self <= rhs` for the **unsigned** interpretation of `ApInt`s. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. 
+ pub fn checked_ule(&self, rhs: &ApInt) -> Result { + rhs.checked_ult(self).map(Not::not) + .map_err(|err| err.with_annotation(format!( + "Error occured on unsigned less-than or equals (ule) comparison with `lhs <= rhs` where \ + \n\tlhs = {:?}\ + \n\trhs = {:?}", + self, rhs) + )) + } + + /// Unsigned greater-than (`ugt`) comparison between `self` and `rhs`, meaning the returned + /// boolean indicates if `self > rhs` for the **unsigned** interpretation of `ApInt`s. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn checked_ugt(&self, rhs: &ApInt) -> Result { + rhs.checked_ult(self) + .map_err(|err| err.with_annotation(format!( + "Error occured on unsigned greater-than (ugt) comparison with `lhs > rhs` where \ + \n\tlhs = {:?}\ + \n\trhs = {:?}", + self, rhs) + )) + } + + /// Unsigned greater-equals (`uge`) comparison between `self` and `rhs`, meaning the returned + /// boolean indicates if `self >= rhs` for the **unsigned** interpretation of `ApInt`s. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn checked_uge(&self, rhs: &ApInt) -> Result { + self.checked_ult(rhs).map(Not::not) + .map_err(|err| err.with_annotation(format!( + "Error occured on unsigned greater-than or equals (ule) comparison with `lhs >= rhs` where \ + \n\tlhs = {:?}\ + \n\trhs = {:?}", + self, rhs) + )) + } + + /// Signed less-than (`slt`) comparison between `self` and `rhs`, meaning the returned boolean + /// indicates if `self < rhs` for the **signed** interpretation of `ApInt`s. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. 
+ pub fn checked_slt(&self, rhs: &ApInt) -> Result { + let lhs = self; + lhs.zip_access_data(rhs).and_then(|zipped| { + match zipped { + ZipDataAccess::Inl(lhs, rhs) => { + let infate_abs = Digit::BITS - rhs.width().to_usize(); + let lhs = (lhs.repr() << infate_abs) as i64; + let rhs = (rhs.repr() << infate_abs) as i64; + Ok(lhs < rhs) + } + ZipDataAccess::Ext(_, _) => { + match (lhs.sign_bit(), rhs.sign_bit()) { + (false, false) => lhs.checked_ult(rhs), + (false, true) => Ok(false), + (true, false) => Ok(true), + (true, true) => rhs.checked_ugt(lhs) + } + } + } + }) + .map_err(|err| err.with_annotation(format!( + "Error occured on signed less-than (slt) comparison with `lhs < rhs` where \ + \n\tlhs = {:?}\ + \n\trhs = {:?}", + self, rhs) + )) + } + + /// Signed less-equals (`sle`) comparison between `self` and `rhs`, meaning the returned boolean + /// indicates if `self <= rhs` for the **signed** interpretation of `ApInt`s. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn checked_sle(&self, rhs: &ApInt) -> Result { + rhs.checked_slt(self).map(Not::not) + .map_err(|err| err.with_annotation(format!( + "Error occured on signed less-than or equals (ule) comparison with `lhs <= rhs` where \ + \n\tlhs = {:?}\ + \n\trhs = {:?}", + self, rhs) + )) + } + + /// Signed greater-than (`sgt`) comparison between `self` and `rhs`, meaning the returned + /// boolean indicates if `self > rhs` for the **signed** interpretation of `ApInt`s. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. 
+ pub fn checked_sgt(&self, rhs: &ApInt) -> Result { + rhs.checked_slt(self) + .map_err(|err| err.with_annotation(format!( + "Error occured on signed greater-than (ugt) comparison with `lhs > rhs` where \ + \n\tlhs = {:?}\ + \n\trhs = {:?}", + self, rhs) + )) + } + + /// Signed greater-equals (`sge`) comparison between `self` and `rhs`, meaning the returned + /// boolean indicates if `self >= rhs` for the **signed** interpretation of `ApInt`s. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn checked_sge(&self, rhs: &ApInt) -> Result { + self.checked_slt(rhs).map(Not::not) + .map_err(|err| err.with_annotation(format!( + "Error occured on signed greater-than or equals (ule) comparison with `lhs >= rhs` where \ + \n\tlhs = {:?}\ + \n\trhs = {:?}", + self, rhs) + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + mod partial_eq { + use super::*; + + #[test] + fn simple_small() { + let a = ApInt::from_u8(42); + let b = ApInt::from_u8(42); + let c = ApInt::from_u8(77); + let d = ApInt::from_u16(42); + assert_eq!(a, b); + assert_ne!(a, c); + assert_ne!(a, d); + assert_ne!(b, c); + assert_ne!(b, d); + assert_ne!(c, d); + } + + #[test] + fn simple_large() { + let a = ApInt::from_u128(42); + let b = ApInt::from_u128(42); + let c = ApInt::from_u128(1337); + let d = ApInt::from_u64(42); + assert_eq!(a, b); + assert_ne!(a, c); + assert_ne!(a, d); + assert_ne!(b, c); + assert_ne!(b, d); + assert_ne!(c, d); + } + } +} diff --git a/src/logic/div.rs b/src/logic/div.rs new file mode 100644 index 0000000..7467ed4 --- /dev/null +++ b/src/logic/div.rs @@ -0,0 +1,1378 @@ +// The functions should not be split up more. +#![allow(clippy::cognitive_complexity)] + +use crate::data::{ApInt, ZipDataAccessMutSelf::{Inl, Ext}, ZipDataAccessMutBoth, Digit, DoubleDigit}; +use crate::info::{Error, Result, DivOp}; +use crate::logic::{try_forward_bin_mut_impl}; + +/// Divides `duo` by `div` and sets `duo` to the quotient and `div` to the remainder. 
+/// Assumptions: +/// - ini_duo_sd > 0 +/// - div_sd == 0 +fn large_div_by_small(duo: &mut [Digit], ini_duo_sd: usize, div: &mut Digit) { + // long division algorithm + let (mut quo,mut rem) = duo[ini_duo_sd].wrapping_divrem(*div); + duo[ini_duo_sd] = quo; + for duo_sd_sub1 in (0..ini_duo_sd).rev() { + let duo_double = DoubleDigit::from_lo_hi(duo[duo_sd_sub1],rem); + let tmp = duo_double.wrapping_divrem((*div).dd()); + // the high part is guaranteed to zero out when this is subtracted, + // so only the low parts need to be used + quo = tmp.0.lo(); + rem = tmp.1.lo(); + duo[duo_sd_sub1] = quo; + } + *div = rem; +} + +/// Divides `duo` by `div` and sets `duo` to the quotient and `div` to the remainder. +/// Assumptions: +/// - ini_duo_sd > 0 +/// - div_sd == 0 +/// - div[0].leading_zeros() >= (Digit::BITS / 2) +fn large_div_by_u32(duo: &mut [Digit], ini_duo_sd: usize, div: &mut Digit) { + // long division algorithm + let div_u32 = (*div).repr() as u32; + fn dd(x: u32) -> Digit {Digit(u64::from(x))} + fn lo(x: Digit) -> u32 {x.repr() as u32} + fn hi(x: Digit) -> u32 {(x.repr() >> 32) as u32} + fn from_lo_hi(lo: u32, hi: u32) -> Digit {Digit(u64::from(lo) | (u64::from(hi) << 32))} + fn wrapping_divrem(x: u32, y: u32) -> (u32,u32) {(x.wrapping_div(y),x.wrapping_rem(y))} + let (mut quo_hi,mut rem_hi) = wrapping_divrem(hi(duo[ini_duo_sd]),div_u32); + let duo_double = from_lo_hi(lo(duo[ini_duo_sd]), rem_hi); + let tmp = duo_double.wrapping_divrem(dd(div_u32)); + let mut quo_lo = lo(tmp.0); + let mut rem_lo = lo(tmp.1); + duo[ini_duo_sd] = from_lo_hi(quo_lo,quo_hi); + for duo_sd_sub1 in (0..ini_duo_sd).rev() { + let duo_double_hi = from_lo_hi(hi(duo[duo_sd_sub1]),rem_lo); + let temp_hi = duo_double_hi.wrapping_divrem(dd(div_u32)); + quo_hi = lo(temp_hi.0); + rem_hi = lo(temp_hi.1); + let duo_double_lo = from_lo_hi(lo(duo[duo_sd_sub1]),rem_hi); + let temp_lo = duo_double_lo.wrapping_divrem(dd(div_u32)); + quo_lo = lo(temp_lo.0); + rem_lo = lo(temp_lo.1); + duo[duo_sd_sub1] 
= from_lo_hi(quo_lo,quo_hi); + } + *div = Digit(u64::from(rem_lo)); +} + +// Note: This will be refactored to not use macros in the future. Macros are used for being able to +// change the temporary buffer types easily + +/// Sets the `$array` to be the two's complement of itself, all the way up to `$len` (exclusive) +macro_rules! twos_complement { + ($len:expr, $array:ident) => { + for i0 in 0..$len { + let bitnot = !$array[i0]; + match bitnot.overflowing_add(Digit::one()) { + (v,false) => { + $array[i0] = v; + for i1 in (i0 + 1)..$len { + $array[i1] = !$array[i1] + } + break; + } + (v,true) => { + $array[i0] = v; + } + } + } + }; +} + +/// Unsigned Greater or Equal to. +/// This checks for `$lhs >= $rhs`, checking only up to $lhs_len and $rhs_len (exclusive) +/// respectively, and runs `$ge_branch` if true and `$lt_branch` otherwise +macro_rules! uge { + ($lhs_len:expr, + $lhs:ident, + $rhs_len:expr, + $rhs:ident, + $ge_branch:block, + $lt_branch:block) => { + // the purpose of this macro is to allow for $lhs and $rhs to be different lengths + let mut inconclusive = true; + let mut b = true; + if $rhs_len <= $lhs_len { + for i in $rhs_len..$lhs_len { + if $lhs[i] != Digit::zero() { + inconclusive = false; + b = true; + break + } + } + if inconclusive { + for i in (0..$lhs_len).rev() { + if $lhs[i] < $rhs[i] { + b = false; + break + } else if $lhs[i] != $rhs[i] { + break + } + } + } + } else { + for i in $lhs_len..$rhs_len { + if $rhs[i] != Digit::zero() { + inconclusive = false; + b = false; + break + } + } + if inconclusive { + for i in (0..$rhs_len).rev() { + if $lhs[i] < $rhs[i] { + b = false; + break + } else if $lhs[i] != $rhs[i] { + break + } + } + } + } + if b {$ge_branch} else {$lt_branch} + }; +} + +/// Unsigned Greater Than. +/// This checks for `$lhs > $rhs`, checking only up to $lhs_len and $rhs_len (exclusive) +/// respectively, and runs `$gt_branch` if true and `$le_branch` otherwise +macro_rules! 
ugt { + ( + $lhs_len:expr, + $lhs:ident, + $rhs_len:expr, + $rhs:ident, + $gt_branch:block, + $le_branch:block + ) => { + // the purpose of this macro is to allow for $lhs and $rhs to be different lengths + let mut inconclusive = true; + let mut b = false; + if $rhs_len <= $lhs_len { + for i in $rhs_len..$lhs_len { + if $lhs[i] != Digit::zero() { + inconclusive = false; + b = true; + break + } + } + if inconclusive { + for i in (0..$lhs_len).rev() { + if $rhs[i] < $lhs[i] { + b = true; + break + } else if $lhs[i] != $rhs[i] { + break + } + } + } + } else { + for i in $lhs_len..$rhs_len { + if $rhs[i] != Digit::zero() { + inconclusive = false; + b = false; + break + } + } + if inconclusive { + for i in (0..$rhs_len).rev() { + if $rhs[i] < $lhs[i] { + b = true; + break + } else if $lhs[i] != $rhs[i] { + break + } + } + } + } + if b {$gt_branch} else {$le_branch} + }; +} + +/// Assigns `$sum + $sub` to `$target` (`sub` is intended to be the two's complement of some value), +/// and zeros out `$sum` except for it sets `$sum[0]` to `$val` +macro_rules! special0 { + ($len:expr,$sum:ident,$sub:ident,$target:ident,$val:expr) => {{ + //subtraction + let (sum, mut carry) = $sum[0].carrying_add($sub[0]); + $target[0] = sum; + for i in 1..($len-1) { + let tmp = $sum[i].dd() + .wrapping_add($sub[i].dd()) + .wrapping_add(carry.dd()); + $target[i] = tmp.lo(); + $sum[i].unset_all(); + carry = tmp.hi(); + } + $target[$len-1] = $sum[$len-1] + .wrapping_add($sub[$len-1]) + .wrapping_add(carry); + $sum[$len-1].unset_all(); + //set $val + $sum[0] = $val; + }} +} + +/// Assigns `$sum + $sub` to `$target` (up to `$sum_len`), +/// and assigns `$val + $add` to `$sum` (up to `$add_len`). +/// Assumes that the actual slice length of `$sum` >= `$add_len`. +macro_rules! 
special1 { + ($sum_len:expr,$sum:ident,$sub:ident,$target:ident,$val:expr,$add_len:expr,$add:ident) => {{ + let (tmp, mut carry) = $sum[0].carrying_add($sub[0]); + $target[0] = tmp; + for i in 1..($sum_len-1) { + let tmp = $sum[i].dd() + .wrapping_add($sub[i].dd()) + .wrapping_add(carry.dd()); + $target[i] = tmp.lo(); + carry = tmp.hi(); + } + $target[$sum_len-1] = $sum[$sum_len-1] + .wrapping_add($sub[$sum_len-1]) + .wrapping_add(carry); + // second assignment + let (tmp, mut carry) = $add[0].carrying_add($val); + $sum[0] = tmp; + for i0 in 1..$add_len { + if carry == Digit::zero() { + for i1 in i0..$add_len { + $sum[i1] = $add[i1]; + break + } + } + let tmp = $add[i0].carrying_add(carry); + $sum[i0] = tmp.0; + carry = tmp.1; + } + for i0 in $add_len..$sum_len { + $sum[i0].unset_all(); + } + }} +} + +/// assigns `$sum + $add` to `$sum`, using only the digits up to `$len` (exclusive) +macro_rules! add { + ($len:expr,$sum:ident,$add:ident) => {{ + let (sum, mut carry) = $sum[0].carrying_add($add[0]); + $sum[0] = sum; + for i in 1..($len-1) { + let tmp = $sum[i].dd() + .wrapping_add($add[i].dd()) + .wrapping_add(carry.dd()); + $sum[i] = tmp.lo(); + carry = tmp.hi(); + } + $sum[$len-1] = $sum[$len-1] + .wrapping_add($add[$len-1]) + .wrapping_add(carry); + }} +} + +/// the `mul` or `mul - 1` algorithm with addition from `quo` +fn mul_or_mulsub1_algorithm ( + len: usize, + duo: &mut [Digit], + duo_sd: usize, + duo_sig_dd: DoubleDigit, + div: &mut [Digit], + div_sig_dd: DoubleDigit, + quo: &[Digit] +) { + // Allocation could be avoided, but if it turns out `mul - 1` should be used, more + // long division would have to occur to recover `div`, followed by a second long + // multiplication with `mul - 1`. 
+
+ // `mul` is the estimated quotient digit
+ let mul = duo_sig_dd.wrapping_div(div_sig_dd).lo();
+ // this will become `-(div * mul)`
+ let mut sub: Vec = Vec::with_capacity(len);
+ // first digit done and carry
+ let mut carry = Digit::ZERO;
+ for i in 0..=duo_sd {
+ let tmp = mul.carrying_mul_add(div[i], carry);
+ sub.push(tmp.0);
+ carry = tmp.1;
+ }
+ let sub_len = sub.len();
+ // The case where `-(div * mul)` overflows the digits available in the slices is handled
+ // automatically here
+ let mut b = true;
+ if carry == Digit::zero() {
+ ugt!(sub_len, sub, sub_len, duo,
+ {
+ b = true;
+ },
+ {
+ b = false;
+ }
+ );
+ }
+ if b {
+ // quotient = `quo + (mul - 1)`
+ // remainder = `duo + (div - (div * mul))`
+ twos_complement!(sub_len, sub);
+ add!(sub_len,sub,div);
+ special1!(sub_len,duo,sub,div,mul.wrapping_sub(Digit::one()),quo.len(),quo);
+ for i in sub_len..=duo_sd {
+ duo[i].unset_all();
+ }
+ } else {
+ // quotient = `quo + mul`
+ // remainder = `duo - (div * mul)`
+ twos_complement!(sub_len, sub);
+ special1!(sub_len,duo,sub,div,mul,quo.len(),quo);
+ }
+}
+
+/// Special long division algorithm core. This adds `mul << bits` to `quo`, and subtracts
+/// `(mul * div) << bits` from `duo`.
+/// Assumptions:
+/// - ini_duo_sd > 0
+/// - div_sd == 0
+fn special_long_division_core(
+ bits: usize,
+ duo: &mut [Digit],
+ duo_sd: usize,
+ duo_sig_dd: DoubleDigit,
+ div: &mut [Digit],
+ div_sig_d_add1: DoubleDigit,
+ quo: &mut [Digit]
+) {
+ // bits_ll is the number of lesser bits in the digit that contains both lesser
+ // and greater bits
+ let (digits, bits_ll) = (bits / Digit::BITS, bits % Digit::BITS);
+
+ // Unfortunately, `mul` here can be up to (2^2n - 1)/(2^(n-1)), where `n`
+ // is the number of bits in a `Digit`. This means that an `n+1` bit
+ // integer is needed to store mul. Because only one extra higher bit is involved,
+ // the algebraic simplification `(mul + 2^n)*div` to `mul*div + div*2^n` can be
+ // used when that highest bit is set.
This just requires faster and simpler + // addition unrolling hell instead of long multiplication unrolling hell. + let mul = duo_sig_dd.wrapping_div(div_sig_d_add1); + + // add `mul << bits` to `quo` + // no unrolling hell here because `bits_ll < n` and it takes a shift of `n` to overflow + let split_mul = mul << bits_ll; + let (tmp, mut carry) = split_mul.lo().carrying_add(quo[digits]); + quo[digits] = tmp; + let tmp = split_mul.hi().dd() + .wrapping_add(quo[digits + 1].dd()) + .wrapping_add(carry.dd()); + quo[digits + 1] = tmp.lo(); + carry = tmp.hi(); + for i in (digits+2)..quo.len() { + if carry == Digit::ZERO {break} + let tmp = quo[i].carrying_add(carry); + quo[i] = tmp.0; + carry = tmp.1; + } + + // Subtracts `(mul * div) << bits` from `duo`. + // `mul` can be stored in a `Digit` plus one bit. The first block handles if that extra + // bit is not set, and the second block handles if the bit is set. + if mul.hi() == Digit::zero() { + let mul = mul.lo(); + // carry for bits that wrap across digit boundaries when `<< bits_ll` applied + let (tmp0, mut wrap_carry) = (div[0].dd() << bits_ll).lo_hi(); + // the regular multiplication carry + let (tmp1, mut mul_carry) = mul.dd().wrapping_mul(tmp0.dd()).lo_hi(); + // this carry includes the two's complement increment carry + let (tmp2, mut add_carry) = (!tmp1).dd() + .wrapping_add(duo[digits].dd()) + .wrapping_add(Digit::one().dd()) + .lo_hi(); + duo[digits] = tmp2; + + for i in (digits + 1)..=duo_sd { + let tmp0 = ( + (div[i - digits].dd() << bits_ll) | wrap_carry.dd() + ).lo_hi(); + wrap_carry = tmp0.1; + let tmp1 = mul.dd() + .wrapping_mul(tmp0.0.dd()) + .wrapping_add(mul_carry.dd()) + .lo_hi(); + mul_carry = tmp1.1; + let tmp2 = (!tmp1.0).dd() + .wrapping_add(duo[i].dd()) + .wrapping_add(add_carry.dd()).lo_hi(); + add_carry = tmp2.1; + duo[i] = tmp2.0; + } + } else { + // | the first column is this loop prelude (the `x` is here because there is no + // | previous carry), and the each of the next columns is a new 
iteration of the loop + // v + // 2222x <- mul_carry + // 7987 <- div + // 3 <- mul (13) without high bit + // *_____ + // 23961 <- tmp0 + // + // 1111xx <- add0_carry + // 23961 <- tmp0 + // 7987 <- div shifted up by one digit (to handle the high bit in `mul`) + // +______ + // 103831 <- tmp1 + // + // add to duo with the negated tmp1 (with the carry from the two's complement + // being put into `wrap_carry`) and shifted (with `wrap_carry`) + + let mul = mul.lo(); + let (tmp0, mut mul_carry) = mul.carrying_mul(div[0]); + let tmp1 = tmp0; + let mut add0_carry = Digit::zero(); + //the increment from the two's complement can be stored in `wrap_carry` + let (tmp2, mut wrap_carry) = + ((!tmp1).dd().wrapping_add(Digit::one().dd()) << bits_ll).lo_hi(); + let (tmp3, mut add1_carry) = tmp2.carrying_add(duo[digits]); + duo[digits] = tmp3; + + for i in (digits + 1)..=duo_sd { + let tmp0 = + mul.dd() + .wrapping_mul(div[i - digits].dd()) + .wrapping_add(mul_carry.dd()); + mul_carry = tmp0.hi(); + let tmp1 = + tmp0.lo().dd() + .wrapping_add(div[i - digits - 1].dd()) + .wrapping_add(add0_carry.dd()); + add0_carry = tmp1.hi(); + let tmp2 = + ((!tmp1.lo()).dd() << bits_ll) + .wrapping_add(wrap_carry.dd()); + wrap_carry = tmp2.hi(); + let tmp3 = + tmp2.lo().dd() + .wrapping_add(duo[i].dd()) + .wrapping_add(add1_carry.dd()); + add1_carry = tmp3.hi(); + duo[i] = tmp3.lo(); + } + } +} + +// assumes that: +// - ini_duo_sd > 1 +// - div_sd > 1 +// - (`duo` / `div`) > 1 +fn large_div_by_large( + duo: &mut [Digit], // the dividend which will become the quotient + ini_duo_sd: usize, // the initial most significant digit of `duo` + ini_duo_sb: usize, // the number of significant bits in `duo` + ini_duo_lz: usize, // the number of leading zeros in `duo[ini_duo_sd]` + div: &mut [Digit], // the divisor which will become the remainder + div_sd: usize, // the most significant digit of `div` + div_sb: usize, // the number of significant bits in `div` + div_lz: usize // the number of leading 
zeros in `div[div_sd]` +) { + // initial difference between the places of the most significant bits + let ini_diff_sb = ini_duo_sb - div_sb; + + // An unrolled `mul` or `mul - 1` branch is not used here unlike in `specialized_div_rem`. + + let mut duo_sd = ini_duo_sd; + let mut duo_lz = ini_duo_lz; + // the number of lesser significant bits not a part of the greater `div_sig_d` bits + let div_lesser_bits = Digit::BITS - (div_lz as usize) + (Digit::BITS * (div_sd - 1)); + // the most significant `Digit` bits of div + let div_sig_d = if div_lz == 0 { + div[div_sd] + } else { + (div[div_sd] << div_lz) | (div[div_sd - 1] >> (Digit::BITS - div_lz)) + }; + // has to be a `DoubleDigit` in case of overflow + let div_sig_d_add1 = div_sig_d.dd().wrapping_add(Digit::one().dd()); + let mut duo_lesser_bits; + let mut duo_sig_dd; + let quo_potential = (ini_diff_sb / Digit::BITS) + 1; + let mut quo: Vec = vec![Digit::zero(); quo_potential as usize]; + loop { + duo_lesser_bits = (Digit::BITS - (duo_lz as usize)) + (Digit::BITS * (duo_sd - 2)); + duo_sig_dd = if duo_lz == 0 { + DoubleDigit::from_lo_hi(duo[duo_sd - 1],duo[duo_sd]) + } else { + (duo[duo_sd].dd() << (duo_lz + Digit::BITS)) | + (duo[duo_sd - 1].dd() << duo_lz) | + (duo[duo_sd - 2].dd() >> (Digit::BITS - duo_lz)) + }; + if div_lesser_bits <= duo_lesser_bits { + let bits = duo_lesser_bits - div_lesser_bits; + special_long_division_core(bits, duo, duo_sd, duo_sig_dd, div, div_sig_d_add1, &mut quo); + } else { + let div_sig_dd = if duo_lz == 0 { + // avoid shr overflow + DoubleDigit::from_lo_hi(div[duo_sd - 1], div[duo_sd]) + } else { + (div[duo_sd].dd() << (duo_lz + Digit::BITS)) | + (div[duo_sd - 1].dd() << duo_lz) | + (div[duo_sd - 2].dd() >> (Digit::BITS - duo_lz)) + }; + mul_or_mulsub1_algorithm( + duo.len(), + duo, + duo_sd, + duo_sig_dd, + div, + div_sig_dd, + &quo + ); + return + } + // find the new `duo_sd` + for i in (0..=duo_sd).rev() { + if duo[i] != Digit::zero() { + duo_sd = i; + break + } + if i == 0 { + // 
quotient = `quo` + // remainder = 0 + duo[..quo.len()].copy_from_slice(&quo[..]); + for i in 0..=div_sd { + div[i] = Digit::zero(); + } + return + } + } + duo_lz = duo[duo_sd].leading_zeros() as usize; + let duo_sb = (duo_sd * Digit::BITS) + (Digit::BITS - duo_lz); + + // `quo` should have 0 added to it branch + if div_sb > duo_sb { + //quotient = `quo` + //remainder = `duo` + div[..=duo_sd].copy_from_slice(&duo[..=duo_sd]); + for i in (duo_sd + 1)..=div_sd { + div[i].unset_all(); + } + duo[..quo.len()].copy_from_slice(&quo[..]); + for i in quo.len()..=duo_sd { + duo[i].unset_all(); + } + return + } + + // `quo` should have 0 or 1 added to it branch + if duo_sb == div_sb { + let place = duo_sd + 1; + // if `div <= duo` + uge!(place,duo,place,div, + { + // quotient = `quo + 1` + // remainder = `duo - div` + twos_complement!(place,div); + add!(place,div,duo); + for i0 in 0..quo.len() { + match quo[i0].overflowing_add(Digit::one()) { + (v,false) => { + duo[i0] = v; + duo[(i0 + 1)..quo.len()].copy_from_slice(&quo[(i0 + 1)..]); + for i1 in quo.len()..place { + duo[i1].unset_all(); + } + return + } + (v,true) => { + duo[i0] = v; + } + } + } + for i in quo.len()..place { + duo[i].unset_all(); + } + return + }, + { + // quotient = `quo` + // remainder = `duo` + div[..place].copy_from_slice(&duo[..place]); + duo[..quo.len()].copy_from_slice(&quo[..]); + for i in quo.len()..place { + duo[i].unset_all(); + } + return + } + ); + } + + // This can only happen if `div_sd == 2` (because of previous "quo = 0 or 1" + // branches), but it is not worth it to unroll further. 
+ if duo_sd < 2 { + // quotient = `quo + mul` + // remainder = `rem` + // simple division and addition + let duo_dd = DoubleDigit::from_lo_hi(duo[0],duo[1]); + let div_dd = DoubleDigit::from_lo_hi(div[0],div[1]); + let (mul, rem) = duo_dd.wrapping_divrem(div_dd); + div[0] = rem.lo(); + div[1] = rem.hi(); + let (tmp, mut carry) = quo[0].carrying_add(mul.lo()); + duo[0] = tmp; + let tmp = quo[1].dd() + .wrapping_add(mul.hi().dd()) + .wrapping_add(carry.dd()); + duo[1] = tmp.lo(); + carry = tmp.hi(); + for i0 in 2..quo.len() { + if carry == Digit::zero() { + duo[i0..quo.len()].copy_from_slice(&quo[i0..]); + return + } + let tmp = quo[i0].carrying_add(carry); + duo[i0] = tmp.0; + carry = tmp.1; + } + return + } + } +} + +/// # Division Operations +/// +/// **Note**: Unless otherwise noted in the function specific documentation, +/// +/// - **An error is returned** If division by zero is attempted or function arguments have unmatching bitwidths. +/// +/// - The functions **may allocate** memory. +/// +/// - The function works for only the signed or unsigned, but not both interpretations of an +/// `ApInt`. In other words, in the low-level bit-wise representation there is a difference +/// between a signed and unsigned operation by the function on fixed bit-width integers. +/// +/// - The divisions round towards zero and behave exactly like Rust's integers. There are +/// [more than 10 ways](https://en.wikipedia.org/wiki/Rounding) one might want to round to an +/// integer, however. 
This example makes the rounding floored: +/// +/// ``` +/// use apint::ApInt; +/// let mut lhs = ApInt::from(7i8); +/// let mut rhs = ApInt::from(-2i8); +/// +/// let b = lhs.is_negative() != rhs.is_negative(); +/// ApInt::wrapping_sdivrem_assign(&mut lhs, &mut rhs).unwrap(); +/// if b && !rhs.is_zero() { +/// lhs.wrapping_dec(); +/// // Some more operations can be added here, based on if the new remainder or a euclidean +/// // modulo is needed +/// } +/// +/// assert_eq!(lhs, ApInt::from(-4i8)); +/// ``` +/// +/// The divisions can be made to round any way by using a combination of `is_negative`, +/// `is_odd`, `is_zero`, `wrapping_dec`, `wrapping_inc`, and more. +/// +/// - There are edge cases involving `ApInt::signed_min_value(WIDTH)`. Functions (such as the +/// rounding function above) can break when using this value, and there is an overflow corner +/// case for signed division: +/// `ApInt::signed_min_value(WIDTH).into_wrapping_sdiv(&ApInt::one(WIDTH).into_wrapping_neg())`, +/// which overflows to `ApInt::signed_min_value(WIDTH)`. This matches the behavior of Rust's +/// `wrapping_div` functions on signed machine integers, e.g. `(-128i8).wrapping_div(-1i8)`. +/// +/// Note regarding "divrem" and "remdiv" functions: +/// In almost all integer division algorithms where "just" the quotient is calculated, the remainder +/// is also produced and actually exists in memory (or at least is only one 𝒪(n) operation away) +/// prior to being dropped or overwritten, and vice versa for remainder only calculations. Note here +/// that functions with `div` in their names (e.g. `wrapping_div`) should really be called `quo` +/// (quotient) functions, because the division process produces both the quotient and remainder. +/// However, to stay with Rust's naming scheme we have kept `div` naming. 
The instruction for
+/// division on many CPUs sets registers to both results of the division process, and compilers will
+/// detect if code uses both results and only use one division instruction. The compiler probably
+/// does not realize this for the `ApInt` division process, and thus the `divrem` and `remdiv` type
+/// instructions exist to explicitly use just one division function for both results.
+///
+/// ## Performance
+///
+/// All of the division functions in this `impl` quickly check for various edge cases and use an
+/// efficient algorithm for these cases.
+/// Small here means both small ApInt `BitWidth` and/or small signed or
+/// unsigned numerical significance.
+///
+/// - division by zero (no allocation, the inputs are left unmodified)
+/// - division of zero by any size integer (no allocation)
+/// - any division that will lead to the quotient being zero or one (no allocation)
+/// - division of small (1 `Digit`) integers (no allocation)
+/// - division of any integer by small (1 `Digit`) or very small (0.5 `Digit`) integers (no allocation)
+/// - division where the number of leading zeros of both arguments are within one `Digit` (less
+/// allocation than what long division normally requires)
+/// - during long division, the algorithm may encounter a case from above and will use that instead
+/// - division of medium size (<= 512 bits) integers
+///
+/// Currently, algorithms faster than 𝒪(n^2) are not implemented, so large integer division may be
+/// very slow compared to other algorithms.
+///
+/// Currently, there is just one internal division function that is optimized for the
+/// `udivrem` kind of function instead of `uremdiv`. A better implementation for the second one is
+/// planned for the future. It was found during designing the first implementation that
+/// this future one should be slightly faster than the current one (noticeable for small to
+/// medium size `ApInt`s) because of the way `lhs` is subtracted.
+impl ApInt { + // **Note:** The invariant of `ApInt`s where unused bits beyond the bit width must be all zero is + // used heavily here, so that no `clear_unused_bits` needs to be used. + + /// Dividing function. `duo` is divided by `div`, the quotient is assigned to `duo`, and the + /// remainder is assigned to `div`. `false` is returned if division by zero happened. Nothing is + /// modified in the case of division by zero. + pub(crate) fn algorithm_divrem(duo: &mut [Digit], div: &mut [Digit]) -> bool { + // Some parts were put into their own functions and macros because indentation levels were + // getting too high, even for me. + + // The algorithm here is just like the algorithm in + // https://github.com/AaronKutch/specialized-div-rem, + // except that there are more branches and preconditions. There are comments in these + // functions such as `// quotient is 0 or 1 branch` which correspond to the comments in + // `specialized-div-rem`. + + // Special cases are aggressively taken care of throughout this function, both because + // the core long division algorithm does not work on many edges, and because of optimization. + // This match finds the most significant non zeroes, checks for `duo` < `div`, and checks for + // division by zero. 
+ match div.iter().rposition(|x| x != &Digit::zero()) { + // `div_sd` is the most significant nonzero `div` digit, which never changes + Some(div_sd) => { + // `ini_duo_sd` is the initial most significant nonzero duo digit + let ini_duo_sd: usize = match duo.iter().rposition(|x| x != &Digit::zero()) { + Some(x) => x, + None => { + // quotient = 0 + // remainder = 0 + // duo is already 0 + for x in div.iter_mut() { + x.unset_all() + } + return true + }, + }; + + // this is placed to handle the smallest inputs quickly + if div_sd == 0 { + // smaller division branch + if ini_duo_sd == 0 { + let tmp = duo[0].wrapping_divrem(div[0]); + duo[0] = tmp.0; + div[0] = tmp.1; + return true + } + if (div[0].leading_zeros() as usize) >= (Digit::BITS / 2) { + // regular long division branch + // faster than `large_div_by_small` + large_div_by_u32(duo,ini_duo_sd, &mut div[0]); + return true + } else { + // regular long division branch + large_div_by_small(duo, ini_duo_sd, &mut div[0]); + return true + } + } + + // leading zeros of the most significant digit of the initial value of `duo` + let ini_duo_lz = duo[ini_duo_sd].leading_zeros() as usize; + // leading zeros of the most significant digit of `div` + let div_lz = div[div_sd].leading_zeros() as usize; + // initial number of significant bits of `duo` + let ini_duo_sb = (ini_duo_sd * Digit::BITS) + (Digit::BITS - ini_duo_lz); + // initial number of significant bits of `div` + let div_sb = (div_sd * Digit::BITS) + (Digit::BITS - div_lz); + + // quotient is 0 branch (but `ini_duo_sb == div_sb` is not checked yet) + if ini_duo_sb < div_sb { + // quotient = 0 + // remainder = `duo` + for i in 0..=ini_duo_sd { + div[i] = duo[i]; + duo[i].unset_all(); + } + for i in (ini_duo_sd + 1)..=div_sd { + div[i].unset_all(); + } + return true + } + + // quotient is 0 or 1 branch + if ini_duo_sb == div_sb { + let place = ini_duo_sd + 1; + uge!(place,duo,place,div, + { + // quotient = 1 + // remainder = `duo` - `div` + 
twos_complement!(place,div); + special0!(place,duo,div,div,Digit::one()); + return true + }, + { + // quotient = 0 + // remainder = `duo` + for i in 0..place { + div[i] = duo[i]; + duo[i].unset_all(); + } + return true + } + ); + } + + // `ini_duo_sd` is 1 branch + // ini_duo_sd cannot be 0 or 1 for `large_div_by_large` + if ini_duo_sd == 1 { + let tmp = DoubleDigit::from_lo_hi(duo[0], duo[1]) + .wrapping_divrem(DoubleDigit::from_lo_hi(div[0],div[1])); + duo[0] = tmp.0.lo(); + duo[1] = tmp.0.hi(); + div[0] = tmp.1.lo(); + div[1] = tmp.1.hi(); + return true + } + + large_div_by_large( + duo, + ini_duo_sd, + ini_duo_sb, + ini_duo_lz, + div, + div_sd, + div_sb, + div_lz + ); + true + }, + None => false, + } + } + + /// This function is intended to be inlined into all of the unsigned quotient and remainder + /// functions for optimal assembly. + /// `duo` is divided by `div`, and the remainder is assigned to `duo` and quotient assigned + /// to `div` + /// `false` is returned if division by zero happened. Nothing is modified in the case of + /// division by zero. + #[inline] + pub(crate) fn algorithm_remdiv(duo: &mut [Digit], div: &mut [Digit]) -> bool { + if ApInt::algorithm_divrem(duo, div) { + let mut tmp; + for i in 0..duo.len() { + tmp = duo[i]; + duo[i] = div[i]; + div[i] = tmp; + } + true + } else { + false + } + } + + /// Divides `lhs` by `rhs` using **unsigned** interpretation and sets `lhs` equal to the + /// quotient and `rhs` equal to the remainder. + /// + /// # Errors + /// + /// - If `rhs.is_zero()` + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn wrapping_udivrem_assign(lhs: &mut ApInt, rhs: &mut ApInt) -> Result<()> { + match ApInt::zip_access_data_mut_both(lhs, rhs)? 
{ + ZipDataAccessMutBoth::Inl(duo,div) => { + if *div != Digit::zero() { + let tmp = duo.wrapping_divrem(*div); + *duo = tmp.0; + *div = tmp.1; + return Ok(()) + } + } + ZipDataAccessMutBoth::Ext(duo,div) => { + if ApInt::algorithm_divrem(duo, div) { + return Ok(()) + } + } + } + // Note that the typical places `Err` `Ok` are returned is switched. This is because + // `rhs.is_zero()` is found as part of finding `duo_sd` inside `aarons_algorithm_divrem`, + // and `lhs.clone()` cannot be performed inside the match statement + Err(Error::division_by_zero(DivOp::UnsignedDivRem, lhs.clone())) + } + + /// Divides `lhs` by `rhs` using **unsigned** interpretation and sets `lhs` equal to the + /// remainder and `rhs` equal to the quotient. + /// + /// # Errors + /// + /// - If `rhs.is_zero()` + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn wrapping_uremdiv_assign(lhs: &mut ApInt, rhs: &mut ApInt) -> Result<()> { + match ApInt::zip_access_data_mut_both(lhs, rhs)? { + ZipDataAccessMutBoth::Inl(duo,div) => { + if *div != Digit::zero() { + let tmp = duo.wrapping_divrem(*div); + *duo = tmp.1; + *div = tmp.0; + return Ok(()) + } + } + ZipDataAccessMutBoth::Ext(duo,div) => { + if ApInt::algorithm_remdiv(duo, div) { + return Ok(()) + } + } + } + Err(Error::division_by_zero(DivOp::UnsignedRemDiv, lhs.clone())) + } + + /// Quotient-assigns `lhs` by `rhs` inplace using **unsigned** interpretation. + /// + /// # Errors + /// + /// - If `rhs.is_zero()` + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn wrapping_udiv_assign(&mut self, rhs: &ApInt) -> Result<()> { + match self.zip_access_data_mut_self(rhs)? 
{ + Inl(duo, div) => { + if !div.is_zero() { + *duo = duo.wrapping_div(div); + return Ok(()) + } + } + Ext(duo, div) => { + if ApInt::algorithm_divrem(duo, &mut div.to_vec()[..]) { + return Ok(()) + } + } + } + Err(Error::division_by_zero(DivOp::UnsignedDiv, self.clone())) + } + + /// Divides `lhs` by `rhs` using **unsigned** interpretation and returns the quotient. + /// + /// # Errors + /// + /// - If `rhs.is_zero()` + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn into_wrapping_udiv(self, rhs: &ApInt) -> Result { + try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_udiv_assign) + } + + /// Remainder-assigns `lhs` by `rhs` inplace using **unsigned** interpretation. + /// + /// # Errors + /// + /// - If `rhs.is_zero()` + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn wrapping_urem_assign(&mut self, rhs: &ApInt) -> Result<()> { + match self.zip_access_data_mut_self(rhs)? { + Inl(duo, div) => { + if !div.is_zero() { + *duo = duo.wrapping_rem(div); + return Ok(()) + } + } + Ext(duo, div) => { + if ApInt::algorithm_remdiv(duo, &mut div.to_vec()[..]) { + return Ok(()) + } + } + } + Err(Error::division_by_zero(DivOp::UnsignedRem, self.clone())) + } + + /// Divides `lhs` by `rhs` using **unsigned** interpretation and returns the remainder. + /// + /// # Errors + /// + /// - If `rhs.is_zero()` + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn into_wrapping_urem(self, rhs: &ApInt) -> Result { + try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_urem_assign) + } + + /// Divides `lhs` by `rhs` using **signed** interpretation and sets `lhs` equal to the + /// quotient and `rhs` equal to the remainder. + /// + /// # Errors + /// + /// - If `rhs.is_zero()` + /// - If `self` and `rhs` have unmatching bitwidths. 
+ pub fn wrapping_sdivrem_assign(lhs: &mut ApInt, rhs: &mut ApInt) -> Result<()> { + if rhs.is_zero() { + return Err(Error::division_by_zero(DivOp::SignedDivRem, lhs.clone())) + } + let (negate_lhs, negate_rhs) = match ((*lhs).is_negative(), (*rhs).is_negative()) { + (false,false) => (false,false), + (true,false) => { + lhs.wrapping_neg(); + (true, true) + }, + (false,true) => { + rhs.wrapping_neg(); + (true, false) + }, + (true,true) => { + lhs.wrapping_neg(); + rhs.wrapping_neg(); + (false, true) + }, + }; + ApInt::wrapping_udivrem_assign(lhs, rhs).unwrap(); + if negate_lhs {lhs.wrapping_neg()} + if negate_rhs {rhs.wrapping_neg()} + //clearing unused bits is handled by `wrapping_neg()` + Ok(()) + } + + /// Divides `lhs` by `rhs` using **signed** interpretation and sets `lhs` equal to the + /// remainder and `rhs` equal to the quotient. + /// + /// # Errors + /// + /// - If `rhs.is_zero()` + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn wrapping_sremdiv_assign(lhs: &mut ApInt, rhs: &mut ApInt) -> Result<()> { + if rhs.is_zero() { + return Err(Error::division_by_zero(DivOp::SignedRemDiv, lhs.clone())) + } + let (negate_lhs, negate_rhs) = match ((*lhs).is_negative(), (*rhs).is_negative()) { + (false,false) => (false,false), + (true,false) => { + lhs.wrapping_neg(); + (true, true) + }, + (false,true) => { + rhs.wrapping_neg(); + (false, true) + }, + (true,true) => { + lhs.wrapping_neg(); + rhs.wrapping_neg(); + (true, false) + }, + }; + ApInt::wrapping_uremdiv_assign(lhs, rhs).unwrap(); + if negate_lhs {lhs.wrapping_neg()} + if negate_rhs {rhs.wrapping_neg()} + //clearing unused bits is handled by `wrapping_neg()` + Ok(()) + } + + /// Quotient-assigns `lhs` by `rhs` inplace using **signed** interpretation. + /// + /// # Errors + /// + /// - If `rhs.is_zero()` + /// - If `self` and `rhs` have unmatching bitwidths. 
+ pub fn wrapping_sdiv_assign(&mut self, rhs: &ApInt) -> Result<()> { + if rhs.is_zero() { + return Err(Error::division_by_zero(DivOp::SignedDiv, self.clone())) + } + let mut rhs_clone = (*rhs).clone(); + let negate_lhs = match ((*self).is_negative(), rhs_clone.is_negative()) { + (false,false) => false, + (true,false) => { + self.wrapping_neg(); + true + }, + (false,true) => { + rhs_clone.wrapping_neg(); + true + }, + (true,true) => { + self.wrapping_neg(); + rhs_clone.wrapping_neg(); + false + }, + }; + ApInt::wrapping_udivrem_assign(self, &mut rhs_clone).unwrap(); + if negate_lhs {self.wrapping_neg()} + //clearing unused bits is handled by `wrapping_neg()` + Ok(()) + } + + /// Divides `self` by `rhs` using **signed** interpretation and returns the quotient. + /// + /// # Errors + /// + /// - If `rhs.is_zero()` + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn into_wrapping_sdiv(self, rhs: &ApInt) -> Result { + try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_sdiv_assign) + } + + /// Remainder-assigns `lhs` by `rhs` inplace using **signed** interpretation. + /// + /// # Errors + /// + /// - If `rhs.is_zero()` + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn wrapping_srem_assign(&mut self, rhs: &ApInt) -> Result<()> { + if rhs.is_zero() { + return Err(Error::division_by_zero(DivOp::SignedRem, self.clone())) + } + let mut rhs_clone = (*rhs).clone(); + let negate_lhs = match ((*self).is_negative(), rhs_clone.is_negative()) { + (false,false) => false, + (true,false) => { + self.wrapping_neg(); + true + }, + (false,true) => { + rhs_clone.wrapping_neg(); + false + }, + (true,true) => { + self.wrapping_neg(); + rhs_clone.wrapping_neg(); + true + }, + }; + ApInt::wrapping_uremdiv_assign(self, &mut rhs_clone).unwrap(); + if negate_lhs {self.wrapping_neg()} + //clearing unused bits is handled by `wrapping_neg()` + Ok(()) + } + + /// Divides `self` by `rhs` using **signed** interpretation and returns the remainder. 
+ /// + /// # Errors + /// + /// - If `rhs.is_zero()` + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn into_wrapping_srem(self, rhs: &ApInt) -> Result { + try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_srem_assign) + } +} + +// Note: these tests only work if multiplication and a few other functions work +#[cfg(test)] +mod tests { + use super::*; + use crate::info::BitWidth; + use std::u64; + + /// This is mainly to determine that signed division is negating properly, and that errors are + /// correct. + #[test] + fn small() { + /// Does all of the small division tests. + /// + /// - `$signed`: if the functions are signed divisions or not + /// - `$fun_assign`: a division function such as `wrapping_udiv_assign` with that + /// signature + /// - `$fun_into`: a division function such as `into_wrapping_udiv` with that signature + /// - `$fun`: a division function such as `wrapping_udivrem_assign` with that signature + /// - `$r0`: the quotient or remainder or both of 80 by 7, depending on division + /// function type + /// - `$r1`, `$r2`, `$r3`: 80 by -7, -80 by 7, -80 by -7. These can be 0 if `$signed` is + /// false. + macro_rules! 
s { + ($signed:expr,$fun_assign:ident,$fun_into:ident,$r0:expr,$r1:expr,$r2:expr,$r3:expr/*,$div_op:ident*/) => { + //TODO: add division by zero testing after error refactoring is finished + /*match $fun_assign + match ApInt::from(123u8).$fun_into(&ApInt::from(0u8)) { + Err(Error{kind: ErrorKind::DivisionByZero{op: DivOp::$div_op, lhs: x}, message: _, annotation: _}) => { + assert_eq!(x,ApInt::from(123u8)); + }, + _ => unreachable!(), + } + match ApInt::from(12345678912345689123456789123456789u128).*/ + { + let lhs = ApInt::from(80i8); + let rhs = ApInt::from(7i8); + let mut tmp = lhs.clone(); + tmp.$fun_assign(&rhs).unwrap(); + assert_eq!(tmp, ApInt::from($r0)); + assert_eq!(lhs.$fun_into(&rhs).unwrap(), ApInt::from($r0)); + } + if $signed { + { + let lhs = ApInt::from(80i8); + let rhs = ApInt::from(-7i8); + let mut tmp = lhs.clone(); + tmp.$fun_assign(&rhs).unwrap(); + assert_eq!(tmp, ApInt::from($r1)); + assert_eq!(lhs.$fun_into(&rhs).unwrap(), ApInt::from($r1)); + } + { + let lhs = ApInt::from(-80i8); + let rhs = ApInt::from(7i8); + let mut tmp = lhs.clone(); + tmp.$fun_assign(&rhs).unwrap(); + assert_eq!(tmp, ApInt::from($r2)); + assert_eq!(lhs.$fun_into(&rhs).unwrap(), ApInt::from($r2)); + } + { + let lhs = ApInt::from(-80i8); + let rhs = ApInt::from(-7i8); + let mut tmp = lhs.clone(); + tmp.$fun_assign(&rhs).unwrap(); + assert_eq!(tmp, ApInt::from($r3)); + assert_eq!(lhs.$fun_into(&rhs).unwrap(), ApInt::from($r3)); + } + } + }; + ($signed:expr,$fun:ident,$r0:expr,$r1:expr,$r2:expr,$r3:expr/*,$div_op:ident*/) => { + { + let mut lhs = ApInt::from(80i8); + let mut rhs = ApInt::from(7i8); + ApInt::$fun(&mut lhs, &mut rhs).unwrap(); + assert_eq!(lhs, ApInt::from($r0.0)); + assert_eq!(rhs, ApInt::from($r0.1)); + } + if $signed { + { + let mut lhs = ApInt::from(80i8); + let mut rhs = ApInt::from(-7i8); + ApInt::$fun(&mut lhs, &mut rhs).unwrap(); + assert_eq!(lhs, ApInt::from($r1.0)); + assert_eq!(rhs, ApInt::from($r1.1)); + } + { + let mut lhs = ApInt::from(-80i8); 
+ let mut rhs = ApInt::from(7i8); + ApInt::$fun(&mut lhs, &mut rhs).unwrap(); + assert_eq!(lhs, ApInt::from($r2.0)); + assert_eq!(rhs, ApInt::from($r2.1)); + } + { + let mut lhs = ApInt::from(-80i8); + let mut rhs = ApInt::from(-7i8); + ApInt::$fun(&mut lhs, &mut rhs).unwrap(); + assert_eq!(lhs, ApInt::from($r3.0)); + assert_eq!(rhs, ApInt::from($r3.1)); + } + } + } + } + s!(false,wrapping_udiv_assign,into_wrapping_udiv,11i8,0,0,0); + s!(false,wrapping_urem_assign,into_wrapping_urem,3i8,0,0,0); + s!(true,wrapping_sdiv_assign,into_wrapping_sdiv,11i8,-11i8,-11i8,11i8); + s!(true,wrapping_srem_assign,into_wrapping_srem,3i8,3i8,-3i8,-3i8); + s!(false,wrapping_udivrem_assign,(11i8,3i8),(0,0),(0,0),(0,0)); + s!(false,wrapping_uremdiv_assign,(3i8,11i8),(0,0),(0,0),(0,0)); + s!(true,wrapping_sdivrem_assign,(11i8,3i8),(-11i8,3i8),(-11i8,-3i8),(11i8,-3i8)); + s!(true,wrapping_sremdiv_assign,(3i8,11i8),(3i8,-11i8),(-3i8,-11i8),(-3i8,11i8)); + } + + /// Shifts small numbers up, for easier debugging. 
+ #[test] + fn large_shifts() { + let resize = [ + 7usize, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 137, 200, 255, + 256, 700, 907, 1024, 2018, 2019, + ]; + let lhs_shl = [ + 0usize, 1, 0, 1, 4, 6, 4, 10, 13, 0, 31, 25, 7, 17, 32, 50, 0, 64, 249, 8, 777, 0, + 900, 0, + ]; + let rhs_shl = [ + 0usize, 0, 1, 1, 3, 5, 4, 14, 10, 0, 0, 25, 0, 18, 32, 49, 100, 64, 0, 256, 64, + 900, 1000, 0, + ]; + for (i, _) in resize.iter().enumerate() { + let lhs = ApInt::from(5u8) + .into_zero_resize(BitWidth::new(resize[i]).unwrap()) + .into_wrapping_shl(lhs_shl[i]) + .unwrap(); + let rhs = ApInt::from(11u8) + .into_zero_resize(BitWidth::new(resize[i]).unwrap()) + .into_wrapping_shl(rhs_shl[i]) + .unwrap(); + let zero = ApInt::from(0u8).into_zero_resize(BitWidth::new(resize[i]).unwrap()); + let one = ApInt::from(1u8).into_zero_resize(BitWidth::new(resize[i]).unwrap()); + let product = lhs.clone().into_wrapping_mul(&rhs).unwrap(); + assert_eq!(zero.clone().into_wrapping_udiv(&lhs).unwrap(), zero); + assert_eq!(zero.clone().into_wrapping_udiv(&rhs).unwrap(), zero); + assert_eq!(lhs.clone().into_wrapping_udiv(&one).unwrap(), lhs); + assert_eq!(rhs.clone().into_wrapping_udiv(&one).unwrap(), rhs); + assert_eq!(lhs.clone().into_wrapping_udiv(&lhs).unwrap(), one); + assert_eq!(rhs.clone().into_wrapping_udiv(&rhs).unwrap(), one); + let tmp = product.clone().into_wrapping_udiv(&lhs).unwrap(); + if tmp != rhs { + panic!("lhs_shl:{:?}\nrhs_shl:{:?}\nlhs:{:?}\nrhs:{:?}\n={:?}\ntemp:{:?}",lhs_shl[i],rhs_shl[i],lhs,rhs,product,tmp); + } + assert_eq!(product.clone().into_wrapping_udiv(&rhs).unwrap(), lhs); + assert_eq!(zero.clone().into_wrapping_urem(&lhs).unwrap(), zero); + assert_eq!(zero.clone().into_wrapping_urem(&rhs).unwrap(), zero); + assert_eq!(lhs.clone().into_wrapping_urem(&one).unwrap(), zero); + assert_eq!(rhs.clone().into_wrapping_urem(&one).unwrap(), zero); + assert_eq!(lhs.clone().into_wrapping_urem(&lhs).unwrap(), zero); + 
assert_eq!(rhs.clone().into_wrapping_urem(&rhs).unwrap(), zero); + assert_eq!(product.clone().into_wrapping_urem(&lhs).unwrap(), zero); + assert_eq!(product.clone().into_wrapping_urem(&rhs).unwrap(), zero); + assert_eq!(product.clone().into_wrapping_add(&one).unwrap().into_wrapping_urem(&lhs).unwrap(), one); + assert_eq!(product.clone().into_wrapping_add(&one).unwrap().into_wrapping_urem(&rhs).unwrap(), one); + } + } + + /// This tests a number of problematic cases found during development + #[test] + fn special() { + assert_eq!( + ApInt::from(123u8) + .into_wrapping_udiv(&ApInt::from(7u8)).unwrap(), + ApInt::from(17u8)); + assert_eq!( + ApInt::from([9223372019674906879u64,18446743523953745919]) + .into_wrapping_urem(&ApInt::from([1u64,18446744073709550592])).unwrap(), + ApInt::from([1u64,18446734727860984831]) + ); + assert_eq!( + ApInt::from([9223372019674906879u64,18446743523953745919]) + .into_wrapping_urem(&ApInt::from([1u64,18446744073709550592])).unwrap(), + ApInt::from([1u64,18446734727860984831]) + ); + assert_eq!( + ApInt::from([0u64,0,0,123]) + .into_wrapping_udiv(&ApInt::from([0u64,0,0,7])).unwrap(), + ApInt::from([0u64,0,0,17])); + assert_eq!( + ApInt::from([0u64,0,0,0]) + .into_wrapping_udiv(&ApInt::from([0u64,0,0,7])).unwrap(), + ApInt::from([0u64,0,0,0])); + assert_eq!( + ApInt::from([0u64,0,0,3]) + .into_wrapping_udiv(&ApInt::from([0u64,0,0,7])).unwrap(), + ApInt::from([0u64,0,0,0])); + assert_eq!( + ApInt::from([0u64,0,0,0]) + .into_wrapping_udiv(&ApInt::from([0u64,7,0,0])).unwrap(), + ApInt::from([0u64,0,0,0])); + assert_eq!( + ApInt::from([0u64,0,0,7]) + .into_wrapping_udiv(&ApInt::from([0u64,4,0,0])).unwrap(), + ApInt::from([0u64,0,0,0])); + assert_eq!( + ApInt::from([0u64,0,3,0]) + .into_wrapping_udiv(&ApInt::from([0u64,4,0,0])).unwrap(), + ApInt::from([0u64,0,0,0])); + assert_eq!( + ApInt::from([0u64,1,0,0]) + .into_wrapping_udiv(&ApInt::from([0u64,0,0,4])).unwrap(), + ApInt::from([0u64,0,u64::MAX / 4 + 1,0])); + assert_eq!( + 
ApInt::from([0u64,1,0,0]) + .into_wrapping_udiv(&ApInt::from([0u64,0,1,0])).unwrap(), + ApInt::from([0u64,0,1,0])); + assert_eq!( + ApInt::from([1u64,2,3,4]) + .into_wrapping_udiv(&ApInt::from([1u64,2,3,4])).unwrap(), + ApInt::from([0u64,0,0,1])); + assert_eq!( + ApInt::from([0u64,1,u64::MAX,u64::MAX,u64::MAX,u64::MAX,u64::MAX,u64::MAX]) + .into_wrapping_udiv(&ApInt::from([0u64,0,0,0,0,0,0,2])).unwrap() + ,ApInt::from([0u64,0,u64::MAX,u64::MAX,u64::MAX,u64::MAX,u64::MAX,u64::MAX])); + assert_eq!( + ApInt::from([u64::MAX,u64::MAX - 1,1,u64::MAX - 1,u64::MAX - 1,2,u64::MAX - 1,1]) + .into_wrapping_udiv(&ApInt::from([0,0,0,0,u64::MAX,u64::MAX,0,u64::MAX])).unwrap(), + ApInt::from([0,0,0,0,u64::MAX,u64::MAX,0,u64::MAX]) + ); + assert_eq!(ApInt::from(61924494876344321u128).into_wrapping_urem(&ApInt::from(167772160u128)).unwrap(),ApInt::from(1u128)); + assert_eq!(ApInt::from([18446744073709551615u64, 18446744073709551615, 1048575, 18446462598732840960]).into_wrapping_urem(&ApInt::from([0u64, 0, 140668768878592, 0])).unwrap(), ApInt::from([0,0, 136545601323007, 18446462598732840960u64])); + assert_eq!(ApInt::from([1u64, 17293821508111564796, 2305843009213693952]).into_wrapping_urem(&ApInt::from([0u64,1,18446742978492891132])).unwrap(),ApInt::from([0u64,0,0])); + assert_eq!(ApInt::from([1u64,18446744073692774368,268435456]).into_wrapping_add(&ApInt::from([0u64,1,18446744073709519359])).unwrap().into_wrapping_udiv(&ApInt::from([0u64,1,18446744073709551584])).unwrap(),ApInt::from([0u64,0,18446744073701163008])); + assert_eq!(ApInt::from([1u64,18446744073692774368,268435456]).into_wrapping_udiv(&ApInt::from([0u64,1,18446744073709551584])).unwrap(),ApInt::from([0u64,0,18446744073701163008])); + assert_eq!(ApInt::from([18446744073709551615u64,18446744073709551615,18446739675663040512,2199023255552]).into_wrapping_urem(&ApInt::from([18446744073709551615u64,18446744073709551615,18446739675663040512,2199023255552])).unwrap(),ApInt::from([0u64,0,0,0])); + 
assert_eq!(ApInt::from([1u64,18446462598730776592,1047972020113]).into_wrapping_udiv(&ApInt::from([0u64,16383,18446744056529682433])).unwrap(),ApInt::from([0u64,0,2251782633816065])); + assert_eq!(ApInt::from([54467619767447688u64, 18446739675392512496, 5200531536562092095, 18446744073709551615]).into_wrapping_udiv(&ApInt::from([0u64, 8255, 18446462598732840960, 0])).unwrap(), ApInt::from([0u64,0, 6597337677824, 288230376151678976])); + assert_eq!(ApInt::from([0u64, 35184372080640, 0]).into_wrapping_mul(&ApInt::from([0u64,0,1048575])).unwrap().into_wrapping_add(&ApInt::from([0u64, 123456789, 0])).unwrap().into_wrapping_urem(&ApInt::from([0u64, 35184372080640, 0])).unwrap(),ApInt::from([0u64,123456789,0])); + } +} diff --git a/src/logic/fuzz.rs b/src/logic/fuzz.rs new file mode 100644 index 0000000..0155723 --- /dev/null +++ b/src/logic/fuzz.rs @@ -0,0 +1,395 @@ +// A note about the insane amount of cloning here: a lot of this is just unrelated one time +// assertions and we do not want potential invariant bugs to propagate down a long sequence. +// The insane amount of unwrapping is done here instead of returning `Result<(), Error>` from all +// the functions, because it removes the ability to see what line an error came from. 
+ +#[cfg(test)] +mod tests { + use crate::data::ApInt; + use crate::info::BitWidth; + + mod general_regressions { + use super::*; + use std::u64; + + #[test] + fn pull_request_35_regression() { + let width = BitWidth::new(65).unwrap(); + //arithmetic shift right shift + assert_eq!( + ApInt::from([1u64, u64::MAX - (1 << 6)]).into_truncate(width).unwrap(), + ApInt::from([1u64, u64::MAX - (1 << 10)]).into_truncate(width).unwrap() + .into_wrapping_ashr(4).unwrap() + ); + //multiplication related + let v1 = ApInt::from((1u128 << 64) | (7u128)).into_zero_resize(width); + let v2 = ApInt::one(BitWidth::w1()).into_zero_extend(width).unwrap().into_wrapping_shl(64).unwrap(); + let v3 = v1.clone().into_wrapping_mul(&v2).unwrap(); + assert_eq!(v1, ApInt::from([1u64,7]).into_zero_resize(width)); + assert_eq!(v2, ApInt::from([1u64,0]).into_zero_resize(width)); + assert_eq!(v3, ApInt::from([1u64,0]).into_zero_resize(width)); + let width = BitWidth::new(193).unwrap(); + let v3 = ApInt::from([0u64, 0, 17179852800, 1073676288]).into_zero_resize(width).into_wrapping_mul(&ApInt::from(1u128 << 115).into_zero_resize(width)).unwrap(); + assert_eq!(v3, ApInt::from([0u64, 0, 17179852800, 1073676288]).into_wrapping_shl(115).unwrap().into_zero_resize(width)); + } + } + + mod megafuzz { + use super::*; + use std::u64; + use rand::random; + + //throws all the functions together for an identities party. If one function is incorrect, + //the whole thing should break. 
+ fn identities_unary(size: usize, width: BitWidth, lhs: ApInt) { + let shift = random::() % size; + // basic addition and subtraction tests + // subtracts one below and one above `lhs` + let mut temp = lhs.clone().into_wrapping_inc(); + assert_eq!(temp, lhs.clone().into_wrapping_add(&ApInt::one(width)).unwrap()); + assert_eq!(temp, lhs.clone().into_wrapping_sub(&ApInt::all_set(width)).unwrap()); + temp.wrapping_dec(); + assert_eq!(temp, lhs); + temp.wrapping_dec(); + assert_eq!(temp, lhs.clone().into_wrapping_sub(&ApInt::one(width)).unwrap()); + assert_eq!(temp, lhs.clone().into_wrapping_add(&ApInt::all_set(width)).unwrap()); + temp.wrapping_inc(); + assert_eq!(temp, lhs); + + // rotation tests + let rotated_left = if shift == 0 { + lhs.clone() + } else { + lhs.clone().into_wrapping_shl(shift).unwrap() | (&lhs.clone().into_wrapping_lshr(size - shift).unwrap()) + }; + assert_eq!(rotated_left, lhs.clone().into_rotate_left(shift).unwrap()); + let rotated_right = if shift == 0 { + lhs.clone() + } else { + lhs.clone().into_wrapping_lshr(shift).unwrap() | (&lhs.clone().into_wrapping_shl(size - shift).unwrap()) + }; + assert_eq!(rotated_right, lhs.clone().into_rotate_right(shift).unwrap()); + + // unsigned power of two multiplication shifting + let mut tmp1 = ApInt::one(BitWidth::w1()).into_zero_extend(width).unwrap().into_wrapping_shl(shift).unwrap(); + assert_eq!( + lhs.clone().into_wrapping_shl(shift).unwrap(), + lhs.clone().into_wrapping_mul(&tmp1).unwrap() + ); + + // signed power of two multiplication shifting + assert_eq!( + lhs.clone().into_wrapping_neg().into_wrapping_shl(shift).unwrap(), + lhs.clone().into_wrapping_mul( + &ApInt::one(BitWidth::w1()).into_sign_extend(width).unwrap().into_wrapping_shl(shift).unwrap() + ).unwrap() + ); + + // unsigned power of two division shifting + assert_eq!( + lhs.clone().into_wrapping_lshr(shift).unwrap(), + lhs.clone().into_wrapping_udiv(&tmp1).unwrap() + ); + + // signed power of two division shifting + if (tmp1 == 
ApInt::signed_min_value(width)) && (lhs == ApInt::signed_min_value(width)) { + // numerical corner case where the result of the shift is -1 but the division ends + // up as +1 + assert_eq!( + lhs.clone().into_wrapping_ashr(shift).unwrap(), + ApInt::all_set(width) + ); + assert_eq!( + lhs.clone().into_wrapping_sdiv(&tmp1).unwrap(), + ApInt::one(width) + ); + } else { + let mut tmp0 = lhs.clone(); + // make it a floored division, and note that `b` is set to `tmp0.is_negative()` + // instead of `tmp0.is_negative() != tmp1.is_negative()` because of how the `tmp1` + // division behaves at shift = width - 1 + let b = tmp0.is_negative(); + ApInt::wrapping_sdivrem_assign(&mut tmp0, &mut tmp1).unwrap(); + if b && !tmp1.is_zero() { + tmp0.wrapping_dec(); + } + + assert_eq!(tmp0, lhs.clone().into_wrapping_ashr(shift).unwrap()); + } + } + + fn identities_binary(size: usize, width: BitWidth, zero: &ApInt, lhs: ApInt, rhs: ApInt, third: ApInt) { + let rand_width = BitWidth::new((random::() % size) + 1).unwrap(); + let wrapping_add = lhs.clone().into_wrapping_add(&rhs).unwrap(); + + //overflowing addition test + let mut overflowing_uadd = lhs.clone(); + let uoverflow = overflowing_uadd.overflowing_uadd_assign(&rhs).unwrap(); + assert_eq!(wrapping_add, overflowing_uadd); + if lhs.checked_uge(&rhs).unwrap() { + assert_eq!(wrapping_add.checked_ult(&lhs).unwrap(), uoverflow); + } else { + assert_eq!(wrapping_add.checked_ult(&rhs).unwrap(), uoverflow); + } + + let mut overflowing_sadd = lhs.clone(); + let soverflow = overflowing_sadd.overflowing_sadd_assign(&rhs).unwrap(); + assert_eq!(wrapping_add, overflowing_sadd); + if lhs.is_negative() == rhs.is_negative() { + assert!((overflowing_sadd.is_negative() != lhs.is_negative()) == soverflow); + } else { + assert!(!soverflow); + } + + // Wrapping multiplication test. Note: this just tests that the nonwrapping and + // wrapping multiplication branches correspond to each other, but not that the + // multiplications in general are working. 
The test for that is in `mul.rs`. + assert_eq!( + lhs.clone() + .into_zero_extend(BitWidth::new(size * 2).unwrap()).unwrap() + .into_wrapping_mul( + &rhs.clone() + .into_zero_extend(BitWidth::new(size * 2).unwrap()).unwrap() + ).unwrap() + .into_zero_resize(rand_width), + lhs.clone() + .into_wrapping_mul(&rhs).unwrap() + .into_zero_resize(rand_width) + ); + + // overflowing multiplication test + let tmp0 = lhs.clone().into_zero_extend(BitWidth::new(size * 2).unwrap()).unwrap() + .into_wrapping_mul( + &rhs.clone().into_zero_extend(BitWidth::new(size * 2).unwrap()).unwrap() + ).unwrap(); + let mut tmp1 = lhs.clone(); + let overflow = tmp1.overflowing_umul_assign(&rhs).unwrap(); + // first check that overflow is working + assert_eq!((size * 2) - tmp0.leading_zeros() > size, overflow); + // check that the result is equal to what the wrapping mul produces + assert_eq!(tmp0.into_truncate(width).unwrap(), tmp1); + + //multiplication and division tests + //the following tests that `((lhs * rhs) + (third % rhs)) / rhs == lhs` and + //`((lhs * rhs) + (third % rhs)) % rhs == (third % rhs)` + let tot_leading_zeros = lhs.leading_zeros() + rhs.leading_zeros(); + //this trims down `lhs` until overflow will not happen + let anti_overflow_mask = if tot_leading_zeros < size { + if rhs.leading_zeros() == 0 { + ApInt::zero(width) + } else { + ApInt::one(BitWidth::w1()).into_sign_extend(rhs.leading_zeros()).unwrap().into_zero_extend(width).unwrap() + } + } else { + ApInt::one(BitWidth::w1()).into_sign_extend(width).unwrap() + }; + let mul = (lhs.clone() & &anti_overflow_mask).into_wrapping_mul(&rhs).unwrap(); + if rhs != *zero { + let rem = third.clone().into_wrapping_urem(&rhs).unwrap(); + let mut temp0 = mul.clone(); + if !temp0.overflowing_uadd_assign(&rem).unwrap() { + let mut temp1 = rhs.clone(); + let mul_plus_rem = temp0.clone(); + ApInt::wrapping_udivrem_assign(&mut temp0, &mut temp1).unwrap(); + if temp0 != (lhs.clone() & &anti_overflow_mask) {panic!("wrong 
div\nlhs:{:?}\nrhs:{:?}\nthird:{:?}\nrem:{:?}\nmul:{:?}\nmul_plus_rem:{:?}\ntemp0:{:?}\ntemp1:{:?}",(lhs.clone() & &anti_overflow_mask),rhs,third,rem,mul,mul_plus_rem,temp0,temp1)} + if temp1 != rem {panic!("wrong rem\nlhs:{:?}\nrhs:{:?}\nthird:{:?}\nrem:{:?}\nmul:{:?}\nmul_plus_rem:{:?}\ntemp0:{:?}\ntemp1:{:?}",(lhs.clone() & &anti_overflow_mask),rhs,third,rem,mul,mul_plus_rem,temp0,temp1)} + } + } + } + + //random length AND, XOR, and OR fuzzer; + fn fuzz_random(size: usize, iterations: usize) { + let width = BitWidth::new(size).unwrap(); + let mut lhs = ApInt::from(0u64).into_zero_resize(width); + let mut rhs = ApInt::from(0u64).into_zero_resize(width); + let mut third = ApInt::from(0u64).into_zero_resize(width); + let zero = ApInt::from(0u64).into_zero_resize(width); + for _ in 0..iterations { + //the `identities_` functions are very expensive so it makes sense to run this + //multiple times to cover a larger space + for _ in 0..12 { + let r0 = random::() % size; + let r1 = random::() % size; + let mask = if r0 == 0 { + ApInt::zero(BitWidth::new(size).unwrap()) + } else { + ApInt::one(BitWidth::new(1).unwrap()) + .into_sign_extend(r0).unwrap() + .into_zero_extend(width).unwrap() + .into_rotate_left(r1).unwrap() + }; + match random::() % 16 { + 0 => lhs |= &mask, + 1 => lhs &= &mask, + 2 => lhs ^= &mask, + 3 => lhs ^= &mask, + 4 => rhs |= &mask, + 5 => rhs &= &mask, + 6 => rhs ^= &mask, + 7 => rhs ^= &mask, + 8 => third |= &mask, + 9 => third &= &mask, + 10 => third ^= &mask, + 11 => third ^= &mask, + 12 => rhs |= &mask, + 13 => rhs &= &mask, + 14 => rhs ^= &mask, + 15 => rhs ^= &mask, + _ => unreachable!() + } + } + identities_unary(size, width, lhs.clone()); + identities_unary(size, width, rhs.clone()); + identities_unary(size, width, third.clone()); + identities_binary(size, width, &zero, lhs.clone(), rhs.clone(), third.clone()); + identities_binary(size, width, &zero, rhs.clone(), lhs.clone(), third.clone()); + identities_binary(size, width, &zero, 
lhs.clone(), lhs.clone(), rhs.clone()); + } + } + + //named so because nesting this causes an explosion in testing time + macro_rules! explode { + ($cd:ident, $temp:ident, $i_zero:ident, $i_one:ident, $inner:tt) => {{ + for $i_zero in 0..(2usize.pow(($cd * 2) as u32)) { + let mut $temp: Vec = Vec::with_capacity($cd); + for $i_one in 0..$cd { + match ($i_zero >> ($i_one * 2)) & 0b11 { + 0b0 => $temp.push(0), + 0b1 => $temp.push(1), + 0b10 => $temp.push(u64::MAX - 1), + 0b11 => $temp.push(u64::MAX), + _ => unreachable!() + } + } + $inner + } + }} + } + + //catch edge and corner cases involving 0, 1, Digit::MAX - 1, and Digit::MAX + fn fuzz_edge_unary(size: usize) { + let width = BitWidth::new(size).unwrap(); + let cd = + if (size % 64) == 0 {size / 64} + else {(size / 64) + 1}; + explode!(cd,temp0,i0,i1,{ + identities_unary(size, width, + ApInt::from_vec_u64(temp0.clone()).unwrap().into_truncate(size).unwrap()); + }) + } + + fn fuzz_edge_binary(size: usize) { + let width = BitWidth::new(size).unwrap(); + let zero = ApInt::from(0u64).into_zero_resize(width); + let cd = + if (size % 64) == 0 {size / 64} + else {(size / 64) + 1}; + explode!(cd,temp0,i0,i1, + {explode!(cd,temp1,i1,i2, + {explode!(cd,temp2,i2,i3, + {identities_binary(size, width, &zero, + ApInt::from_vec_u64(temp0.clone()).unwrap().into_truncate(size).unwrap(), + ApInt::from_vec_u64(temp1.clone()).unwrap().into_truncate(size).unwrap(), + ApInt::from_vec_u64(temp2.clone()).unwrap().into_truncate(size).unwrap());} + )} + )} + ) + } + + #[test] + fn fuzz_test_random() { + assert_eq!(ApInt::from_vec_u64(vec![32u64,234,23]).unwrap(),ApInt::from([32u64,234,23])); + let a = 10000; + fuzz_random(1, a); + fuzz_random(2, a); + fuzz_random(3, a); + fuzz_random(8, a); + //trying to catch edge cases by going one bit below and over + fuzz_random(31, a); + fuzz_random(32, a); + fuzz_random(33, a); + fuzz_random(63, a); + fuzz_random(64, a); + fuzz_random(65, a); + fuzz_random(100, a); + fuzz_random(127, a); + 
fuzz_random(128, a); + fuzz_random(129, a); + fuzz_random(150, a); + fuzz_random(191, a); + fuzz_random(192, a); + fuzz_random(193, a); + fuzz_random(200, a); + fuzz_random(255, a); + fuzz_random(256, a); + //this is for functions like `rotate_left_assign` which like to fail on many digits + fuzz_random(33*64, a); + } + + #[test] + fn fuzz_test_edge() { + fuzz_edge_unary(63); + fuzz_edge_unary(64); + fuzz_edge_unary(65); + fuzz_edge_unary(100); + fuzz_edge_unary(127); + fuzz_edge_unary(128); + fuzz_edge_unary(129); + fuzz_edge_unary(150); + fuzz_edge_unary(191); + fuzz_edge_unary(192); + fuzz_edge_unary(193); + fuzz_edge_unary(200); + fuzz_edge_unary(255); + fuzz_edge_unary(256); + fuzz_edge_binary(63); + fuzz_edge_binary(64); + fuzz_edge_binary(65); + fuzz_edge_binary(100); + fuzz_edge_binary(127); + fuzz_edge_binary(128); + fuzz_edge_binary(129); + fuzz_edge_binary(150); + fuzz_edge_binary(191); + fuzz_edge_binary(192); + } + + // TODO: use `rayon` to automatically split up this work among several threads + + //takes a long time to run, so this was split up into 4 tests for 4 threads + #[test] + #[ignore] + fn expensive_0() { + let a = 10000; + fuzz_random(301, a); + fuzz_random(512, a); + fuzz_random(777, a); + fuzz_random(64*5, a); + fuzz_random(16*64, a); + for _ in 0..1000 { + fuzz_random((random::() % (16 * 64)) + 1, 100); + } + for _ in 0..100 { + fuzz_edge_unary(random::() % (9 * 64) + 1) + } + } + + #[test] + #[ignore] + fn expensive_1() { + fuzz_edge_binary(193); + } + + #[test] + #[ignore] + fn expensive_2() { + fuzz_edge_binary(255); + } + + #[test] + #[ignore] + fn expensive_3() { + fuzz_edge_binary(256); + } + } +} \ No newline at end of file diff --git a/src/logic/mul.rs b/src/logic/mul.rs new file mode 100644 index 0000000..8531573 --- /dev/null +++ b/src/logic/mul.rs @@ -0,0 +1,567 @@ +use crate::data::{ApInt, Digit, DoubleDigit, ZipDataAccessMutSelf::{Inl, Ext}}; +use crate::info::Result; +#[cfg(test)] +use crate::info::Width; +use 
crate::logic::try_forward_bin_mut_impl; + +/// Multiply-assign `lhs` with `rhs` inplace. `lhs_sig_nonzero` and `rhs_sig_nonzero` indicate +/// the indexes of the most significant nonzero digits. +/// +/// Unused bits are **not** cleared by this function. +fn digitwise_wrapping_mul(lhs: &mut [Digit], rhs: &[Digit], len: usize) { + //wrapping (modular) multiplication + let sig_nonzero = len - 1; + //first digit done and carry + let tmp = lhs[0].carrying_mul(rhs[0]); + //the goal here with `sum` is to allocate and initialize it only once + //here. + //first row + let mut sum = Vec::with_capacity(len); + sum.push(tmp.0); + let mut mul_carry = tmp.1; + for rhs_i in 1..sig_nonzero { + let tmp = lhs[0].carrying_mul_add(rhs[rhs_i], mul_carry); + sum.push(tmp.0); + mul_carry = tmp.1; + } + //final digit of first row + sum.push(lhs[0].wrapping_mul_add(rhs[sig_nonzero], mul_carry)); + //middle rows + for lhs_i in 1..sig_nonzero { + //first digit of this row + let tmp0 = lhs[lhs_i].carrying_mul(rhs[0]); + mul_carry = tmp0.1; + let tmp1 = sum[lhs_i].carrying_add(tmp0.0); + //sum[lhs_i] does not need to be used again + sum[lhs_i] = tmp1.0; + let mut add_carry = tmp1.1; + //as we get to the higher lhs digits, the higher rhs digits do not + //need to be considered + let rhs_i_upper = sig_nonzero.wrapping_sub(lhs_i); + //middle digits of this row + for rhs_i in 1..rhs_i_upper { + let tmp0 = lhs[lhs_i].carrying_mul_add(rhs[rhs_i], mul_carry); + mul_carry = tmp0.1; + let tmp1: DoubleDigit = sum[lhs_i + rhs_i].dd() + .wrapping_add(tmp0.0.dd()) + .wrapping_add(add_carry.dd()); + sum[lhs_i + rhs_i] = tmp1.lo(); + add_carry = tmp1.hi(); + } + //final digit of this row + sum[sig_nonzero] = lhs[lhs_i] + .wrapping_mul(rhs[rhs_i_upper]) + .wrapping_add(mul_carry) + .wrapping_add(sum[sig_nonzero]) + .wrapping_add(add_carry); + } + lhs[..sig_nonzero].copy_from_slice(&sum[..sig_nonzero]); + //final digit (the only one in its row) + lhs[sig_nonzero] = lhs[sig_nonzero] + .wrapping_mul_add(rhs[0], 
sum[sig_nonzero]); +} + +/// Multiply-assign `lhs` with `rhs` inplace. `lhs_sig_nonzero` and `rhs_sig_nonzero` indicate +/// the indexes of the most significant nonzero digits. +/// +/// When calling this function, there must be no possibility of overflow of `lhs`. +/// +/// Unused bits are **not** cleared by this function. +fn digitwise_nonwrapping_mul( + lhs: &mut [Digit], + rhs: &[Digit], + len: usize, + lhs_sig_nonzero: usize, + rhs_sig_nonzero: usize +) { + match (lhs_sig_nonzero == 0, rhs_sig_nonzero == 0) { + (false, false) => { + // For several routines below there was a nested loop that had its first and last + // iterations unrolled (and the unrolled loops had their first and last iterations + // unrolled), and then some if statements are added for digit overflow checks. + // This is done because the compiler probably cannot properly unroll the carry + // system, overflow system, and figure out that only `Digit` multiplications were + // needed instead of `DoubleDigit` multiplications in some places. + + //first digit of first row + let mult = lhs[0]; + let tmp = mult.carrying_mul(rhs[0]); + //middle digits of first row + //the goal here with `sum` is to allocate and initialize it only once + //here. 
+ let mut sum = Vec::with_capacity(lhs_sig_nonzero + rhs_sig_nonzero + 2); + sum.push(tmp.0); + let mut mul_carry = tmp.1; + for rhs_i in 1..rhs_sig_nonzero { + let tmp = mult.carrying_mul_add(rhs[rhs_i], mul_carry); + sum.push(tmp.0); + mul_carry = tmp.1; + } + let tmp = mult.carrying_mul_add(rhs[rhs_sig_nonzero], mul_carry); + sum.push(tmp.0); + sum.push(tmp.1); + //middle rows + for lhs_i in 1..lhs_sig_nonzero { + let mult = lhs[lhs_i]; + //first digit of this row + let tmp0 = mult.carrying_mul(rhs[0]); + let mut mul_carry = tmp0.1; + let tmp1 = sum[lhs_i].carrying_add(tmp0.0); + sum[lhs_i] = tmp1.0; + let mut add_carry = tmp1.1; + //middle digits of this row + for rhs_i in 1..rhs_sig_nonzero { + let tmp0 = mult.carrying_mul_add(rhs[rhs_i], mul_carry); + mul_carry = tmp0.1; + let tmp1: DoubleDigit = sum[lhs_i + rhs_i].dd() + .wrapping_add(tmp0.0.dd()) + .wrapping_add(add_carry.dd()); + sum[lhs_i + rhs_i] = tmp1.lo(); + add_carry = tmp1.hi(); + } + //final digits of this row + let tmp0 = mult.carrying_mul_add(rhs[rhs_sig_nonzero],mul_carry); + let tmp1: DoubleDigit = sum[lhs_i + rhs_sig_nonzero].dd() + .wrapping_add(tmp0.0.dd()) + .wrapping_add(add_carry.dd()); + sum[lhs_i + rhs_sig_nonzero] = tmp1.lo(); + sum.push(tmp1.hi().wrapping_add(tmp0.1)); + } + let mult = lhs[lhs_sig_nonzero]; + //first digit of final row + let tmp0 = mult.carrying_mul(rhs[0]); + let mut mul_carry = tmp0.1; + let tmp1 = sum[lhs_sig_nonzero].carrying_add(tmp0.0); + sum[lhs_sig_nonzero] = tmp1.0; + let mut add_carry = tmp1.1; + //middle digits of final row + for rhs_i in 1..rhs_sig_nonzero { + let tmp0 = mult.carrying_mul_add(rhs[rhs_i], mul_carry); + mul_carry = tmp0.1; + let tmp1: DoubleDigit = sum[lhs_sig_nonzero + rhs_i].dd() + .wrapping_add(tmp0.0.dd()) + .wrapping_add(add_carry.dd()); + sum[lhs_sig_nonzero + rhs_i] = tmp1.lo(); + add_carry = tmp1.hi(); + } + + // final two digits + let tmp0 = mult.carrying_mul_add(rhs[rhs_sig_nonzero], mul_carry); + let tmp1: DoubleDigit = 
sum[lhs_sig_nonzero + rhs_sig_nonzero].dd() + .wrapping_add(tmp0.0.dd()) + .wrapping_add(add_carry.dd()); + sum[lhs_sig_nonzero + rhs_sig_nonzero] = tmp1.lo(); + sum.push(tmp1.hi().wrapping_add(tmp0.1)); + + if lhs.len() < sum.len() { + lhs.copy_from_slice(&sum[..lhs.len()]); + } else { + lhs[..sum.len()].copy_from_slice(&sum[..]); + } + }, + (true, false) => { + let mult = lhs[0]; + //first digit done and carry + let tmp = mult.carrying_mul(rhs[0]); + lhs[0] = tmp.0; + let mut mul_carry = tmp.1; + //middle of row + for rhs_i in 1..rhs_sig_nonzero { + let tmp = mult.carrying_mul_add(rhs[rhs_i], mul_carry); + lhs[rhs_i] = tmp.0; + mul_carry = tmp.1; + } + //final digit + if rhs_sig_nonzero == len - 1 { + lhs[rhs_sig_nonzero] = mult + .wrapping_mul_add(rhs[rhs_sig_nonzero], mul_carry); + } else { + let tmp = mult.carrying_mul_add(rhs[rhs_sig_nonzero], mul_carry); + lhs[rhs_sig_nonzero] = tmp.0; + lhs[rhs_sig_nonzero + 1] = tmp.1; + } + }, + (false, true) => { + //first digit done and carry + let tmp = rhs[0].carrying_mul(lhs[0]); + lhs[0] = tmp.0; + let mut mul_carry = tmp.1; + //middle of row + for lhs_i in 1..lhs_sig_nonzero { + let tmp = rhs[0].carrying_mul_add(lhs[lhs_i], mul_carry); + lhs[lhs_i] = tmp.0; + mul_carry = tmp.1; + } + //final digit + if lhs_sig_nonzero == len - 1 { + lhs[lhs_sig_nonzero] = rhs[0] + .wrapping_mul_add(lhs[lhs_sig_nonzero], mul_carry); + } else { + let tmp = rhs[0].carrying_mul_add(lhs[lhs_sig_nonzero], mul_carry); + lhs[lhs_sig_nonzero] = tmp.0; + lhs[lhs_sig_nonzero + 1] = tmp.1; + } + }, + (true, true) => { + let tmp0 = lhs[0].carrying_mul(rhs[0]); + lhs[0] = tmp0.0; + lhs[1] = tmp0.1; + } + } +} + +/// # Multiplication Operations +/// +/// **Note**: unless otherwise noted in the function specific documentation, +/// +/// - **An Error is returned** if function arguments have unmatching bitwidths. +/// - The functions **may allocate** memory. +/// - The function works for both signed and unsigned interpretations of an `ApInt`. 
In other words, +/// in the low-level bit-wise representation there is no difference between a signed and unsigned +/// operation by a certain function on fixed bit-width integers. (Cite: LLVM) +/// +/// ## Performance +/// +/// All of the multiplication functions in this `impl` quickly check for various edge cases and use +/// an efficient algorithm for these cases. If the function detects a large number of leading zeros +/// in front of the most significant set bit, it will apply optimizations so that wasted +/// multiplications and additions of zero are avoided. This function is designed to efficiently +/// handle 5 common kinds of multiplication. +/// Small here means both small ApInt `BitWidth` and/or small **unsigned** numerical +/// significance. (Signed multiplication works, but two's complement negative numbers may have a +/// large number of leading ones, leading to potential inefficiency.) +/// +/// - multiplication of zero by any size integer (no allocation) +/// - multiplication of small (<= 1 `Digit`) integers (no allocation) +/// - multiplication of small integers by large integers (or vice versa) (no allocation) +/// - wrapping multiplication of medium size (<= 512 bits) integers +/// - multiplication of medium size integers that will not overflow +/// +/// Currently, Karatsuba multiplication is not implemented, so large integer multiplication +/// may be very slow compared to other algorithms. According to Wikipedia, Karatsuba algorithms +/// outperform 𝒪(n^2) algorithms, starting around 320-640 bits +impl ApInt { + /// Multiply-assigns `rhs` to `self` inplace. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. + pub fn wrapping_mul_assign(&mut self, rhs: &ApInt) -> Result<()> { + match self.zip_access_data_mut_self(rhs)? 
{ + Inl(lhs, rhs) => { + *lhs = lhs.wrapping_mul(rhs); + } + Ext(lhs, rhs) => { + let len = rhs.len(); + //finds the most significant nonzero digit (for later optimizations) and handles + //early return of multiplication by zero. + let rhs_sig_nonzero: usize = match rhs.iter().rposition(|x| x != &Digit::zero()) { + Some(x) => x, + None => { + for x in lhs.iter_mut() { + x.unset_all() + } + return Ok(()); + } + }; + let lhs_sig_nonzero: usize = match lhs.iter().rposition(|x| x != &Digit::zero()) { + Some(x) => x, + None => { + for x in lhs.iter_mut() { + x.unset_all() + } + return Ok(()); + } + }; + let lhs_sig_bits = (lhs_sig_nonzero * Digit::BITS) + + (Digit::BITS - (lhs[lhs_sig_nonzero].leading_zeros() as usize)); + let rhs_sig_bits = (rhs_sig_nonzero * Digit::BITS) + + (Digit::BITS - (rhs[rhs_sig_nonzero].leading_zeros() as usize)); + let tot_sig_bits = lhs_sig_bits + rhs_sig_bits; + if tot_sig_bits <= (len * Digit::BITS) { + digitwise_nonwrapping_mul(lhs, rhs, len, lhs_sig_nonzero, rhs_sig_nonzero); + } else { + digitwise_wrapping_mul(lhs, rhs, len); + } + } + } + self.clear_unused_bits(); + Ok(()) + } + + /// Multiplies `rhs` with `self` and returns the result. + pub fn into_wrapping_mul(self, rhs: &ApInt) -> Result { + try_forward_bin_mut_impl(self, rhs, ApInt::wrapping_mul_assign) + } + + /// Multiply-assigns `rhs` to `self` inplace, and returns a boolean indicating if overflow + /// occured, according to the **unsigned** interpretation of overflow. + /// + /// # Errors + /// + /// - If `self` and `rhs` have unmatching bitwidths. + #[cfg(test)] + pub(crate) fn overflowing_umul_assign(&mut self, rhs: &ApInt) -> Result { + let mask = Digit::ONES >> rhs.width().unused_bits(); + match self.zip_access_data_mut_self(rhs)? 
{ + Inl(lhs, rhs) => { + let tmp = lhs.overflowing_mul(rhs); + // clear unused bits + *lhs = tmp.0 & mask; + Ok(tmp.1 || ((tmp.0 & mask) != tmp.0)) + } + Ext(lhs, rhs) => { + let len = rhs.len(); + let rhs_sig_nonzero: usize = match rhs.iter().rposition(|x| x != &Digit::zero()) { + Some(x) => x, + None => { + for x in lhs.iter_mut() { + x.unset_all() + } + return Ok(false); + } + }; + let lhs_sig_nonzero: usize = match lhs.iter().rposition(|x| x != &Digit::zero()) { + Some(x) => x, + None => { + for x in lhs.iter_mut() { + x.unset_all() + } + return Ok(false); + } + }; + let lhs_sig_bits = (lhs_sig_nonzero * Digit::BITS) + + (Digit::BITS - (lhs[lhs_sig_nonzero].leading_zeros() as usize)); + let rhs_sig_bits = (rhs_sig_nonzero * Digit::BITS) + + (Digit::BITS - (rhs[rhs_sig_nonzero].leading_zeros() as usize)); + let tot_sig_bits = lhs_sig_bits + rhs_sig_bits; + // The smallest that `lhs` and `rhs` can be is `2^(lhs_sig_bits - 1)` and + // `2^(rhs_sig_bits - 1)`, respectively. This represents the most significant set + // bit being the only set bit. The largest that `lhs` and `rhs` can be is + // `2^lhs_sig_bits - 1` and `2^rhs_sig_bits - 1`, respectively, which represents + // all bits up to the most significant set bit being set. + // The extreme values are: + // 2^(lhs_sig_bits - 1) * 2^(rhs_sig_bits - 1) = 2^(tot_sig_bits - 2), + // (2^lhs_sig_bits - 1) * (2^rhs_sig_bits - 1) = + // 2^tot_sig_bits - 2^lhs_sig_bits - 2^rhs_sig_bits + 1 + // `lhs_sig_bits` and `rhs_sig_bits` are at least 1, so the bounds can be slightly + // expanded to `2^(tot_sig_bits - 2) <= result < 2^tot_sig_bits`. + // If `tot_sig_bits` is `len * Digit::BITS`, then the result must fit in the + // digitwise representation (`result < 2^bit_len`). For `2^(tot_sig_bits - 2)` to be + // equal to `2^bit_len`, which guarantees overflow, `tot_sig_bits` must be + // `len * Digit::BITS + 2` or more. 
+                if tot_sig_bits <= (len * Digit::BITS) {
+                    digitwise_nonwrapping_mul(lhs, rhs, len, lhs_sig_nonzero, rhs_sig_nonzero);
+                    // test for overflow into the unused bits
+                    let tmp = lhs[len - 1] & mask;
+                    let overflow = tmp != lhs[len - 1];
+                    // clear unused bits
+                    lhs[len - 1] = tmp;
+                    Ok(overflow)
+                } else if tot_sig_bits >= (len * Digit::BITS) + 2 {
+                    digitwise_wrapping_mul(lhs, rhs, len);
+                    lhs[len - 1] = lhs[len - 1] & mask;
+                    Ok(true)
+                } else {
+                    let mut sum: Vec = vec![Digit::ZERO; len];
+                    let mut overflow = false;
+                    for lhs_i in 0..len {
+                        let mut add_carry = Digit::ZERO;
+                        let mut mul_carry = Digit::ZERO;
+                        for rhs_i in 0..(len - lhs_i - 1) {
+                            let mul_tmp = lhs[lhs_i].carrying_mul_add(rhs[rhs_i], mul_carry);
+                            let add_tmp = sum[lhs_i + rhs_i].dd()
+                                .wrapping_add(mul_tmp.0.dd())
+                                .wrapping_add(add_carry.dd()).lo_hi();
+                            sum[lhs_i + rhs_i] = add_tmp.0;
+                            add_carry = add_tmp.1;
+                            mul_carry = mul_tmp.1;
+                        }
+                        // take out the last iteration to test for nonzeroed bits
+                        let mul_tmp = lhs[lhs_i].carrying_mul_add(rhs[len - lhs_i - 1], mul_carry);
+                        let add_tmp = sum[len - 1].dd()
+                            .wrapping_add(mul_tmp.0.dd())
+                            .wrapping_add(add_carry.dd()).lo_hi();
+                        if lhs_i != 0 && lhs[lhs_i] != Digit::ZERO {
+                            // In normal wrapping multiplication, we know that multiplying certain
+                            // digits will always overflow and have no effect on `sum` (the
+                            // `len - lhs_i - 1` bound excludes these), however we must check if
+                            // bits are set there. Only one digit needs to be checked due to the 2
+                            // bit `tot_sig_bit` range where this branch occurs.
+ if rhs[len - lhs_i] != Digit::ZERO { + overflow = true; + } + } + // clear unused bits + sum[len - 1] = add_tmp.0 & mask; + if mul_tmp.1 != Digit::ZERO + || add_tmp.1 != Digit::ZERO + || (add_tmp.0 & mask) != add_tmp.0 { + overflow = true; + } + } + lhs[..].copy_from_slice(&sum[..]); + Ok(overflow) + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::{u8,u64}; + use crate::info::BitWidth; + + #[test] + fn wrapping_nine_test() { + //there are many special case and size optimization paths, so this test must be very + //rigorous. + + //multiplication of apints composed of only u8::MAX in their least significant digits + //only works for num_u8 > 1 + fn nine_test(num_u8: usize) { + let mut lhs; + let mut rhs = ApInt::from(0u8).into_zero_resize(BitWidth::new(num_u8 * 8).unwrap()); + let nine = + ApInt::from(u8::MAX).into_zero_resize(BitWidth::new(num_u8 * 8).unwrap()); + for rhs_nine in 0..num_u8 { + rhs.wrapping_shl_assign(8usize).unwrap(); + rhs |= &nine; + lhs = ApInt::from(0u8).into_zero_resize(BitWidth::new(num_u8 * 8).unwrap()); + 'outer: for lhs_nine in 0..num_u8 { + lhs.wrapping_shl_assign(8usize).unwrap(); + lhs |= &nine; + //imagine multiplying a string of base 10 nines together. + //It will produce things like 998001, 8991, 98901, 9989001. 
+ //this uses a formula for the number of nines, eights, and zeros except here + //nine is u8::MAX, eight is u8::MAX - 1, and zero is 0u8 + let zeros_after_one = if lhs_nine < rhs_nine { + lhs_nine + } else { + rhs_nine + }; + let nines_before_eight = if lhs_nine > rhs_nine { + lhs_nine - rhs_nine + } else { + rhs_nine - lhs_nine + }; + let nines_after_eight = if lhs_nine < rhs_nine { + lhs_nine + } else { + rhs_nine + }; + let mut result = lhs.clone().into_wrapping_mul(&rhs).unwrap(); + assert_eq!(result.clone().resize_to_u8(), 1u8); + for i in 0..zeros_after_one { + if i >= num_u8 - 1 { + continue 'outer + } + result.wrapping_lshr_assign(8usize).unwrap(); + assert_eq!(result.clone().resize_to_u8(),0); + } + for i in 0..nines_before_eight { + if zeros_after_one + i >= num_u8 - 1 { + continue 'outer + } + result.wrapping_lshr_assign(8usize).unwrap(); + assert_eq!(result.clone().resize_to_u8(), u8::MAX); + } + if zeros_after_one + nines_before_eight >= num_u8 - 1 { + continue 'outer + } + result.wrapping_lshr_assign(8usize).unwrap(); + assert_eq!(result.clone().resize_to_u8(),u8::MAX - 1); + for i in 0..nines_after_eight { + if 1 + zeros_after_one + nines_before_eight + i >= num_u8 - 1 { + continue 'outer + } + result.wrapping_lshr_assign(8usize).unwrap(); + assert_eq!(result.clone().resize_to_u8(),u8::MAX); + } + } + } + } + //test inl apints + assert_eq!( + ApInt::from(u8::MAX) + .into_wrapping_mul(&ApInt::from(u8::MAX)) + .unwrap(), + ApInt::from(1u8) + ); + nine_test(2); + nine_test(3); + nine_test(4); + nine_test(7); + nine_test(8); + //test ext apints + nine_test(9); + nine_test(16); + nine_test(24); + nine_test(32); + //5 digits wide + nine_test(40); + nine_test(63); + } + + #[test] + fn wrapping_hardcoded() { + //non overflowing test + let resize = [ + 7usize, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 137, 200, 255, + 256, 700, 907, 1024, 2018, 2019, + ]; + let lhs_shl = [ + 0usize, 1, 0, 1, 4, 7, 4, 10, 13, 0, 31, 25, 7, 17, 32, 50, 0, 64, 
249, 8, 777, 0, + 1000, 0, + ]; + let rhs_shl = [ + 0usize, 0, 1, 1, 3, 6, 4, 14, 10, 0, 0, 25, 0, 18, 32, 49, 100, 64, 0, 256, 64, + 900, 1000, 0, + ]; + for (i, _) in resize.iter().enumerate() { + let lhs = ApInt::from(5u8) + .into_zero_resize(BitWidth::new(resize[i]).unwrap()) + .into_wrapping_shl(lhs_shl[i]) + .unwrap(); + let rhs = ApInt::from(11u8) + .into_zero_resize(BitWidth::new(resize[i]).unwrap()) + .into_wrapping_shl(rhs_shl[i]) + .unwrap(); + let zero = ApInt::from(0u8).into_zero_resize(BitWidth::new(resize[i]).unwrap()); + let one = ApInt::from(1u8).into_zero_resize(BitWidth::new(resize[i]).unwrap()); + let expected = ApInt::from(55u8) + .into_zero_resize(BitWidth::new(resize[i]).unwrap()) + .into_wrapping_shl(rhs_shl[i] + lhs_shl[i]) + .unwrap(); + assert_eq!(lhs.clone().into_wrapping_mul(&zero).unwrap(), zero); + assert_eq!(zero.clone().into_wrapping_mul(&rhs).unwrap(), zero); + assert_eq!(lhs.clone().into_wrapping_mul(&one).unwrap(), lhs); + assert_eq!(one.clone().into_wrapping_mul(&rhs).unwrap(), rhs); + assert_eq!(lhs.clone().into_wrapping_mul(&rhs).unwrap(), expected); + } + assert_eq!( + ApInt::from([0,0,0,0,u64::MAX,0,u64::MAX,u64::MAX]) + .into_wrapping_mul(&ApInt::from([0,0,0,0,u64::MAX,u64::MAX,0,u64::MAX])).unwrap() + ,ApInt::from([u64::MAX,0,1,u64::MAX - 3,1,u64::MAX,u64::MAX,1])); + } + + #[test] + fn overflowing() { + // more rigorous testing is done in `fuzz.rs` + let bw = BitWidth::new(65).unwrap(); + let mut lhs = ApInt::from(1u128 << 64).into_zero_resize(bw); + let rhs = lhs.clone(); + assert!(lhs.overflowing_umul_assign(&rhs).unwrap()); + let mut lhs = ApInt::from(1u128 << 64).into_zero_resize(bw); + let rhs = ApInt::unsigned_max_value(bw); + assert!(lhs.overflowing_umul_assign(&rhs).unwrap()); + let mut lhs = ApInt::from((u64::MAX as u128) << 64); + let rhs = ApInt::one(BitWidth::w128()); + assert!(!lhs.overflowing_umul_assign(&rhs).unwrap()); + let mut lhs = ApInt::one(BitWidth::w128()); + let rhs = ApInt::from((u64::MAX as u128) 
<< 64); + assert!(!lhs.overflowing_umul_assign(&rhs).unwrap()); + } +} diff --git a/src/logic/shift.rs b/src/logic/shift.rs new file mode 100644 index 0000000..e6001ad --- /dev/null +++ b/src/logic/shift.rs @@ -0,0 +1,880 @@ +use crate::data::{ApInt, DataAccessMut, Digit}; +use crate::info::{Result, Width, ShiftAmount}; +use crate::logic::try_forward_bin_mut_impl; + +/// # Shift Operations +/// +/// **Note**: unless otherwise noted in the function specific documentation, +/// +/// - The functions do **not** allocate memory. +impl ApInt { + + /// Left-shifts this `ApInt` by the given `shift_amount` bits. + /// + /// # Note + /// + /// Left shifts can act as a very fast multiplication by a power of two for both the signed and unsigned + /// interpretation of `ApInt`s. + /// + /// # Errors + /// + /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. + pub fn wrapping_shl_assign(&mut self, shift_amount: S) -> Result<()> + where S: Into + { + let s = shift_amount.into(); + s.verify_shift_amount(self)?; + //prevents shift overflow below + if s.is_zero() {return Ok(())} + let (digits, bits) = s.digit_bit_steps(); + match self.access_data_mut() { + DataAccessMut::Inl(x) => { + *x <<= bits; + } + DataAccessMut::Ext(x) => { + let uns = Digit::BITS - bits; + if digits == 0 { + //subdigit shift + for i in (0..(x.len() - 1)).rev() { + x[i + 1] = (x[i] >> uns) | (x[i + 1] << bits); + } + x[0] <<= bits; + } else if bits == 0 { + //digit shift + for i in (digits..x.len()).rev() { + x[i] = x[i - digits]; + } + for i in 0..digits { + x[i].unset_all(); + } + } else { + //digit and subdigit shift + for i in ((digits + 1)..x.len()).rev() { + x[i] = (x[i - 1 - digits] >> uns) | (x[i - digits] << bits); + } + x[digits] = x[0] << bits; + for i in 0..digits { + x[i].unset_all(); + } + } + } + } + self.clear_unused_bits(); + Ok(()) + } + + /// Shift this `ApInt` left by the given `shift_amount` bits and returns the result. 
+ /// + /// # Errors + /// + /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. + pub fn into_wrapping_shl(self, shift_amount: S) -> Result + where S: Into + { + try_forward_bin_mut_impl(self, shift_amount, ApInt::wrapping_shl_assign) + } + + /// Logically right-shifts this `ApInt` by the given `shift_amount` bits. + /// + /// # Note + /// + /// Logical right shifts do not copy the sign bit (the most significant bits are filled up with + /// zeros), and thus can act as a very fast floored division by a power of two for the **unsigned** + /// interpretation of `ApInt`s. + /// + /// # Errors + /// + /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. + pub fn wrapping_lshr_assign(&mut self, shift_amount: S) -> Result<()> + where S: Into + { + let s = shift_amount.into(); + s.verify_shift_amount(self)?; + //prevents shift overflow below + if s.is_zero() {return Ok(())} + let (digits, bits) = s.digit_bit_steps(); + match self.access_data_mut() { + DataAccessMut::Inl(x) => { + *x >>= bits + } + DataAccessMut::Ext(x) => { + let uns = Digit::BITS - bits; + let diff = x.len() - digits; + if digits == 0 { + //subdigit shift + for i in 0..(x.len() - 1) { + x[i] = (x[i] >> bits) | (x[i + 1] << uns); + } + x[x.len() - 1] >>= bits; + } else if bits == 0 { + //digit shift + for i in digits..x.len() { + x[i - digits] = x[i]; + } + for i in 0..digits { + x[i + diff].unset_all(); + } + } else { + //digit and subdigit shift + for i in digits..(x.len() - 1) { + x[i - digits] = (x[i] >> bits) | (x[i + 1] << uns); + } + x[diff - 1] = x[x.len() - 1] >> bits; + for i in 0..digits { + x[i + diff].unset_all(); + } + } + } + } + Ok(()) + } + + /// Logically right-shifts this `ApInt` by the given `shift_amount` bits + /// and returns the result. + /// + /// # Errors + /// + /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. 
+ pub fn into_wrapping_lshr(self, shift_amount: S) -> Result + where S: Into + { + try_forward_bin_mut_impl(self, shift_amount, ApInt::wrapping_lshr_assign) + } + + /// Arithmetically right-shifts this `ApInt` by the given `shift_amount` bits. + /// + /// # Note + /// + /// Arithmetic right shifts copy the sign bit to the most significant bits, and thus can act as + /// a very fast floored division by a power of two for the **signed** interpretation of `ApInt`s. + /// + /// # Errors + /// + /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. + pub fn wrapping_ashr_assign(&mut self, shift_amount: S) -> Result<()> + where S: Into + { + if !self.sign_bit() { + return self.wrapping_lshr_assign(shift_amount) + } + let s = shift_amount.into(); + s.verify_shift_amount(self)?; + //prevents shift overflow below + if s.is_zero() {return Ok(())} + let width = self.width(); + let width_bits = width.to_usize() % Digit::BITS; + let (digits, bits) = s.digit_bit_steps(); + let uns = Digit::BITS - bits; + match self.access_data_mut() { + DataAccessMut::Inl(x) => { + *x = (*x >> bits) | (Digit::ONES << (width.to_usize() - bits)); + } + DataAccessMut::Ext(x) => { + if width_bits != 0 { + x[x.len() - 1].sign_extend_from(width_bits).unwrap(); + } + let diff = x.len() - digits; + if digits == 0 { + //subdigit shift + for i in 0..(x.len() - 1) { + x[i] = (x[i] >> bits) | (x[i + 1] << uns); + } + x[x.len() - 1] = (x[x.len() - 1] >> bits) | (Digit::ONES << uns); + } else if bits == 0 { + //digit shift + for i in digits..x.len() { + x[i - digits] = x[i]; + } + for i in 0..digits { + x[i + diff].set_all(); + } + } else { + //digit and subdigit shift + for i in digits..(x.len() - 1) { + x[i - digits] = (x[i] >> bits) | (x[i + 1] << uns); + } + x[diff - 1] = (x[x.len() - 1] >> bits) | (Digit::ONES << uns); + for i in 0..digits { + x[i + diff].set_all(); + } + } + } + } + self.clear_unused_bits(); + Ok(()) + } + + /// Arithmetically right-shifts this `ApInt` by the 
given `shift_amount` bits + /// and returns the result. + /// + /// # Errors + /// + /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. + pub fn into_wrapping_ashr(self, shift_amount: S) -> Result + where S: Into + { + try_forward_bin_mut_impl(self, shift_amount, ApInt::wrapping_ashr_assign) + } + + /// Circularly left-rotates this `ApInt` by the given `shift_amount` bits. In other words, the + /// bits are shifted like a logical left shift would, except the bits that go outside the bit + /// width of the `ApInt` wrap around to the least significant bits. + /// + /// # Errors + /// + /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`. + /// + /// # Performance + /// + /// This function is equivalent to the following: + /// ``` + /// use apint::{ApInt, Width}; + /// let mut input = ApInt::from([1u64, 2, 3, 4]); + /// let clone = input.clone(); + /// // rotate left by one whole digit + /// let shift = 64usize; + /// + /// let mut output = input.clone(); + /// if shift != 0 { + /// output.wrapping_shl_assign(shift).unwrap(); + /// input.wrapping_lshr_assign(input.width().to_usize() - shift).unwrap(); + /// output |= &input; + /// }; + /// + /// assert_eq!(output, clone.into_rotate_left(shift).unwrap()); + // TODO: after adding a ApInt little-endian constructor, fix this + // Note that the rotate functions on slices work according to big-endian and the bitwise rotate + // functions in Rust and here work according to little-endian, i.e. they work in opposite + // directions + /// let mut array = [1u64, 2, 3, 4]; + /// array.rotate_left(1); + /// assert_eq!(output, ApInt::from(array)); + /// ``` + /// + /// However, this function avoids allocation and has many optimized branches for different input + /// sizes and shifts. 
+ pub fn rotate_left_assign(&mut self, shift_amount: S) -> Result<()> + where S: Into + { + //A rotate left function that assumes `(0 < s < Digit::BITS) && (x.len() > 1)`, and treats + //`x` as a whole `ApInt`. + fn subdigit_rotate_left(x: &mut [Digit], s: usize) { + let uns = Digit::BITS - s; + //keep the end for wrapping around to the beginning + let wrap_around = (x[x.len() - 1] >> uns) | (x[0] << s); + for i in (0..(x.len() - 1)).rev() { + x[i + 1] = (x[i] >> uns) | (x[i + 1] << s); + } + x[0] = wrap_around; + } + + //A rotate right function that assumes `(0 < s < Digit::BITS) && (x.len() > 1)`, and treats + //`x` as one whole `ApInt` + fn subdigit_rotate_right(x: &mut [Digit], s: usize) { + let uns = Digit::BITS - s; + //keep the beginning for wrapping around to the end + let wrap_around = (x[x.len() - 1] >> s) | (x[0] << uns); + for i in 0..(x.len() - 1) { + x[i] = (x[i] >> s) | (x[i + 1] << uns); + } + x[x.len() - 1] = wrap_around; + } + + //A rotate left function that assumes + //`(0 < s < Digit::BITS) && (end_bits > 0) && (x.len() > 1)`. `end_bits` is + //`width % Digit::BITS`. 
+        fn subdigit_rotate_left_nonmultiple(x: &mut [Digit], end_bits: usize, s: usize) {
+            let uns = Digit::BITS - s;
+            let end_mask = Digit::ONES >> (Digit::BITS - end_bits);
+            //handle tricky wrap around from the end to the beginning
+            let mut tmp0 = if s > end_bits {
+                (x[x.len() - 2] >> (Digit::BITS + end_bits - s))
+                    | (x[x.len() - 1] << (s - end_bits))
+                    | (x[0] << s)
+            } else {
+                (x[x.len() - 1] >> (end_bits - s)) | (x[0] << s)
+            };
+            let mut tmp1: Digit;
+            let mut i = 0;
+            loop {
+                tmp1 = (x[i] >> uns) | (x[i + 1] << s);
+                x[i] = tmp0;
+                i += 1;
+                if i == (x.len() - 1) {
+                    x[i] = tmp1 & end_mask;
+                    return
+                }
+                tmp0 = (x[i] >> uns) | (x[i + 1] << s);
+                x[i] = tmp1;
+                i += 1;
+                if i == (x.len() - 1) {
+                    x[i] = tmp0 & end_mask;
+                    return
+                }
+            }
+        }
+
+        //for `ApInt`s with a bit width that is not an integer multiple of `Digit`s, and a shift
+        //equal to or larger than a digit, `subdigit_rotate_left` and `digit_rotate_left` should
+        //be used followed by this to correct the shift.
+        //assumes `(digits > 0) && (end_bits > 0)`
+        fn nonmultiple_rotate_correction(x: &mut [Digit], end_bits: usize, digits: usize, shift_bits: usize) {
+            //digits > 0, so the bits after the end_bits will always all be wrap around bits
+            let unshift = Digit::BITS - shift_bits;
+            let unbits = Digit::BITS - end_bits;
+            if shift_bits == 0 {
+                //This will require the indexing equivalent of magic numbers, so there is just no
+                //way to explain this in words. The only way is to look at a diagram of bits.
+                //Digit::BITS is 8 here, and the bits are each represented by a single alpha numeric
+                //character.
+ //01234567_89ABCDEF //all the bits of `x`, including unused end bits + //end_bits == 1 in this example, and the shift is 8, so `digit_rotate_left` is used + //to produce this + //89ABCDEF_01234567 + //the garbage bits are marked by `x`s, and the bits separated by the space are the + //wraparound + //8xxxxxxx_01234567 + //the result needs to look like this + //12345678_0 //the lowest bits and end bits are shifted to get a correct answer + + //By looking at diagram and visually changing the shift_bits and end_bits, we arrive + //at this solution. + //in reverse order to avoid temporaries + for i in (0..(digits - 1)).rev() { + x[i + 1] = (x[i] >> end_bits) | (x[i + 1] << unbits); + } + //wrap around. + //Note that `digits > 0`, so bits after the end bits will always wrap around + x[0] = (x[x.len() - 1] >> end_bits) | (x[0] << unbits); + //get rid of the wraparound + x[x.len() - 1] = x[x.len() - 1] & (Digit::ONES >> unbits); + } else if unbits < shift_bits { + //whenever the garbage bits are located in one digit + //01234567_89ABCDEF_GHIJKLMN //all of x + //01234567_89ABCDEF_GHIJKL //actual width end_bits = 6 + //DEFGHIJK_LMN01234_56789ABC //x shifted by 11 + //DEFGHIJK_Lxx01234_56789A BC + //BCDEFGHI_JKL01234_56789A //the result + + //01234567_89ABCDEF //all of x + //01234567_89ABCDE //actual width end_bits = 7 + //56789ABC_DEF01234 //x shifted by 11 + //56789ABC_DEx0123 4 + //456789AB_CDE0123 //the result + + //start with overwriting the bits in the middle + x[digits] = (x[digits - 1] >> end_bits) + | ((x[digits] & (Digit::ONES >> (unshift + unbits))) << unbits) + | (x[digits] & (Digit::ONES << shift_bits)); + + //shift the lower bits up in reverse order to avoid temporaries + for i in (0..(digits - 1)).rev() { + x[i + 1] = (x[i] >> end_bits) | (x[i + 1] << unbits); + } + //wrap around + x[0] = (x[x.len() - 1] >> end_bits) | (x[0] << unbits); + //get rid of the left overs + x[x.len() - 1] = x[x.len() - 1] & (Digit::ONES >> unbits); + } else { + //same as above but 
the bits that we want to overwrite are across digit boundaries + //01234567_89ABCDEF_GHIJKLMN //all of x + //01234567_89ABCDEF_GHI //end_bits = 3 + //DEFGHIJK_LMN01234_56789ABC //shifted left 11 + //DEFGHIxx_xxx01234_567 89ABC + //89ABCDEF_GHI01234_567 + + //01234567_89ABCDEF_GHIJKLMN //all of x + //01234567_89ABCDEF_G //end_bits = 1 + //FGHIJKLM_N0123456_789ABCDE //shifted left 9 + //FGxxxxxx_x0123456_7 89ABCDE + //89ABCDEF_G0123456_7 + + //this can also handle unbits == shift_bits + //01234567_89ABCDEF_GHIJKLMN //all of x + //01234567_89ABCDEF_GHIJKLM //end_bits = 7 + //FGHIJKLM_N0123456_789ABCDE //shifted left 9 + //FGHIJKLM_x0123456_789ABCD E + //EFGHIJKL_M0123456_789ABCD + + x[digits] = ((x[digits - 1] >> end_bits) & (Digit::ONES >> unshift)) + | (x[digits] & (Digit::ONES << shift_bits)); + + //in reverse order to avoid temporaries + for i in (0..(digits - 1)).rev() { + x[i + 1] = (x[i] >> end_bits) | (x[i + 1] << unbits); + } + //wrap around + x[0] = (x[x.len() - 1] >> end_bits) | (x[0] << unbits); + //get rid of the left overs + x[x.len() - 1] = x[x.len() - 1] & (Digit::ONES >> unbits); + } + } + + let s = shift_amount.into(); + s.verify_shift_amount(self)?; + if s.is_zero() {return Ok(())} + let (digits, bits) = s.digit_bit_steps(); + //this is necessary, otherwise there can be shifts by `Digit::BITS` which causes overflows + let width = self.width().to_usize(); + match self.access_data_mut() { + DataAccessMut::Inl(x) => { + *x = (((*x) << bits) | ((*x) >> (width - bits))) & (Digit::ONES >> (Digit::BITS - width)); + } + DataAccessMut::Ext(x) => { + let end_bits = width % Digit::BITS; + match (digits == 0, bits == 0, end_bits == 0) { + //`bits != 0` in the following two cases + (true, _, true) => subdigit_rotate_left(x, bits), + (true, _, false) => subdigit_rotate_left_nonmultiple(x, end_bits, bits), + (false, true, true) => x.rotate_right(digits), + (false, false, true) => { + //it is not worth it to have a single function for this, which was learned + 
//the hard way (extra masking operations cause the complicated function to
+                        //have about the same number of operations per digit as two separate shift
+                        //functions). Optimizing each function separately for SIMD is probably the
+                        //most performant.
+                        if digits == (x.len() - 1) {
+                            //faster branch
+                            subdigit_rotate_right(x, Digit::BITS - bits);
+                        } else {
+                            x.rotate_right(digits);
+                            subdigit_rotate_left(x, bits);
+                        }
+                    },
+                    (false, true, false) => {
+                        x.rotate_right(digits);
+                        nonmultiple_rotate_correction(x, end_bits, digits, bits);
+                    },
+                    (false, false, false) => {
+                        //not using the `subdigit_rotate_left_nonmultiple` function because it cuts
+                        //off needed end bits for the `nonmultiple_rotate_correction`
+                        if digits == (x.len() - 1) {
+                            //faster branch
+                            subdigit_rotate_right(x, Digit::BITS - bits);
+                        } else {
+                            x.rotate_right(digits);
+                            subdigit_rotate_left(x, bits);
+                        }
+                        nonmultiple_rotate_correction(x, end_bits, digits, bits);
+                    },
+                }
+            }
+        }
+        Ok(())
+    }
+
+    /// Circularly left-rotates this `ApInt` by the given `shift_amount` bits and returns the
+    /// result.
+    ///
+    /// # Errors
+    ///
+    /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`.
+    pub fn into_rotate_left<S>(self, shift_amount: S) -> Result<ApInt>
+        where S: Into<ShiftAmount>
+    {
+        try_forward_bin_mut_impl(self, shift_amount, ApInt::rotate_left_assign)
+    }
+
+    /// Circularly right-rotates this `ApInt` by the given `shift_amount` bits. In other words, the
+    /// bits are shifted like a logical right shift would, except the bits that go outside the bit
+    /// width of the `ApInt` wrap around to the most significant bits.
+    ///
+    /// # Errors
+    ///
+    /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`.
+    ///
+    /// # Performance
+    ///
+    /// This function is equivalent to the following:
+    /// ```
+    /// use apint::{ApInt, Width};
+    /// let mut input = ApInt::from([1u64, 2, 3, 4]);
+    /// let clone = input.clone();
+    /// // rotate right by one whole digit
+    /// let shift = 64usize;
+    ///
+    /// let mut output: ApInt = input.clone();
+    /// if shift != 0 {
+    ///     output.wrapping_lshr_assign(shift).unwrap();
+    ///     input.wrapping_shl_assign(input.width().to_usize() - shift).unwrap();
+    ///     output |= &input;
+    /// };
+    ///
+    /// assert_eq!(output, clone.into_rotate_right(shift).unwrap());
+    // TODO: after adding a ApInt little-endian constructor, fix this
+    // Note that the rotate functions on slices work according to big-endian and the bitwise rotate
+    // functions in Rust and here work according to little-endian, i.e. they work in opposite
+    // directions
+    /// let mut array = [1u64, 2, 3, 4];
+    /// array.rotate_right(1);
+    /// assert_eq!(output, ApInt::from(array));
+    /// ```
+    ///
+    /// However, this function avoids allocation and has many optimized branches for different input
+    /// sizes and shifts.
+    pub fn rotate_right_assign<S>(&mut self, shift_amount: S) -> Result<()>
+        where S: Into<ShiftAmount>
+    {
+        //compiler should be able to clean this up
+        let s = shift_amount.into();
+        // this is needed so that `width - s` does not overflow
+        s.verify_shift_amount(self)?;
+        if s.is_zero() {return Ok(())}
+        let width = self.width().to_usize();
+        self.rotate_left_assign(ShiftAmount::from(width - s.to_usize()))
+    }
+
+    /// Circularly right-rotates this `ApInt` by the given `shift_amount` bits and returns the
+    /// result.
+    ///
+    /// # Errors
+    ///
+    /// - If the given `shift_amount` is invalid for the bit width of this `ApInt`.
+    pub fn into_rotate_right<S>(self, shift_amount: S) -> Result<ApInt>
+        where S: Into<ShiftAmount>
+    {
+        try_forward_bin_mut_impl(self, shift_amount, ApInt::rotate_right_assign)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn test_reprs_w64() -> impl Iterator<Item = u64> {
+        vec![
+            0x0123_4567_89AB_CDEF,
+            0xFEDC_BA98_7654_3210,
+            0x0000_0000_0000_0000,
+            0x5555_5555_5555_5555,
+            0xAAAA_AAAA_AAAA_AAAA,
+            0xFFFF_FFFF_FFFF_FFFF,
+        ]
+        .into_iter()
+    }
+
+    fn test_apints_w64() -> impl Iterator<Item = ApInt> {
+        test_reprs_w64().map(ApInt::from_u64)
+    }
+
+    fn test_reprs_w128() -> impl Iterator<Item = u128> {
+        vec![
+            0x0123_4567_89AB_CDEF_0011_2233_4455_6677,
+            0xFEDC_BA98_7654_3210_7766_5544_3322_1100,
+            0x0000_0000_0000_0000_0000_0000_0000_0001,
+            0x8000_0000_0000_0000_0000_0000_0000_0000,
+            0x0000_0000_0000_0000_0000_0000_0000_0000,
+            0x5555_5555_5555_5555_5555_5555_5555_5555,
+            0xAAAA_AAAA_AAAA_AAAA_AAAA_AAAA_AAAA_AAAA,
+            0xFFFF_FFFF_FFFF_FFFF_FFFF_FFFF_FFFF_FFFF,
+        ]
+        .into_iter()
+    }
+
+    fn test_apints_w128() -> impl Iterator<Item = ApInt> {
+        test_reprs_w128().map(ApInt::from_u128)
+    }
+
+    mod shl {
+        use super::*;
+
+        #[test]
+        fn assign_small_ok() {
+            for repr in test_reprs_w64() {
+                for shamt in 0..64 {
+                    let mut result = ApInt::from_u64(repr);
+                    result.wrapping_shl_assign(shamt).unwrap();
+                    let expected = ApInt::from_u64(repr << shamt);
+                    assert_eq!(result, expected);
+                }
+            }
+        }
+
+        #[test]
+        fn assign_large_ok() {
+            for repr in test_reprs_w128() {
+                for shamt in 0..128 {
+                    let mut result = ApInt::from_u128(repr);
+                    result.wrapping_shl_assign(shamt).unwrap();
+                    let expected = ApInt::from_u128(repr << shamt);
+                    assert_eq!(result, expected);
+                }
+            }
+        }
+
+        #[test]
+        fn assign_xtra_large_ok() {
+            let d0 = 0xFEDC_BA98_7654_3210;
+            let d1 = 0x5555_5555_4444_4444;
+            let d2 = 0xAAAA_AAAA_CCCC_CCCC;
+            let d3 = 0xFFFF_7777_7777_FFFF;
+            let input: [u64; 4] = [d0, d1, d2, d3];
+            {
+                let shamt = 100;
+                let digit_steps = shamt / 64;
+                let bit_steps = shamt % 64;
+                assert_eq!(digit_steps, 1);
+                assert_eq!(bit_steps, 36);
+                let
result = ApInt::from(input) + .into_wrapping_shl(shamt) + .unwrap(); + let expected: [u64; 4] = [ + (d1 << bit_steps) | (d2 >> (Digit::BITS - bit_steps)), + (d2 << bit_steps) | (d3 >> (Digit::BITS - bit_steps)), + (d3 << bit_steps), + 0 + ]; + let expected = ApInt::from(expected); + assert_eq!(result, expected); + } + { + let shamt = 150; + let digit_steps = shamt / 64; + let bit_steps = shamt % 64; + assert_eq!(digit_steps, 2); + assert_eq!(bit_steps, 22); + let result = ApInt::from(input) + .into_wrapping_shl(shamt) + .unwrap(); + let expected: [u64; 4] = [ + (d2 << bit_steps) | (d3 >> (Digit::BITS - bit_steps)), + (d3 << bit_steps), + 0, + 0 + ]; + let expected = ApInt::from(expected); + assert_eq!(result, expected); + } + { + let shamt = 200; + let digit_steps = shamt / 64; + let bit_steps = shamt % 64; + assert_eq!(digit_steps, 3); + assert_eq!(bit_steps, 8); + let result = ApInt::from(input) + .into_wrapping_shl(shamt) + .unwrap(); + let expected: [u64; 4] = [ + (d3 << bit_steps), + 0, + 0, + 0 + ]; + let expected = ApInt::from(expected); + assert_eq!(result, expected); + } + } + + #[test] + fn assign_small_fail() { + for mut apint in test_apints_w64() { + assert!(apint.wrapping_shl_assign(64).is_err()) + } + } + + #[test] + fn assign_large_fail() { + for mut apint in test_apints_w128() { + assert!(apint.wrapping_shl_assign(128).is_err()) + } + } + + #[test] + fn into_equivalent_small() { + for apint in test_apints_w64() { + for shamt in 0..64 { + let mut x = apint.clone(); + let y = apint.clone(); + x.wrapping_shl_assign(shamt).unwrap(); + let y = y.into_wrapping_shl(shamt).unwrap(); + assert_eq!(x, y); + } + } + } + + #[test] + fn into_equivalent_large() { + for apint in test_apints_w128() { + for shamt in 0..128 { + let mut x = apint.clone(); + let y = apint.clone(); + x.wrapping_shl_assign(shamt).unwrap(); + let y = y.into_wrapping_shl(shamt).unwrap(); + assert_eq!(x, y); + } + } + } + } + + mod lshr { + use super::*; + + #[test] + fn assign_small_ok() { 
+ for repr in test_reprs_w64() { + for shamt in 0..64 { + let mut result = ApInt::from_u64(repr); + result.wrapping_lshr_assign(shamt).unwrap(); + let expected = ApInt::from_u64(repr >> shamt); + assert_eq!(result, expected); + } + } + } + + #[test] + fn assign_large_ok() { + for repr in test_reprs_w128() { + for shamt in 0..128 { + let mut result = ApInt::from_u128(repr); + result.wrapping_lshr_assign(shamt).unwrap(); + let expected = ApInt::from_u128(repr >> shamt); + assert_eq!(result, expected); + } + } + } + + #[test] + fn assign_small_fail() { + for mut apint in test_apints_w64() { + assert!(apint.wrapping_lshr_assign(64).is_err()) + } + } + + #[test] + fn assign_large_fail() { + for mut apint in test_apints_w128() { + assert!(apint.wrapping_lshr_assign(128).is_err()) + } + } + + #[test] + fn into_equivalent_small() { + for apint in test_apints_w64() { + for shamt in 0..64 { + let mut x = apint.clone(); + let y = apint.clone(); + x.wrapping_lshr_assign(shamt).unwrap(); + let y = y.into_wrapping_lshr(shamt).unwrap(); + assert_eq!(x, y); + } + } + } + + #[test] + fn into_equivalent_large() { + for apint in test_apints_w128() { + for shamt in 0..128 { + let mut x = apint.clone(); + let y = apint.clone(); + x.wrapping_lshr_assign(shamt).unwrap(); + let y = y.into_wrapping_lshr(shamt).unwrap(); + assert_eq!(x, y); + } + } + } + } + + mod ashr { + use super::*; + + #[test] + fn regression_stevia_01() { + let input = ApInt::from_i32(-8); + let expected = ApInt::from_u32(0x_FFFF_FFFE); + assert_eq!(input.into_wrapping_ashr(ShiftAmount::from(2)).unwrap(), expected); + } + + #[test] + fn assign_small_ok() { + for repr in test_reprs_w64() { + for shamt in 0..64 { + let mut result = ApInt::from_u64(repr); + result.wrapping_ashr_assign(shamt).unwrap(); + let expected = ApInt::from_i64((repr as i64) >> shamt); + assert_eq!(result, expected); + } + } + } + + #[test] + fn assign_large_ok() { + for repr in test_reprs_w128() { + for shamt in 0..128 { + let mut result = 
ApInt::from_u128(repr); + result.wrapping_ashr_assign(shamt).unwrap(); + let expected = ApInt::from_i128((repr as i128) >> shamt); + assert_eq!(result, expected); + } + } + } + + #[test] + fn assign_small_fail() { + for mut apint in test_apints_w64() { + assert!(apint.wrapping_ashr_assign(64).is_err()) + } + } + + #[test] + fn assign_large_fail() { + for mut apint in test_apints_w128() { + assert!(apint.wrapping_ashr_assign(128).is_err()) + } + } + + #[test] + fn into_equivalent_small() { + for apint in test_apints_w64() { + for shamt in 0..64 { + let mut x = apint.clone(); + let y = apint.clone(); + x.wrapping_ashr_assign(shamt).unwrap(); + let y = y.into_wrapping_ashr(shamt).unwrap(); + assert_eq!(x, y); + } + } + } + + #[test] + fn into_equivalent_large() { + for apint in test_apints_w128() { + for shamt in 0..128 { + let mut x = apint.clone(); + let y = apint.clone(); + x.wrapping_ashr_assign(shamt).unwrap(); + let y = y.into_wrapping_ashr(shamt).unwrap(); + assert_eq!(x, y); + } + } + } + } + + mod rotate { + use super::*; + use std::u128; + + #[test] + fn rotate_left() { + assert_eq!(ApInt::from(1u8).into_rotate_left(0).unwrap(),ApInt::from(1u8)); + assert_eq!(ApInt::from(123u8).into_rotate_left(7).unwrap(),ApInt::from(123u8.rotate_left(7))); + assert_eq!(ApInt::from(1u128).into_rotate_left(0).unwrap(),ApInt::from(1u128)); + assert_eq!(ApInt::from(1u128).into_rotate_left(1).unwrap(),ApInt::from(0b10u128)); + assert_eq!(ApInt::from(1u128).into_rotate_left(32).unwrap(),ApInt::from(0x1_0000_0000u128)); + assert_eq!(ApInt::from(1u128).into_rotate_left(64).unwrap(),ApInt::from(0x1_0000_0000_0000_0000u128)); + assert_eq!(ApInt::from(1u128).into_rotate_left(68).unwrap(),ApInt::from(0x10_0000_0000_0000_0000u128)); + assert_eq!(ApInt::from(1u128 << 126).into_rotate_left(33).unwrap(),ApInt::from(1u128 << 31)); + assert_eq!(ApInt::from(1u128 << 126).into_rotate_left(97).unwrap(),ApInt::from(1u128 << 95)); + assert_eq!(ApInt::from((1u128 << 2) + (1 << 126) + (1 << 
66)).into_rotate_left(64).unwrap(),ApInt::from((1u128 << 66) + (1 << 62) + (1 << 2))); + assert_eq!(ApInt::from((1u128 << 2) + (1 << 126) + (1 << 66)).into_rotate_left(33).unwrap(),ApInt::from((1u128 << 35) + (1 << 31) + (1 << 99))); + assert_eq!(ApInt::from((1u128 << 2) + (1 << 126) + (1 << 66)).into_rotate_left(97).unwrap(),ApInt::from((1u128 << 99) + (1 << 95) + (1 << 35))); + assert_eq!(ApInt::from(u128::MAX - 1).into_rotate_left(68).unwrap(),ApInt::from(u128::MAX - (1 << 68))); + assert_eq!(ApInt::from([8u64,4,2]).into_rotate_left(127).unwrap(),ApInt::from([1u64,4,2])); + assert_eq!(ApInt::from([0u64,0,2]).into_zero_resize(129).into_rotate_left(127).unwrap(),ApInt::from([1u64,0,0]).into_zero_resize(129)); + assert_eq!(ApInt::from(1u128 << 70).into_zero_resize(127).into_rotate_left(70).unwrap(),ApInt::from(1u128 << 13).into_zero_resize(127)); + assert_eq!(ApInt::from(1u128 << 126).into_zero_resize(127).into_rotate_left(70).unwrap(),ApInt::from(1u128 << 69).into_zero_resize(127)); + assert_eq!(ApInt::from(1u128 << 121).into_zero_resize(127).into_rotate_left(70).unwrap(),ApInt::from(1u128 << 64).into_zero_resize(127)); + assert_eq!(ApInt::from(1u128).into_zero_resize(127).into_rotate_left(82).unwrap(),ApInt::from(1u128 << 82).into_zero_resize(127)); + assert_eq!(ApInt::from([1u64,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]).into_rotate_left(6*64).unwrap(),ApInt::from([7u64,8,9,10,11,12,13,14,15,16,1,2,3,4,5,6])); + } + } +} diff --git a/src/logic/traits.rs b/src/logic/traits.rs new file mode 100644 index 0000000..12b8ecd --- /dev/null +++ b/src/logic/traits.rs @@ -0,0 +1,153 @@ +use crate::data::ApInt; +use std::hash::{Hash, Hasher}; + +use std::ops::{ + Not, + Neg, + BitAnd, + BitOr, + BitXor, + Add, + Sub, + Mul, + BitAndAssign, + BitOrAssign, + BitXorAssign, + AddAssign, + SubAssign, + MulAssign +}; + +/// ============================================================================ +/// Standard `ops` trait implementations. 
+/// ----------------------------------------------------------------------------
+/// **Note:** These ops will panic if their corresponding functions return an
+/// error. These ops all happen inplace and no cloning is happening internally,
+/// but they can allocate memory if their corresponding function does.
+///
+/// `ApInt` implements some `std::ops` traits for improved usability.
+/// Only traits for operations that do not depend on the signedness
+/// interpretation of the specific `ApInt` instance are actually implemented.
+/// Operations like `div` and `rem` are not expected to have an
+/// implementation since a favor in unsigned or signed cannot be decided.
+///
+/// Also note that no traits have been implemented `for &'b ApInt` or
+/// `for &'b mut ApInt`, because doing so involves cloning. This crate strives
+/// for clearly exposing where expensive operations happen, so in this case we
+/// favor the user side to be more explicit.
+/// ============================================================================
+
+// miscellaneous ops
+
+impl Hash for ApInt {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.len.hash(state);
+        self.as_digit_slice().hash(state);
+    }
+}
+
+// unary ops
+
+impl Not for ApInt {
+    type Output = ApInt;
+
+    fn not(self) -> Self::Output {
+        self.into_bitnot()
+    }
+}
+
+impl Neg for ApInt {
+    type Output = ApInt;
+
+    fn neg(self) -> Self::Output {
+        self.into_wrapping_neg()
+    }
+}
+
+// binary ops
+
+impl<'a> BitAnd<&'a ApInt> for ApInt {
+    type Output = ApInt;
+
+    fn bitand(self, rhs: &'a ApInt) -> Self::Output {
+        self.into_bitand(rhs).unwrap()
+    }
+}
+
+impl<'a> BitOr<&'a ApInt> for ApInt {
+    type Output = ApInt;
+
+    fn bitor(self, rhs: &'a ApInt) -> Self::Output {
+        self.into_bitor(rhs).unwrap()
+    }
+}
+
+impl<'a> BitXor<&'a ApInt> for ApInt {
+    type Output = ApInt;
+
+    fn bitxor(self, rhs: &'a ApInt) -> Self::Output {
+        self.into_bitxor(rhs).unwrap()
+    }
+}
+
+impl<'a> Add<&'a ApInt> for ApInt {
+    type Output = ApInt;
+
+    fn
add(self, rhs: &'a ApInt) -> Self::Output {
+        self.into_wrapping_add(rhs).unwrap()
+    }
+}
+
+impl<'a> Sub<&'a ApInt> for ApInt {
+    type Output = ApInt;
+
+    fn sub(self, rhs: &'a ApInt) -> Self::Output {
+        self.into_wrapping_sub(rhs).unwrap()
+    }
+}
+
+impl<'a> Mul<&'a ApInt> for ApInt {
+    type Output = ApInt;
+
+    fn mul(self, rhs: &'a ApInt) -> Self::Output {
+        self.into_wrapping_mul(rhs).unwrap()
+    }
+}
+
+// assignment ops
+
+impl<'a> BitAndAssign<&'a ApInt> for ApInt {
+    fn bitand_assign(&mut self, rhs: &'a ApInt) {
+        self.bitand_assign(rhs).unwrap();
+    }
+}
+
+impl<'a> BitOrAssign<&'a ApInt> for ApInt {
+    fn bitor_assign(&mut self, rhs: &'a ApInt) {
+        self.bitor_assign(rhs).unwrap();
+    }
+}
+
+impl<'a> BitXorAssign<&'a ApInt> for ApInt {
+    fn bitxor_assign(&mut self, rhs: &'a ApInt) {
+        self.bitxor_assign(rhs).unwrap();
+    }
+}
+
+impl<'a> AddAssign<&'a ApInt> for ApInt {
+    fn add_assign(&mut self, rhs: &'a ApInt) {
+        self.wrapping_add_assign(rhs).unwrap()
+    }
+}
+
+impl<'a> SubAssign<&'a ApInt> for ApInt {
+    fn sub_assign(&mut self, rhs: &'a ApInt) {
+        self.wrapping_sub_assign(rhs).unwrap()
+    }
+}
+
+impl<'a> MulAssign<&'a ApInt> for ApInt {
+    fn mul_assign(&mut self, rhs: &'a ApInt) {
+        self.wrapping_mul_assign(rhs).unwrap();
+    }
+}
\ No newline at end of file
diff --git a/src/utils.rs b/src/logic/utils.rs
similarity index 71%
rename from src/utils.rs
rename to src/logic/utils.rs
index 4853a77..99e9706 100644
--- a/src/utils.rs
+++ b/src/logic/utils.rs
@@ -1,9 +1,9 @@
-use errors::Result;
+use crate::info::Result;
 
 /// Consumes `entity` and forwards it to an inplace-mutating function.
 ///
 /// Returns the entity afterwards.
-pub fn forward_mut_impl<T, F>(entity: T, op: F) -> T
+pub(crate) fn forward_mut_impl<T, F>(entity: T, op: F) -> T
 where
     F: Fn(&mut T) -> ()
 {
@@ -15,7 +15,7 @@ where
 /// Consumes `entity` and forwards it to an inplace-mutating function.
 ///
 /// Returns the entity afterwards.
-pub fn forward_bin_mut_impl<L, R, F>(entity: L, rhs: R, op: F) -> L
+pub(crate) fn forward_bin_mut_impl<L, R, F>(entity: L, rhs: R, op: F) -> L
 where
     F: Fn(&mut L, R) -> ()
 {
@@ -27,11 +27,11 @@ where
 /// Consumes `entity` and forwards it to an inplace-mutating function.
 ///
 /// Returns the entity afterwards.
-pub fn try_forward_bin_mut_impl<L, R, F>(entity: L, rhs: R, op: F) -> Result<L>
+pub(crate) fn try_forward_bin_mut_impl<L, R, F>(entity: L, rhs: R, op: F) -> Result<L>
 where
     F: Fn(&mut L, R) -> Result<()>
 {
     let mut this = entity;
     op(&mut this, rhs)?;
     Ok(this)
-}
+}
\ No newline at end of file