From 6691528554c50ba0381f18581e640877d3b74f37 Mon Sep 17 00:00:00 2001
From: Kate Cheney
Date: Sat, 9 Jun 2018 13:09:18 -0700
Subject: [PATCH 1/4] Add tests to core/impl/tensor_impl.hpp

---
 include/tnt/core/impl/tensor_impl.hpp | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/include/tnt/core/impl/tensor_impl.hpp b/include/tnt/core/impl/tensor_impl.hpp
index 8c5bcba..c077e62 100644
--- a/include/tnt/core/impl/tensor_impl.hpp
+++ b/include/tnt/core/impl/tensor_impl.hpp
@@ -26,6 +26,13 @@ namespace tnt
 template <typename DataType>
 inline Tensor<DataType>::Tensor() noexcept {}
 
+TEST_CASE_TEMPLATE("Tensor()", T, test_data_types)
+{
+    Tensor<T> t1;
+    REQUIRE(t1.data == AlignedPtr<T>());
+    REQUIRE(t1.shape == Shape());
+}
+
 template <typename DataType>
 inline Tensor<DataType>::Tensor(const Shape& shape)
 {
@@ -33,6 +40,15 @@ inline Tensor<DataType>::Tensor(const Shape& shape)
     this->data = AlignedPtr<DataType>(shape.total());
 }
 
+TEST_CASE_TEMPLATE("Tensor(Shape&)", T, test_data_types)
+{
+    Shape s1;
+    Tensor<T> t1(s1);
+
+    REQUIRE(t1.shape == s1);
+    REQUIRE(t1.data == AlignedPtr<T>(s1.total()));
+}
+
 // Shape + Data
 template <typename DataType>
 inline Tensor<DataType>::Tensor(const Shape& shape, const PtrType& data)
@@ -41,6 +57,16 @@ inline Tensor<DataType>::Tensor(const Shape& shape, const PtrType& data)
     this->data = data;
 }
 
+TEST_CASE_TEMPLATE("Tensor(Shape&, PtrType&)", T, test_data_types)
+{
+    Shape s1;
+    AlignedPtr<T> ptr1;
+    Tensor<T> t1(s1, ptr1);
+
+    REQUIRE(t1.shape == s1);
+    REQUIRE(t1.data == ptr1);
+}
+
 // Shape + Value
 template <typename DataType>
 inline Tensor<DataType>::Tensor(const Shape& shape, const DataType& value)
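The tests above reference a `test_data_types` type list that is defined outside the diff context, so it never appears in these patches. A minimal sketch of what doctest's TEST_CASE_TEMPLATE expects; the exact member types are an assumption:

    // Hypothetical definition of the type list used by TEST_CASE_TEMPLATE.
    // The real list lives outside this diff; int/float/double are assumed
    // here purely for illustration.
    #include <doctest/doctest.h>

    using test_data_types = doctest::Types<int, float, double>;

Each TEST_CASE_TEMPLATE is then instantiated once per listed type, with `T` bound to that type.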
From 234bd22818f8edf7ad0f00da72c9e9d951a01ce9 Mon Sep 17 00:00:00 2001
From: Kate Cheney
Date: Tue, 12 Jun 2018 19:23:25 -0700
Subject: [PATCH 2/4] Add tests for the remaining constructors and the
 iterators

---
 include/tnt/core/impl/tensor_impl.hpp | 168 +++++++++++++++++++++++++-
 1 file changed, 166 insertions(+), 2 deletions(-)

diff --git a/include/tnt/core/impl/tensor_impl.hpp b/include/tnt/core/impl/tensor_impl.hpp
index c077e62..283be46 100644
--- a/include/tnt/core/impl/tensor_impl.hpp
+++ b/include/tnt/core/impl/tensor_impl.hpp
@@ -42,11 +42,27 @@ inline Tensor<DataType>::Tensor(const Shape& shape)
 
 TEST_CASE_TEMPLATE("Tensor(Shape&)", T, test_data_types)
 {
+    /* generic Shape constructor test */
     Shape s1;
     Tensor<T> t1(s1);
 
     REQUIRE(t1.shape == s1);
-    REQUIRE(t1.data == AlignedPtr<T>(s1.total()));
+    REQUIRE(t1.data.size == s1.total());
+
+    /* Shape given an initializer_list */
+    Shape s2{0, 1, 2};
+    Tensor<T> t2(s2);
+
+    REQUIRE(t2.shape == s2);
+    REQUIRE(t2.data.size == s2.total());
+
+    /* Shape given a vector */
+    Shape s3(std::vector<int>{1, 2, 3, 4});
+    Tensor<T> t3(s3);
+
+    REQUIRE(t3.shape == s3);
+    REQUIRE(t3.data.size == s3.total());
 }
 
 // Shape + Data
@@ -59,12 +75,30 @@ inline Tensor<DataType>::Tensor(const Shape& shape, const PtrType& data)
 
 TEST_CASE_TEMPLATE("Tensor(Shape&, PtrType&)", T, test_data_types)
 {
+    /* Generic Shape, only testing AlignedPtr.
+     *
+     * Ptr with no parameters.
+     */
     Shape s1;
     AlignedPtr<T> ptr1;
     Tensor<T> t1(s1, ptr1);
-
     REQUIRE(t1.shape == s1);
     REQUIRE(t1.data == ptr1);
+
+    /* ptr constructed from a size only */
+    AlignedPtr<T> ptr2(10);
+    Tensor<T> t2(s1, ptr2);
+    REQUIRE(t2.data.size == 10);
+
+    /* ptr constructed from existing data and a size */
+    T data[5] = {1, 2, 3, 4, 5};
+    AlignedPtr<T> ptr3(data, 5);
+    Tensor<T> t3(s1, ptr3);
+    REQUIRE(t3.data.size == 5);
+    REQUIRE(t3.data[2] == 3);
+    REQUIRE(t3.data[4] == 5);
 }
 
 // Shape + Value
@@ -77,6 +111,17 @@ inline Tensor<DataType>::Tensor(const Shape& shape, const DataType& value)
     this->operator=(value); // Set to value
 }
 
+TEST_CASE_TEMPLATE("Tensor(const Shape&, const DataType&)", T, test_data_types)
+{
+    Shape s1{2, 2};
+    Tensor<T> t1(s1, (T) 4);
+    AlignedPtr<T> test = t1.data;
+
+    for (int i = 0; i < 4; i++) {
+        REQUIRE(test[i] == 4);
+    }
+}
+
 template <typename DataType>
 inline Tensor<DataType>::Tensor(const SelfType& other)
 {
@@ -84,6 +129,18 @@ inline Tensor<DataType>::Tensor(const SelfType& other)
     this->data = other.data;
 }
 
+TEST_CASE_TEMPLATE("Tensor(const SelfType&)", T, test_data_types)
+{
+    Shape s1{2, 2};
+    T data[4] = {1, 2, 3, 4};
+    AlignedPtr<T> ptr(data, 4);
+    const Tensor<T> t1(s1, ptr);
+    Tensor<T> t2(t1);
+
+    REQUIRE(t2.shape == t1.shape);
+    REQUIRE(t2.data == t1.data);
+}
+
 template <typename DataType>
 inline Tensor<DataType>& Tensor<DataType>::operator=(const SelfType& other)
 {
@@ -93,6 +150,18 @@ inline Tensor<DataType>& Tensor<DataType>::operator=(const SelfType& other)
     return *this;
 }
 
+TEST_CASE_TEMPLATE("Tensor::operator=(const SelfType&)", T, test_data_types)
+{
+    Shape s1{2, 2};
+    T data[4] = {1, 2, 3, 4};
+    AlignedPtr<T> ptr(data, 4);
+    Tensor<T> t1(s1, ptr);
+    Tensor<T> t2;
+    t2 = t1;
+
+    REQUIRE(t2.shape == t1.shape);
+    REQUIRE(t2.data == t1.data);
+}
+
 template <typename DataType>
 inline Tensor<DataType>::Tensor(SelfType&& other) noexcept
 {
@@ -104,6 +173,21 @@ inline Tensor<DataType>::Tensor(SelfType&& other) noexcept
     other.data = PtrType();
 }
 
+TEST_CASE_TEMPLATE("Tensor(SelfType&&)", T, test_data_types)
+{
+    Shape s1{2, 2};
+    T data[4] = {1, 2, 3, 4};
+    AlignedPtr<T> ptr(data, 4);
+    Tensor<T> t1(s1, ptr);
+    Tensor<T> t2(std::move(t1));
+
+    REQUIRE(t2.shape == s1);
+    REQUIRE(t2.data == ptr);
+    REQUIRE(t1.shape == Shape());
+    REQUIRE(t1.data == AlignedPtr<T>());
+}
+
 template <typename DataType>
 inline Tensor<DataType>& Tensor<DataType>::operator=(SelfType&& other) noexcept
 {
@@ -117,6 +201,20 @@ inline Tensor<DataType>& Tensor<DataType>::operator=(SelfType&& other) noexcept
     return *this;
 }
 
+TEST_CASE_TEMPLATE("Tensor::operator=(SelfType&&)", T, test_data_types)
+{
+    Shape s1{2, 2};
+    T data[4] = {1, 2, 3, 4};
+    AlignedPtr<T> ptr(data, 4);
+    Tensor<T> t1(s1, ptr);
+    Tensor<T> t2;
+    t2 = std::move(t1);
+
+    REQUIRE(t2.shape == s1);
+    REQUIRE(t2.data == ptr);
+    REQUIRE(t1.shape == Shape());
+    REQUIRE(t1.data == AlignedPtr<T>());
+}
+
 // ----------------------------------------------------------------------------
 // Iterators
 
@@ -126,24 +224,90 @@ inline typename Tensor<DataType>::IteratorType Tensor<DataType>::begin() const n
     return this->data.data;
 }
 
+TEST_CASE_TEMPLATE("Tensor::begin()", T, test_data_types)
+{
+    Shape s1{2, 2};
+    T data[4] = {(T) 1, (T) 2, (T) 3, (T) 4};
+    AlignedPtr<T> ptr(data, 4);
+    Tensor<T> t1(s1, ptr);
+
+    REQUIRE(*t1.begin() == 1);
+
+    data[0] = (T) 5;
+    AlignedPtr<T> ptr1(data, 4);
+    Tensor<T> t2(s1, ptr1);
+    REQUIRE(*t2.begin() == 5);
+}
+
 template <typename DataType>
 inline typename Tensor<DataType>::IteratorType Tensor<DataType>::end() const noexcept
 {
     return this->data.is_null() ? nullptr : this->data.data + this->shape.total();
 }
 
+TEST_CASE_TEMPLATE("Tensor::end()", T, test_data_types)
+{
+    Shape s1{2, 2};
+    T data[4] = {(T) 1, (T) 2, (T) 3, (T) 4};
+    AlignedPtr<T> ptr(data, 4);
+    Tensor<T> t1(s1, ptr);
+    T* it = t1.begin();
+
+    int cnt = 1;
+    while (it != t1.end()) {
+        REQUIRE(*it == (T) cnt);
+        it++;
+        cnt++;
+    }
+}
+
 template <typename DataType>
 inline typename Tensor<DataType>::ConstIteratorType Tensor<DataType>::cbegin() const noexcept
 {
     return this->data.data;
 }
 
+TEST_CASE_TEMPLATE("Tensor::cbegin()", T, test_data_types)
+{
+    Shape s1{2, 2};
+    T data[4] = {(T) 1, (T) 2, (T) 3, (T) 4};
+    AlignedPtr<T> ptr(data, 4);
+    const Tensor<T> t1(s1, ptr);
+
+    REQUIRE(*t1.cbegin() == 1);
+
+    data[0] = (T) 5;
+    AlignedPtr<T> ptr1(data, 4);
+    const Tensor<T> t2(s1, ptr1);
+    REQUIRE(*t2.cbegin() == 5);
+}
+
 template <typename DataType>
 inline typename Tensor<DataType>::ConstIteratorType Tensor<DataType>::cend() const noexcept
 {
     return this->data.is_null() ? nullptr : this->data.data + this->shape.total();
 }
 
+TEST_CASE_TEMPLATE("Tensor::cend()", T, test_data_types)
+{
+    Shape s1{2, 2};
+    T data[4] = {(T) 1, (T) 2, (T) 3, (T) 4};
+    AlignedPtr<T> ptr(data, 4);
+    const Tensor<T> t1(s1, ptr);
+    const T* it = t1.cbegin();
+
+    int cnt = 1;
+    while (it != t1.cend()) {
+        REQUIRE(*it == (T) cnt);
+        it++;
+        cnt++;
+    }
+}
+
 // ----------------------------------------------------------------------------
 // Operators
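Since `begin()`/`end()` give Tensor the standard range interface, the iterators added above can also drive a range-based for loop. A short sketch using the same setup as the tests, not part of the patch:

    // Summing a Tensor's elements through Tensor::begin()/end().
    float values[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    tnt::AlignedPtr<float> ptr(values, 4);
    tnt::Tensor<float> t(tnt::Shape{2, 2}, ptr);

    float sum = 0.0f;
    for (float v : t)  // equivalent to iterating from begin() to end()
        sum += v;
    // sum == 10.0f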
From 2fba95e8a6c8120058029d11d3bf37463fd07f67 Mon Sep 17 00:00:00 2001
From: Kate Cheney
Date: Fri, 22 Jun 2018 19:38:39 -0700
Subject: [PATCH 3/4] Add Gaussian elimination for a coefficient matrix

---
 include/tnt/linear/Gaussian_elimination.hpp   |  41 ++++
 .../linear/impl/Gaussian_elimination_impl.hpp | 176 ++++++++++++++++++
 include/tnt/linear/linear.hpp                 |   1 +
 3 files changed, 218 insertions(+)
 create mode 100644 include/tnt/linear/Gaussian_elimination.hpp
 create mode 100644 include/tnt/linear/impl/Gaussian_elimination_impl.hpp

diff --git a/include/tnt/linear/Gaussian_elimination.hpp b/include/tnt/linear/Gaussian_elimination.hpp
new file mode 100644
index 0000000..fc7dafa
--- /dev/null
+++ b/include/tnt/linear/Gaussian_elimination.hpp
@@ -0,0 +1,41 @@
+#ifndef TNT_GAUSSIAN_ELIMINATION_HPP
+#define TNT_GAUSSIAN_ELIMINATION_HPP
+
+#include <tnt/core/tensor.hpp>  // assumed path; the original include targets were lost in extraction
+#include <tnt/utils/errors.hpp> // assumed path
+
+namespace tnt
+{
+
+namespace detail
+{
+
+    template <typename DataType>
+    struct OptimizedGaussianElimination
+    {
+        static Tensor<DataType> eval(Tensor<DataType>&);
+    };
+
+} // namespace detail
+
+/// \brief Executes Gaussian elimination on a matrix
+///
+/// \param T1 The tensor. It has size `MxN`
+/// \returns A tensor in reduced row echelon form
+/// \requires T1 shall be two dimensional
+template <typename DataType>
+inline Tensor<DataType> gaussian_elim(Tensor<DataType>& T1)
+{
+    static_assert(std::is_floating_point<DataType>::value,
+                  "gaussian_elim() requires floating-point data");
+
+    TNT_ASSERT(T1.shape.num_axes() == 2, InvalidParameterException("tnt::gaussian_elim()",
+               __FILE__, __LINE__, "Gaussian elimination requires 2D tensors"));
+
+    return detail::OptimizedGaussianElimination<DataType>::eval(T1);
+}
+
+} // namespace tnt
+
+#endif // TNT_GAUSSIAN_ELIMINATION_HPP
\ No newline at end of file
diff --git a/include/tnt/linear/impl/Gaussian_elimination_impl.hpp b/include/tnt/linear/impl/Gaussian_elimination_impl.hpp
new file mode 100644
index 0000000..5d7cf0b
--- /dev/null
+++ b/include/tnt/linear/impl/Gaussian_elimination_impl.hpp
@@ -0,0 +1,176 @@
+#ifndef TNT_GAUSSIAN_ELIMINATION_IMPL_HPP
+#define TNT_GAUSSIAN_ELIMINATION_IMPL_HPP
+
+#include <tnt/linear/Gaussian_elimination.hpp> // assumed path; original include targets lost in extraction
+#include <doctest/doctest.h>                   // assumed path
+#include <iostream>
+
+namespace tnt {
+
+    namespace detail {
+
+        template <typename DataType>
+        struct OptimizedGaussianElimination {
+
+            static Tensor<DataType> eval(Tensor<DataType>& tensor) {
+
+                DataType* ptr = tensor.data.data;
+                int num_rows = tensor.shape.axes[0];
+                int num_cols = tensor.shape.axes[1];
+
+                for (int i = 0; i < num_rows; i++) {
+                    // Find the first non-zero entry (the lead) in row i.
+                    int lead = num_cols * i;
+                    int row_end = num_cols * (i + 1);
+                    while (lead < row_end && ptr[lead] == 0) {
+                        lead++;
+                    }
+                    if (lead == row_end) continue;  // row of all zeros
+
+                    if (ptr[lead] == 1) {
+                        clearColumn(ptr, i, (lead - (num_cols * i)), num_rows, num_cols);
+                    } else {
+                        divideRow(ptr, ptr[lead], i, num_cols);
+                        clearColumn(ptr, i, (lead - (num_cols * i)), num_rows, num_cols);
+                    }
+                }
+                naturalOrder(ptr, num_cols, num_rows);
+                tensor.data = AlignedPtr<DataType>(ptr, tensor.shape.total());
+
+                return tensor;
+            }
+
+        private:
+
+            static int findLead(DataType* data, int num_cols, int row, int num_rows) {
+                // Column index of the first non-zero entry in `row`, or
+                // num_cols if the row is all zeros.
+                int lead_index = num_cols * row;
+                while (lead_index < (row * num_cols + num_cols) && data[lead_index] == 0) {
+                    lead_index++;
+                }
+                return lead_index - (row * num_cols);
+            }
+
+            static void switchRows(DataType* data, int row1, int row2, int num_cols) {
+                std::cout << "switching rows " << row1 << " & " << row2 << std::endl;
+                for (int i = 0; i < num_cols; i++) {
+                    DataType temp = data[row1 * num_cols + i];
+                    data[row1 * num_cols + i] = data[row2 * num_cols + i];
+                    data[row2 * num_cols + i] = temp;
+                }
+            }
+
+            static void naturalOrder(DataType* data, int num_cols, int num_rows)
+            {
+                std::cout << "tensor is: " << std::endl;
+                for (int i = 0; i < num_rows; i++) {
+                    for (int j = 0; j < num_cols; j++) {
+                        std::cout << data[i * num_cols + j];
+                    }
+                    std::cout << "\n";
+                }
+
+                // Insertion sort of the rows by leading-entry column, so the
+                // pivots step down and to the right.
+                for (int i = 1; i < num_rows; i++) {
+                    int j = i;
+                    while (j >= 1 && (findLead(data, num_cols, j, num_rows) < findLead(data, num_cols, j - 1, num_rows))) {
+                        switchRows(data, j, j - 1, num_cols);
+                        j = j - 1;
+                    }
+                }
+            }
+
+            static void divideRow(DataType* data, DataType lead, int row, int num_cols)
+            {
+                // Normalize the row so its leading entry becomes 1.
+                for (int i = 0; i < num_cols; i++) {
+                    if (data[(num_cols * row) + i] != 0)
+                        data[(num_cols * row) + i] = data[(num_cols * row) + i] / lead;
+                }
+            }
+
+            static void subtractRow(DataType* data, int row, DataType val, int num_cols, int base)
+            {
+                // row -= val * base
+                for (int i = 0; i < num_cols; i++) {
+                    data[(num_cols * row) + i] = data[(num_cols * row) + i] - (val * (data[(num_cols * base) + i]));
+                }
+            }
+
+            static void clearColumn(DataType* data, int row, int leading_index, int num_rows, int num_cols)
+            {
+                // Zero out every other row's entry in the pivot column.
+                for (int i = 0; i < num_rows; i++) {
+                    if (i != row) {
+                        DataType match = data[(num_cols * i) + leading_index];
+                        if (match != 0) {
+                            subtractRow(data, i, match, num_cols, row);
+                        }
+                    }
+                }
+            }
+        };
+
+    } // namespace detail
+
+} // namespace tnt
+
+using multiply_data_types = doctest::Types<float, double>;
+
+TEST_CASE_TEMPLATE("gaussian_elim(const Tensor&)", T, multiply_data_types)
+{
+    T data[9] = {(T) 1, (T) 0, (T) 0, (T) 0, (T) 1, (T) 0, (T) 0, (T) 0, (T) 1};
+    tnt::AlignedPtr<T> ptr(data, 9);
+    tnt::Tensor<T> t1(tnt::Shape{3, 3}, ptr);
+    T rref[9] = {(T) 1, (T) 0, (T) 0, (T) 0, (T) 1, (T) 0, (T) 0, (T) 0, (T) 1};
+    tnt::AlignedPtr<T> ptr1(rref, 9);
+    tnt::Tensor<T> t2(tnt::Shape{3, 3}, ptr1);
+
+    REQUIRE(gaussian_elim(t1) == t2);
+
+    T data1[9] = {(T) 1, (T) 2, (T) 3, (T) 2, (T) 0, (T) 1, (T) 1, (T) 0, (T) 1};
+    tnt::AlignedPtr<T> ptr2(data1, 9);
+    tnt::Tensor<T> t3(tnt::Shape{3, 3}, ptr2);
+
+    REQUIRE(gaussian_elim(t3) == t2);
+
+    T data3[9] = {(T) 1, (T) 0, (T) 0, (T) 0, (T) 0, (T) 1, (T) 0, (T) 1, (T) 0};
+    tnt::AlignedPtr<T> ptr6(data3, 9);
+    tnt::Tensor<T> t4(tnt::Shape{3, 3}, ptr6);
+
+    REQUIRE(gaussian_elim(t4) == t2);
+
+    T data2[9] = {(T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0};
+    tnt::AlignedPtr<T> ptr4(data2, 9);
+    tnt::Tensor<T> t5(tnt::Shape{3, 3}, ptr4);
+    T rref2[9] = {(T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0};
+    tnt::AlignedPtr<T> ptr5(rref2, 9);
+    tnt::Tensor<T> t6(tnt::Shape{3, 3}, ptr5);
+
+    REQUIRE(gaussian_elim(t5) == t6);
+
+    T data5[9] = {(T) 0, (T) 1, (T) 0, (T) 0, (T) 0, (T) 0, (T) 1, (T) 0, (T) 0};
+    tnt::AlignedPtr<T> ptr7(data5, 9);
+    tnt::Tensor<T> t9(tnt::Shape{3, 3}, ptr7);
+    T rref3[9] = {(T) 1, (T) 0, (T) 0, (T) 0, (T) 1, (T) 0, (T) 0, (T) 0, (T) 0};
+    tnt::AlignedPtr<T> ptr8(rref3, 9);
+    tnt::Tensor<T> t10(tnt::Shape{3, 3}, ptr8);
+
+    REQUIRE(gaussian_elim(t9) == t10);
+
+    T data11[9] = {(T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 1, (T) 0};
+    tnt::AlignedPtr<T> ptr11(data11, 9);
+    tnt::Tensor<T> t11(tnt::Shape{3, 3}, ptr11);
+    T rref11[9] = {(T) 0, (T) 1, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0, (T) 0};
+    tnt::AlignedPtr<T> ptr12(rref11, 9);
+    tnt::Tensor<T> t12(tnt::Shape{3, 3}, ptr12);
+
+    REQUIRE(gaussian_elim(t11) == t12);
+
+    T d[6] = {(T) 1, (T) 0, (T) 0, (T) 1, (T) 0, (T) 0};
+    tnt::AlignedPtr<T> p(d, 6);
+    tnt::Tensor<T> tt(tnt::Shape{3, 2}, p);
+    T r[6] = {(T) 1, (T) 0, (T) 0, (T) 1, (T) 0, (T) 0};
+    tnt::AlignedPtr<T> p1(r, 6);
+    tnt::Tensor<T> tt1(tnt::Shape{3, 2}, p1);
+
+    REQUIRE(gaussian_elim(tt) == tt1);
+}
+
+#endif // TNT_GAUSSIAN_ELIMINATION_IMPL_HPP
\ No newline at end of file
diff --git a/include/tnt/linear/linear.hpp b/include/tnt/linear/linear.hpp
index f792b67..cbf871b 100644
--- a/include/tnt/linear/linear.hpp
+++ b/include/tnt/linear/linear.hpp
@@ -4,6 +4,7 @@
 #include
 #include
 #include
+#include <tnt/linear/Gaussian_elimination.hpp>
 //#include
 
 #endif // TNT_LINEAR_HPP
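For reference, a usage sketch of the new function, reusing the full-rank 3x3 system from the second test case above; a full-rank square matrix reduces to the identity:

    // gaussian_elim() reduces A to reduced row echelon form. Note that
    // eval() works on the tensor's own buffer, so A itself is modified too.
    float data[9] = {1, 2, 3,
                     2, 0, 1,
                     1, 0, 1};
    tnt::AlignedPtr<float> ptr(data, 9);
    tnt::Tensor<float> A(tnt::Shape{3, 3}, ptr);

    tnt::Tensor<float> rref = tnt::gaussian_elim(A);  // 3x3 identity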
From 027c1eaab77bccc12e7310d0316a0d80d4a730ea Mon Sep 17 00:00:00 2001
From: Kate Cheney
Date: Wed, 4 Jul 2018 10:54:39 -0700
Subject: [PATCH 4/4] Add augmented-matrix support to Gaussian elimination and
 an orthogonal projection function that projects a vector onto the line
 spanned by a second vector

---
 cmake/headers.cmake                           |   3 +-
 include/tnt/core/impl/tensor_impl.hpp         |  31 +++++
 include/tnt/linear/Gaussian_elimination.hpp   |  16 +++
 .../linear/impl/Gaussian_elimination_impl.hpp | 115 ++++++++++++++----
 .../impl/orthogonal_projection_impl.hpp       |  80 ++++++++++++
 include/tnt/linear/linear.hpp                 |   1 +
 include/tnt/linear/orthogonal_projection.hpp  |  46 +++++++
 7 files changed, 270 insertions(+), 22 deletions(-)
 create mode 100644 include/tnt/linear/impl/orthogonal_projection_impl.hpp
 create mode 100644 include/tnt/linear/orthogonal_projection.hpp

diff --git a/cmake/headers.cmake b/cmake/headers.cmake
index 281bf3b..d4d0419 100644
--- a/cmake/headers.cmake
+++ b/cmake/headers.cmake
@@ -54,4 +54,5 @@ set(TNT_HEADERS include/tnt/tnt.hpp
     include/tnt/linear/impl/convolution_3d_impl.hpp
     include/tnt/linear/discrete_cosine_transform.hpp
     include/tnt/linear/impl/discrete_cosine_transform_impl.hpp
-)
+    )
+
diff --git a/include/tnt/core/impl/tensor_impl.hpp b/include/tnt/core/impl/tensor_impl.hpp
index 283be46..c0c5888 100644
--- a/include/tnt/core/impl/tensor_impl.hpp
+++ b/include/tnt/core/impl/tensor_impl.hpp
@@ -318,6 +318,17 @@ inline bool Tensor<DataType>::operator== (const SelfType& other) const noexcept
         && this->data == other.data;
 }
 
+TEST_CASE_TEMPLATE("Tensor::operator==(const SelfType&)", T, test_data_types)
+{
+    Shape s1{2, 2};
+    T data[4] = {1, 2, 3, 4};
+    AlignedPtr<T> ptr(data, 4);
+    Tensor<T> t1(s1, ptr);
+    Tensor<T> t2(s1, ptr);
+    REQUIRE(t1 == t2);
+}
+
 template <typename DataType>
 inline bool Tensor<DataType>::operator!= (const SelfType& other) const noexcept
 {
@@ -325,6 +336,18 @@ inline bool Tensor<DataType>::operator!= (const SelfType& other) const noexcept
     || this->data != other.data;
 }
 
+TEST_CASE_TEMPLATE("Tensor::operator!=(const SelfType&)", T, test_data_types)
+{
+    Shape s1{2, 2};
+    T data[4] = {1, 2, 3, 4};
+    T data1[4] = {4, 3, 2, 1};
+    AlignedPtr<T> ptr(data, 4);
+    AlignedPtr<T> ptr1(data1, 4);
+    Tensor<T> t1(s1, ptr);
+    Tensor<T> t2(s1, ptr1);
+    REQUIRE(t1 != t2);
+}
+
 template <typename DataType>
 inline Tensor<DataType>::operator DataType()
 {
@@ -337,6 +360,14 @@ inline Tensor<DataType>::operator DataType()
     return this->data[0];
 }
 
+TEST_CASE_TEMPLATE("Tensor::operator DataType()", T, test_data_types)
+{
+    Shape s1{1, 1};
+    T data[1] = {(T) 3};
+    AlignedPtr<T> ptr(data, 1);
+    Tensor<T> t1(s1, ptr);
+
+    REQUIRE((T) t1 == (T) 3);
+}
+
 template <typename DataType>
 inline Tensor<DataType>::operator TensorView<DataType>()
 {
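The DataType conversion operator reads a tensor back as a scalar; the new test exercises it through a one-element tensor. A usage sketch (float chosen arbitrarily), built on the Shape-plus-value constructor tested in patch 2:

    tnt::Tensor<float> t(tnt::Shape{1, 1}, 3.0f);  // one element, value 3
    float x = (float) t;                           // x == 3.0f, i.e. t.data[0]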
diff --git a/include/tnt/linear/Gaussian_elimination.hpp b/include/tnt/linear/Gaussian_elimination.hpp
index fc7dafa..8d27e98 100644
--- a/include/tnt/linear/Gaussian_elimination.hpp
+++ b/include/tnt/linear/Gaussian_elimination.hpp
@@ -14,6 +14,7 @@ namespace detail
     struct OptimizedGaussianElimination
     {
         static Tensor<DataType> eval(Tensor<DataType>&);
+        static Tensor<DataType> eval(Tensor<DataType>&, Tensor<DataType>&);
     };
 
 } // namespace detail
@@ -36,6 +37,21 @@ inline Tensor<DataType> gaussian_elim(Tensor<DataType>& T1)
     return detail::OptimizedGaussianElimination<DataType>::eval(T1);
 }
 
+/// \brief Executes Gaussian elimination on the augmented matrix `[T1 | V1]`
+///
+/// \param T1 The coefficient matrix. It has size `MxN`
+/// \param V1 The right-hand-side column vector. It has size `Mx1`
+/// \returns The solution vector produced by reducing `T1` to reduced row echelon form
+template <typename DataType>
+inline Tensor<DataType> gaussian_elim(Tensor<DataType>& T1, Tensor<DataType>& V1)
+{
+    static_assert(std::is_floating_point<DataType>::value,
+                  "gaussian_elim() requires floating-point data");
+
+    TNT_ASSERT(T1.shape.num_axes() == 2, InvalidParameterException("tnt::gaussian_elim()",
+               __FILE__, __LINE__, "Gaussian elimination requires 2D tensors"));
+
+    TNT_ASSERT(V1.shape.axes[1] == 1, InvalidParameterException("tnt::gaussian_elim()",
+               __FILE__, __LINE__, "the right-hand side must be a column vector"));
+
+    return detail::OptimizedGaussianElimination<DataType>::eval(T1, V1);
+}
+
 } // namespace tnt
 
 #endif // TNT_GAUSSIAN_ELIMINATION_HPP
\ No newline at end of file
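A sketch of the augmented overload in use, mirroring the second assertion of the new tests below: for A with a single off-diagonal 1 and b = {5, 6, 7}, eliminating [A | b] subtracts row 0 from row 1, so the returned vector is {5, 1, 7}:

    float a[9] = {1, 0, 0,
                  1, 1, 0,
                  0, 0, 1};
    float b[3] = {5, 6, 7};
    tnt::AlignedPtr<float> pa(a, 9);
    tnt::AlignedPtr<float> pb(b, 3);
    tnt::Tensor<float> A(tnt::Shape{3, 3}, pa);
    tnt::Tensor<float> x(tnt::Shape{3, 1}, pb);

    // Reduces A to RREF while applying the same row operations to x.
    tnt::Tensor<float> solution = tnt::gaussian_elim(A, x);  // {5, 1, 7}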
diff --git a/include/tnt/linear/impl/Gaussian_elimination_impl.hpp b/include/tnt/linear/impl/Gaussian_elimination_impl.hpp
index 5d7cf0b..1c7fdf5 100644
--- a/include/tnt/linear/impl/Gaussian_elimination_impl.hpp
+++ b/include/tnt/linear/impl/Gaussian_elimination_impl.hpp
@@ -13,6 +13,35 @@ namespace tnt {
         template <typename DataType>
         struct OptimizedGaussianElimination {
 
+            /* Returns the solution vector produced by Gaussian elimination of
+             * the augmented system [tensor | vector]. Both arguments must use
+             * the same data type. */
+            static Tensor<DataType> eval(Tensor<DataType>& tensor, Tensor<DataType>& vector) {
+
+                DataType* ptr = tensor.data.data;
+                DataType* vct = vector.data.data;
+                int num_rows = tensor.shape.axes[0];
+                int num_cols = tensor.shape.axes[1];
+
+                for (int i = 0; i < num_rows; i++) {
+                    int lead = num_cols * i;
+                    int row_end = num_cols * (i + 1);
+                    while (lead < row_end && ptr[lead] == 0) {
+                        lead++;
+                    }
+                    if (lead == row_end) continue;  // row of all zeros
+
+                    if (ptr[lead] == 1) {
+                        clearColumn(ptr, i, (lead - (num_cols * i)), num_rows, num_cols, vct);
+                    } else {
+                        // Capture the lead before divideRow() normalizes it to 1.
+                        DataType lead_value = ptr[lead];
+                        divideRow(ptr, lead_value, i, num_cols);
+                        divideVector(vct, i, lead_value);
+                        clearColumn(ptr, i, (lead - (num_cols * i)), num_rows, num_cols, vct);
+                    }
+                }
+
+                naturalOrder(ptr, num_cols, num_rows, vct);
+                vector.data = AlignedPtr<DataType>(vct, vector.shape.total());
+
+                return vector;
+            }
+
             static Tensor<DataType> eval(Tensor<DataType>& tensor) {
 
                 DataType* ptr = tensor.data.data;
@@ -26,13 +55,13 @@ namespace tnt {
                     }
                     if (lead == row_end) continue;  // row of all zeros
 
                     if (ptr[lead] == 1) {
-                        clearColumn(ptr, i, (lead - (num_cols * i)), num_rows, num_cols);
+                        clearColumn(ptr, i, (lead - (num_cols * i)), num_rows, num_cols, nullptr);
                     } else {
                         divideRow(ptr, ptr[lead], i, num_cols);
-                        clearColumn(ptr, i, (lead - (num_cols * i)), num_rows, num_cols);
+                        clearColumn(ptr, i, (lead - (num_cols * i)), num_rows, num_cols, nullptr);
                     }
                 }
-                naturalOrder(ptr, num_cols, num_rows);
+                naturalOrder(ptr, num_cols, num_rows, nullptr);
                 tensor.data = AlignedPtr<DataType>(ptr, tensor.shape.total());
 
                 return tensor;
@@ -40,7 +69,13 @@ namespace tnt {
 
         private:
 
-            static int findLead(DataType* data, int num_cols, int row, int num_rows) {
+            static void divideVector(DataType* vct, int row, DataType value)
+            {
+                // Apply the same normalization to the right-hand side.
+                vct[row] = vct[row] / value;
+            }
+
+            static int findLead(DataType* data, int num_cols, int row, int num_rows)
+            {
                 // Column index of the first non-zero entry in `row`, or
                 // num_cols if the row is all zeros.
                 int lead_index = num_cols * row;
                 while (lead_index < (row * num_cols + num_cols) && data[lead_index] == 0) {
                     lead_index++;
                 }
                 return lead_index - (row * num_cols);
             }
 
-            static void switchRows(DataType* data, int row1, int row2, int num_cols) {
-                std::cout << "switching rows " << row1 << " & " << row2 << std::endl;
+            static void switchRows(DataType* data, int row1, int row2, int num_cols, DataType* vct)
+            {
                 for (int i = 0; i < num_cols; i++) {
                     DataType temp = data[row1 * num_cols + i];
                     data[row1 * num_cols + i] = data[row2 * num_cols + i];
                     data[row2 * num_cols + i] = temp;
                 }
+                if (vct != nullptr) {
+                    DataType temp = vct[row1];
+                    vct[row1] = vct[row2];
+                    vct[row2] = temp;
+                }
             }
 
-            static void naturalOrder(DataType* data, int num_cols, int num_rows)
+            static void naturalOrder(DataType* data, int num_cols, int num_rows, DataType* vct)
             {
-                std::cout << "tensor is: " << std::endl;
-                for (int i = 0; i < num_rows; i++) {
-                    for (int j = 0; j < num_cols; j++) {
-                        std::cout << data[i * num_cols + j];
-                    }
-                    std::cout << "\n";
-                }
-
                 for (int i = 1; i < num_rows; i++) {
                     int j = i;
                     while (j >= 1 && (findLead(data, num_cols, j, num_rows) < findLead(data, num_cols, j - 1, num_rows))) {
-                        switchRows(data, j, j - 1, num_cols);
+                        switchRows(data, j, j - 1, num_cols, vct);
                        j = j - 1;
                     }
                 }
             }
 
@@ -85,20 +117,21 @@ namespace tnt {
 
-            static void subtractRow(DataType* data, int row, DataType val, int num_cols, int base)
+            static void subtractRow(DataType* data, int row, DataType val, int num_cols, int base, DataType* vct)
             {
                 for (int i = 0; i < num_cols; i++) {
                     data[(num_cols * row) + i] = data[(num_cols * row) + i] - (val * (data[(num_cols * base) + i]));
                 }
+                if (vct != nullptr) vct[row] = vct[row] - (val * (vct[base]));
             }
 
-            static void clearColumn(DataType* data, int row, int leading_index, int num_rows, int num_cols)
+            static void clearColumn(DataType* data, int row, int leading_index, int num_rows, int num_cols, DataType* vct)
             {
                 for (int i = 0; i < num_rows; i++) {
                     if (i != row) {
                         DataType match = data[(num_cols * i) + leading_index];
                         if (match != 0) {
-                            subtractRow(data, i, match, num_cols, row);
+                            subtractRow(data, i, match, num_cols, row, vct);
                         }
                     }
                 }
             }
         };
@@ -109,9 +142,9 @@ namespace tnt {
 
 } // namespace tnt
 
-using multiply_data_types = doctest::Types<float, double>;
+using elim_data_types = doctest::Types<float, double>;
 
-TEST_CASE_TEMPLATE("gaussian_elim(const Tensor&)", T, multiply_data_types)
+TEST_CASE_TEMPLATE("gaussian_elim(const Tensor&)", T, elim_data_types)
 {
     T data[9] = {(T) 1, (T) 0, (T) 0, (T) 0, (T) 1, (T) 0, (T) 0, (T) 0, (T) 1};
     tnt::AlignedPtr<T> ptr(data, 9);
@@ -172,5 +205,45 @@ TEST_CASE_TEMPLATE("gaussian_elim(const Tensor&)", T, elim_data_types)
 
 }
 
+TEST_CASE_TEMPLATE("gaussian_elim(Tensor&, Tensor&) with an identity system", T, elim_data_types)
+{
+    T data[9] = {(T) 1, (T) 0, (T) 0, (T) 0, (T) 1, (T) 0, (T) 0, (T) 0, (T) 1};
+    tnt::AlignedPtr<T> ptr(data, 9);
+    tnt::Tensor<T> t1(tnt::Shape{3, 3}, ptr);
+
+    T sol[3] = {(T) 5, (T) 6, (T) 7};
+    tnt::AlignedPtr<T> vct(sol, 3);
+    tnt::Tensor<T> t2(tnt::Shape{3, 1}, vct);
+
+    tnt::Tensor<T> t3(tnt::Shape{3, 1}, vct);
+
+    REQUIRE(gaussian_elim(t1, t2) == t3);
+
+    T data1[9] = {(T) 1, (T) 0, (T) 0, (T) 1, (T) 1, (T) 0, (T) 0, (T) 0, (T) 1};
+    tnt::AlignedPtr<T> ptr1(data1, 9);
+    tnt::Tensor<T> t4(tnt::Shape{3, 3}, ptr1);
+    T sol1[3] = {(T) 5, (T) 1, (T) 7};
+    tnt::AlignedPtr<T> vct1(sol1, 3);
+    tnt::Tensor<T> t5(tnt::Shape{3, 1}, vct1);
+
+    REQUIRE(gaussian_elim(t4, t2) == t5);
+}
+
+TEST_CASE_TEMPLATE("gaussian_elim(Tensor&, Tensor&) with a row permutation", T, elim_data_types)
+{
+    T data[9] = {(T) 0, (T) 1, (T) 0, (T) 0, (T) 0, (T) 1, (T) 1, (T) 0, (T) 0};
+    tnt::AlignedPtr<T> ptr(data, 9);
+    tnt::Tensor<T> t1(tnt::Shape{3, 3}, ptr);
+
+    T sol[3] = {(T) 5, (T) 6, (T) 7};
+    tnt::AlignedPtr<T> vct(sol, 3);
+    tnt::Tensor<T> t2(tnt::Shape{3, 1}, vct);
+
+    T sol1[3] = {(T) 7, (T) 5, (T) 6};
+    tnt::AlignedPtr<T> vct1(sol1, 3);
+    tnt::Tensor<T> t3(tnt::Shape{3, 1}, vct1);
+
+    REQUIRE(gaussian_elim(t1, t2) == t3);
+}
 
 #endif // TNT_GAUSSIAN_ELIMINATION_IMPL_HPP
\ No newline at end of file
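One numerical caveat: the implementation divides by the first non-zero lead it finds and only reorders rows after elimination, so a tiny pivot can inflate round-off error. A production version would usually select the largest-magnitude pivot first (partial pivoting). A sketch of that variant, not part of this patch:

    // Partial pivoting: pick the row at or below start_row whose entry in
    // `col` has the largest magnitude, then swap it up before dividing.
    template <typename DataType>
    int maxPivotRow(const DataType* data, int col, int start_row,
                    int num_rows, int num_cols)
    {
        int best = start_row;
        for (int r = start_row + 1; r < num_rows; r++) {
            DataType a = data[r * num_cols + col];
            DataType b = data[best * num_cols + col];
            if ((a < 0 ? -a : a) > (b < 0 ? -b : b))
                best = r;
        }
        return best;  // caller would switchRows(start_row, best, ...) first
    }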
diff --git a/include/tnt/linear/impl/orthogonal_projection_impl.hpp b/include/tnt/linear/impl/orthogonal_projection_impl.hpp
new file mode 100644
index 0000000..302503d
--- /dev/null
+++ b/include/tnt/linear/impl/orthogonal_projection_impl.hpp
@@ -0,0 +1,80 @@
+#ifndef TNT_ORTHOGONAL_PROJECTION_IMPL_HPP
+#define TNT_ORTHOGONAL_PROJECTION_IMPL_HPP
+
+#include <tnt/linear/orthogonal_projection.hpp> // assumed path; original include targets lost in extraction
+#include <doctest/doctest.h>                    // assumed path
+#include <iostream>
+
+#define VECTOR_DIM 1
+#define ROW_INDEX 0
+
+namespace tnt {
+
+    namespace detail {
+
+        template <typename DataType>
+        struct OptimizedOrthogonalProjection {
+
+            static Tensor<DataType> eval(Tensor<DataType>& V1, Tensor<DataType>& V2) {
+                DataType* v = V1.data.data;
+                DataType* s = V2.data.data;
+                int num_rows = V1.shape.axes[ROW_INDEX];
+
+                // Copy s so the input vector is left untouched, then scale
+                // the copy by (v . s) / (s . s).
+                tnt::AlignedPtr<DataType> ptr(num_rows);
+                for (int i = 0; i < num_rows; i++) { ptr.data[i] = s[i]; }
+
+                scalar_multiply(ptr.data, (dotProduct(v, s, num_rows) / dotProduct(s, s, num_rows)), num_rows);
+
+                tnt::Tensor<DataType> proj(tnt::Shape{num_rows, VECTOR_DIM}, ptr);
+
+                return proj;
+            }
+
+            private:
+
+                static void scalar_multiply(DataType* data, DataType scalar, int num_rows) {
+                    for (int i = 0; i < num_rows; i++) { data[i] = data[i] * scalar; }
+                }
+
+                static DataType dotProduct(DataType* V1, DataType* V2, int num_rows) {
+                    DataType product = 0;
+                    for (int i = 0; i < num_rows; i++) { product += V1[i] * V2[i]; }
+                    return product;
+                }
+
+        };
+
+    } // namespace detail
+
+} // namespace tnt
+
+using projection_data_types = doctest::Types<float, double>;
+
+TEST_CASE_TEMPLATE("orthogonal_projection onto an axis", T, projection_data_types) {
+
+    T v1Data[2] = {(T) 1, (T) 0};
+    T v2Data[2] = {(T) 2, (T) 3};
+    T v3Data[2] = {(T) 2, (T) 0};
+    tnt::Tensor<T> v1(tnt::Shape{2, 1}, tnt::AlignedPtr<T>(v1Data, 2));
+    tnt::Tensor<T> v2(tnt::Shape{2, 1}, tnt::AlignedPtr<T>(v2Data, 2));
+    tnt::Tensor<T> v3(tnt::Shape{2, 1}, tnt::AlignedPtr<T>(v3Data, 2));
+
+    REQUIRE(project(v2, v1) == v3);
+}
+
+TEST_CASE_TEMPLATE("orthogonal_projection onto a general line", T, projection_data_types) {
+
+    T v1Data[3] = {(T) 1, (T) 2, (T) 0};
+    T v2Data[3] = {(T) 1, (T) 1, (T) 2};
+    T v3Data[3] = {(T) 1/2, (T) 1/2, (T) 1};
+    T v4Data[3] = {(T) 3/5, (T) 6/5, (T) 0};
+    tnt::Tensor<T> v1(tnt::Shape{3, 1}, tnt::AlignedPtr<T>(v1Data, 3));
+    tnt::Tensor<T> v2(tnt::Shape{3, 1}, tnt::AlignedPtr<T>(v2Data, 3));
+    tnt::Tensor<T> v3(tnt::Shape{3, 1}, tnt::AlignedPtr<T>(v3Data, 3));
+    tnt::Tensor<T> v4(tnt::Shape{3, 1}, tnt::AlignedPtr<T>(v4Data, 3));
+
+    REQUIRE(project(v1, v2) == v3);
+    REQUIRE(project(v2, v1) == v4);
+}
+
+#endif // TNT_ORTHOGONAL_PROJECTION_IMPL_HPP
diff --git a/include/tnt/linear/linear.hpp b/include/tnt/linear/linear.hpp
index cbf871b..4a6938d 100644
--- a/include/tnt/linear/linear.hpp
+++ b/include/tnt/linear/linear.hpp
@@ -5,6 +5,7 @@
 #include
 #include
 #include <tnt/linear/Gaussian_elimination.hpp>
+#include <tnt/linear/orthogonal_projection.hpp>
 //#include
 
 #endif // TNT_LINEAR_HPP
diff --git a/include/tnt/linear/orthogonal_projection.hpp b/include/tnt/linear/orthogonal_projection.hpp
new file mode 100644
index 0000000..cd43d13
--- /dev/null
+++ b/include/tnt/linear/orthogonal_projection.hpp
@@ -0,0 +1,46 @@
+#ifndef TNT_ORTHOGONAL_PROJECTION_HPP
+#define TNT_ORTHOGONAL_PROJECTION_HPP
+
+#include <tnt/core/tensor.hpp>  // assumed path; original include targets lost in extraction
+#include <tnt/utils/errors.hpp> // assumed path
+
+namespace tnt
+{
+
+    namespace detail
+    {
+
+        template <typename DataType>
+        struct OptimizedOrthogonalProjection
+        {
+            static Tensor<DataType> eval(Tensor<DataType>&, Tensor<DataType>&);
+        };
+
+    } // namespace detail
+
+/// \brief Executes the orthogonal projection of a vector onto the line spanned by another vector
+///
+/// \param V1 The vector to be projected. It has size `Nx1`
+/// \param V2 The vector that spans the line to be projected upon
+/// \returns The orthogonal projection of V1
+/// \requires V1 and V2 shall be column vectors of the same length
+    template <typename DataType>
+    inline Tensor<DataType> project(Tensor<DataType>& V1, Tensor<DataType>& V2)
+    {
+
+        TNT_ASSERT(V2.shape.axes[1] == 1, InvalidParameterException("tnt::project()",
+                   __FILE__, __LINE__, "projection requires a vector"));
+
+        TNT_ASSERT(V1.shape.axes[1] == 1, InvalidParameterException("tnt::project()",
+                   __FILE__, __LINE__, "projection requires a vector"));
+
+        TNT_ASSERT(V2.shape.axes[0] == V1.shape.axes[0], InvalidParameterException("tnt::project()",
+                   __FILE__, __LINE__, "vectors must have the same dimensions"));
+
+        return detail::OptimizedOrthogonalProjection<DataType>::eval(V1, V2);
+    }
+
+} // namespace tnt
+
+#endif // TNT_ORTHOGONAL_PROJECTION_HPP
\ No newline at end of file
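For reference, project() computes the textbook formula proj_s(v) = ((v . s) / (s . s)) s. A usage sketch matching the second projection test above: v1 . v2 = 3 and v2 . v2 = 6, so the scale factor is 1/2:

    float v1Data[3] = {1.0f, 2.0f, 0.0f};
    float v2Data[3] = {1.0f, 1.0f, 2.0f};
    tnt::Tensor<float> v1(tnt::Shape{3, 1}, tnt::AlignedPtr<float>(v1Data, 3));
    tnt::Tensor<float> v2(tnt::Shape{3, 1}, tnt::AlignedPtr<float>(v2Data, 3));

    // proj_{v2}(v1) = (3 / 6) * {1, 1, 2}
    tnt::Tensor<float> p = tnt::project(v1, v2);  // {0.5f, 0.5f, 1.0f}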