fix: adjust tests and interfaces for additional namespace ttv
bassoy committed Nov 1, 2024
1 parent ca8b032 commit 81012ec
Showing 6 changed files with 88 additions and 100 deletions.
7 changes: 4 additions & 3 deletions example/interface2.cpp
@@ -9,11 +9,12 @@ g++ -I../include/ -std=c++17 -Ofast -fopenmp interface2.cpp -o interface2 && ./i
#include <numeric>
#include <iostream>

+using namespace tlib::ttv;

int main()
{
using value_t = float;
-using tensor_t = tlib::ttv::tensor<value_t>; // or std::array<value_t,N>
+using tensor_t = tensor<value_t>; // or std::array<value_t,N>
using iterator_t = std::ostream_iterator<value_t>;

auto mode = 2ul;
@@ -39,8 +40,8 @@ int main()
*/

// correct shape, layout and strides of the output tensors C1,C2 are automatically computed and returned by the functions.
-auto C1 = tlib::ttv::tensor_times_vector(mode, A,B, tlib::execution_policy::seq , tlib::slicing_policy::subtensor, tlib::fusion_policy::none );
-auto C2 = tlib::ttv::tensor_times_vector(mode, A,B, tlib::execution_policy::par_loop, tlib::slicing_policy::slice, tlib::fusion_policy::all );
+auto C1 = ttv(mode, A,B, execution_policy::seq , slicing_policy::subtensor, fusion_policy::none );
+auto C2 = ttv(mode, A,B, execution_policy::par_loop, slicing_policy::slice, fusion_policy::all );


std::cout << "C1 = [ "; std::copy(C1.begin(), C1.end(), iterator_t(std::cout, " ")); std::cout << " ];" << std::endl;
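Note on the renamed high-level interface: a minimal sketch, assuming the include path <tlib/ttv.h> (matching the file tree above) and the two-argument (shape, layout) tensor constructor shown in the include/tlib/ttv.h hunk further down; the ttv call and the policy names are taken from the diff itself.

// Sketch only - the include path and tensor construction are assumptions.
#include <tlib/ttv.h>
#include <cstddef>
#include <vector>

int main()
{
    using namespace tlib::ttv;
    using shape_t = std::vector<std::size_t>;

    auto A = tensor<float>( shape_t{4,3,2}, shape_t{1,2,3} ); // order-3 input
    auto B = tensor<float>( shape_t{3},     shape_t{1} );     // length matches na[1]

    // renamed entry point; policies now live under tlib::ttv
    auto C = ttv( 2ul, A, B, execution_policy::seq,
                  slicing_policy::subtensor, fusion_policy::none );
    (void)C; // C would have shape {4,2}; unused in this sketch
}
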
36 changes: 19 additions & 17 deletions example/interface3.cpp
@@ -9,6 +9,8 @@ g++ -I../include/ -std=c++17 -Ofast -fopenmp interface3.cpp -o interface3 && ./i
#include <numeric>
#include <iostream>

+using namespace tlib::ttv;


int main()
{
@@ -21,14 +23,14 @@ int main()
auto na = vector_t{4,3,2}; // input shape tuple
auto p = na.size(); // order of input tensor, i.e. number of dimensions - here 3
auto k = 1ul; // k-order of input tensor
-auto pia = tlib::ttv::detail::generate_k_order_layout(p,k); // layout tuple of input tensor - here {1,2,3};
-auto wa = tlib::ttv::detail::generate_strides(na,pia); // stride tuple of input tensor - here {1,4,12};
+auto pia = detail::generate_k_order_layout(p,k); // layout tuple of input tensor - here {1,2,3};
+auto wa = detail::generate_strides(na,pia); // stride tuple of input tensor - here {1,4,12};
auto nn = std::accumulate(na.begin(),na.end(),1ul,std::multiplies<>()); // number of elements of input tensor


auto q = 2ul; // contraction mode - here 2.
auto nb = vector_t{na[q-1]};
-auto nc = tlib::ttv::detail::generate_output_shape(na,q); // output shape tuple here {4,2};
+auto nc = detail::generate_output_shape(na,q); // output shape tuple here {4,2};


auto A = tensor_t(nn ,0.0f); // tensor A is a std::vector<value_t> of length nn initialized with 0
@@ -37,8 +39,10 @@ int main()
auto C2 = tensor_t(nn/nb[0] ,0.0f);


-auto pic = tlib::ttv::detail::generate_output_layout (pia,q); // output layout is computed according to input layout and contraction mode - here {1,2};
-auto wc = tlib::ttv::detail::generate_strides(nc,pic); // output strides is computed according to output shape and output layout - here {1,4};
+// output layout is computed according to input layout and contraction mode - here {1,2};
+auto pic = detail::generate_output_layout (pia,q);
+// output strides is computed according to output shape and output layout - here {1,4};
+auto wc = detail::generate_strides(nc,pic);

std::iota(A.begin(),A.end(),value_t{1});

@@ -56,19 +60,17 @@
*/


-tlib::ttv::tensor_times_vector(
-    tlib::execution_policy::seq, tlib::slicing_policy::slice, tlib::fusion_policy::none,
-    q, p,
-    A .data(), na.data(), wa.data(), pia.data(),
-    B .data(), nb.data(),
-    C1.data(), nc.data(), wc.data(), pic.data() );
+ttv(execution_policy::seq, slicing_policy::slice, fusion_policy::none,
+    q, p,
+    A .data(), na.data(), wa.data(), pia.data(),
+    B .data(), nb.data(),
+    C1.data(), nc.data(), wc.data(), pic.data() );

-tlib::ttv::tensor_times_vector(
-    tlib::execution_policy::par_loop, tlib::slicing_policy::subtensor, tlib::fusion_policy::all,
-    q, p,
-    A .data(), na.data(), wa.data(), pia.data(),
-    B .data(), nb.data(),
-    C2.data(), nc.data(), wc.data(), pic.data() );
+ttv(execution_policy::par_loop, slicing_policy::subtensor, fusion_policy::all,
+    q, p,
+    A .data(), na.data(), wa.data(), pia.data(),
+    B .data(), nb.data(),
+    C2.data(), nc.data(), wc.data(), pic.data() );

std::cout << "C2 = [ "; std::copy(C2.begin(), C2.end(), iterator_t(std::cout, " ")); std::cout << " ];" << std::endl;
std::cout << "C1 = [ "; std::copy(C1.begin(), C1.end(), iterator_t(std::cout, " ")); std::cout << " ];" << std::endl;
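Aside: the stride tuples in this example follow mechanically from shape and layout. A hedged, illustrative reimplementation of what detail::generate_strides computes (our code and names, not the library's):

#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative sketch: each dimension, taken in layout order pi (1-based),
// strides over the full extent of the previous layout dimension.
std::vector<std::size_t> strides_sketch(std::vector<std::size_t> const& n,
                                        std::vector<std::size_t> const& pi)
{
    auto w = std::vector<std::size_t>(n.size(), 1u);
    for (std::size_t r = 1u; r < n.size(); ++r)
        w[pi[r]-1] = w[pi[r-1]-1] * n[pi[r-1]-1];
    return w;
}

int main()
{
    // Matches the comments above: na={4,3,2}, pia={1,2,3} -> wa={1,4,12}.
    assert(( strides_sketch({4,3,2},{1,2,3}) == std::vector<std::size_t>{1,4,12} ));
}
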
21 changes: 3 additions & 18 deletions include/tlib/detail/tags.h
@@ -18,23 +18,8 @@
#pragma once


-namespace tlib::execution_policy
+namespace tlib::ttv::execution_policy
{
-/*
-struct sequential_policy {};
-struct sequential_blas_policy {};
-struct parallel_policy {};
-struct parallel_blas_policy {};
-struct threaded_blas_policy {};
-struct parallel_threaded_blas_policy{};
-inline constexpr sequential_policy seq;
-inline constexpr sequential_blas_policy seq_blas;
-inline constexpr parallel_policy par;
-inline constexpr parallel_blas_policy blas;
-inline constexpr threaded_blas_policy threaded;
-inline constexpr parallel_threaded_blas_policy parallel_threaded;
-*/

struct sequential_t {};
struct sequential_blas_t {};
@@ -51,7 +36,7 @@ inline constexpr parallel_blas_t par_blas;
inline constexpr parallel_loop_blas_t par_blas_loop;
}

-namespace tlib::slicing_policy
+namespace tlib::ttv::slicing_policy
{
struct slice_t {};
struct subtensor_t {};
@@ -61,7 +46,7 @@ inline constexpr subtensor_t subtensor;
}


-namespace tlib::fusion_policy
+namespace tlib::ttv::fusion_policy
{
struct none_t {};
struct outer_t {};
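Note: the *_t types in this header are empty tag structs; their only job is compile-time overload selection. A self-contained sketch of that pattern with hypothetical names (demo_policy and run are ours, not the library's):

#include <iostream>

namespace demo_policy
{
struct sequential_t {};
struct parallel_loop_t {};
inline constexpr sequential_t seq{};
inline constexpr parallel_loop_t par_loop{};
}

// The tag argument carries no data; it only picks an overload at compile time.
void run(demo_policy::sequential_t)    { std::cout << "sequential kernel\n"; }
void run(demo_policy::parallel_loop_t) { std::cout << "parallel-loop kernel\n"; }

int main()
{
    run(demo_policy::seq);      // resolves to the sequential overload
    run(demo_policy::par_loop); // resolves to the parallel-loop overload
}
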
10 changes: 5 additions & 5 deletions include/tlib/ttv.h
@@ -52,7 +52,7 @@ namespace tlib::ttv
*
*/
template<class value_t, class size_t, class execution_t, class slicing_t, class fusion_t>
-inline void tensor_times_vector(
+inline void ttv(
execution_t ep, slicing_t sp, fusion_t fp,
size_t const q, size_t const p,
value_t const*const a, size_t const*const na, size_t const*const wa, size_t const*const pia,
@@ -97,15 +97,15 @@ inline void tensor_times_vector(
*
*/
template<class value_t, class execution_t, class slicing_t, class fusion_t>
-inline auto tensor_times_vector(
+inline auto ttv(
std::size_t q, tensor<value_t> const& a, tensor<value_t> const& b,
execution_t ep, slicing_t sp, fusion_t fp)
{
auto c_shape = detail::generate_output_shape (a.shape() ,q);
auto c_layout = detail::generate_output_layout(a.layout(),q);
auto c = tensor<value_t>( c_shape, c_layout );

-tensor_times_vector( ep, sp, fp, q, a.order(),
+ttv( ep, sp, fp, q, a.order(),
a.data().data(), a.shape().data(), a.strides().data(), a.layout().data(),
b.data().data(), b.shape().data(),
c.data().data(), c.shape().data(), c.strides().data(), c.layout().data());
Expand All @@ -122,6 +122,6 @@ inline auto tensor_times_vector(
template<class value_t>
inline auto operator*(tlib::ttv::tensor_view<value_t> const& a, tlib::ttv::tensor<value_t> const& b)
{
-return tlib::ttv::tensor_times_vector(a.contraction_mode(), a.get_tensor(), b,
-    tlib::execution_policy::par_loop, tlib::slicing_policy::subtensor, tlib::fusion_policy::all) ;
+return tlib::ttv::ttv(a.contraction_mode(), a.get_tensor(), b,
+    tlib::ttv::execution_policy::par_loop, tlib::ttv::slicing_policy::subtensor, tlib::ttv::fusion_policy::all) ;
}
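
Note: the convenience overload above builds C by dropping the contracted mode q from A's shape and layout, and operator* forwards to it with the par_loop/subtensor/all policies fixed. An illustrative sketch of the shape rule only (our code, not the library's generate_output_shape):

#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative sketch: remove the contracted extent; q is 1-based.
std::vector<std::size_t> output_shape_sketch(std::vector<std::size_t> na, std::size_t q)
{
    na.erase(na.begin() + static_cast<std::ptrdiff_t>(q - 1));
    return na;
}

int main()
{
    // Matches interface3.cpp above: na={4,3,2}, q=2 -> nc={4,2}.
    assert(( output_shape_sketch({4,3,2}, 2) == std::vector<std::size_t>{4,2} ));
}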