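// CatKernel.cpp: serial CPU concatenation kernel, registered for
// cat_serial_stub below.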
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cpu/CatKernel.h>
#include <ATen/cpu/vec256/functional.h>
#include <ATen/cpu/vec256/vec256.h>

namespace at { namespace native {

namespace {
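
// Per-input metadata cached once before the copy loops: the raw data
// pointer and the number of contiguous elements this input contributes to
// each outer slice (its extent along the cat dimension times the result's
// inner stride).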
struct InputMeta {
  void* data_ptr;
  int64_t inner_size;

  InputMeta(const Tensor& t, int64_t dim, int64_t inner)
    : data_ptr(t.data_ptr())
    , inner_size(t.size(dim) * inner) {}
};
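
// Single-threaded concatenation along `dim`: walk the outer slices of the
// result and, within each slice, append every input's contiguous chunk in
// order.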
template <typename scalar_t>
void cat_serial_kernel_impl(Tensor& result, TensorList tensors, int64_t dim) {
  // All dimensions at and after `dim` are contiguous, so each input
  // contributes one contiguous chunk of inner_size elements per outer
  // slice; `outer` counts those slices.
  int64_t outer = result.numel() / (result.size(dim) * result.stride(dim));
  scalar_t* result_data = result.data_ptr<scalar_t>();
  int64_t ninputs = tensors.size();
  std::vector<InputMeta> inputs;
  inputs.reserve(ninputs);
  for (auto const& tensor : tensors) {
    inputs.emplace_back(tensor, dim, result.stride(dim));
  }
  using Vec = vec256::Vec256<scalar_t>;
  scalar_t* result_ptr = result_data;
  for (int64_t i = 0; i < outer; ++i) {
    for (int64_t j = 0; j < ninputs; j++) {
      int64_t local_inner = inputs[j].inner_size;
      scalar_t* input_ptr = (scalar_t*)(inputs[j].data_ptr) + i * local_inner;
      if (local_inner < Vec::size()) {
        // Chunk is smaller than one vector width: fall back to a scalar
        // copy loop (unrolled where the compiler supports the pragma).
#ifndef _MSC_VER
# pragma unroll
#endif
        for (int64_t k = 0; k < local_inner; k++) {
          result_ptr[k] = input_ptr[k];
        }
      } else {
        // Vectorized copy: mapping the identity function over the chunk
        // loads and stores Vec::size() elements at a time.
        vec256::map(
            [](Vec x) { return x; },
            result_ptr,
            input_ptr,
            local_inner);
      }
      result_ptr += local_inner;
    }
  }
}
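
// Dispatch entry point: selects the scalar_t instantiation from the
// result's floating-point dtype.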
void cat_serial_kernel(Tensor& result, TensorList tensors, int64_t dim) {
  AT_DISPATCH_FLOATING_TYPES(result.scalar_type(), "cat_serial_kernel", [&]() {
    cat_serial_kernel_impl<scalar_t>(result, tensors, dim);
  });
}
} // anonymous namespace

REGISTER_DISPATCH(cat_serial_stub, &cat_serial_kernel);

}} // at::native