// LinearAlgebraUtils.h (forked from pytorch/pytorch)
#include <c10/core/ScalarType.h>
#include <ATen/ATen.h>
#include <ATen/ExpandUtils.h>
#include <ATen/TensorUtils.h>
#include <limits>
#include <sstream>
#include <cstring>

namespace at { namespace native {

/*
 * Clones a Tensor so that the following conditions hold:
 * If we think of a Tensor as having size (B, M, N), where B is any number
 * of batch dimensions, then:
 * - Each (M, N) matrix is in column-major form
 * - Let Tensor P have size (B, M, N) and Q have size (B, M', N').
 *   Then when laid out in memory, the M by N matrix starting at
 *   P.data_ptr()[b * M * N] corresponds to the same batch as the M' by N'
 *   matrix starting at Q.data_ptr()[b * M' * N'].
 */
static inline Tensor cloneBatchedColumnMajor(const Tensor& src) {
  // If src is already in batched column-major format, then
  // this will be efficient (no reordering of the data will occur)
  // because the first transpose will make the tensor contiguous,
  // and cloning a contiguous tensor is fast.
  auto result = src.transpose(-2, -1).clone(at::MemoryFormat::Contiguous);
  result.transpose_(-2, -1);
  return result;
}
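
// Illustrative sketch (assumed shapes, not part of the original header): for a
// hypothetical `src` of size (2, 3, 4), the clone keeps the same sizes, but each
// 3 x 4 matrix becomes column-major:
//   auto col_major = cloneBatchedColumnMajor(src);
//   col_major.sizes()   -> {2, 3, 4}
//   col_major.strides() -> {12, 1, 3}   (contiguous src would have {12, 4, 1})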

/*
 * Given batches of matrices with an arbitrary number of batch dimensions,
 * computes the number of batches.
 */
static inline int64_t batchCount(const Tensor& batched_matrices) {
  int64_t result = 1;
  for (int64_t i = 0; i < batched_matrices.ndimension() - 2; i++) {
    result *= batched_matrices.size(i);
  }
  return result;
}

// Computes the number of elements of a matrix in a batched matrix tensor
static inline int64_t matrixStride(const Tensor& batched_matrices) {
  return batched_matrices.size(-1) * batched_matrices.size(-2);
}
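
// Illustrative example (assumed shapes, not part of the original header): for a
// batched matrix tensor `t` of size (2, 3, 4, 5),
//   batchCount(t)   == 2 * 3 == 6    (product of all batch dimensions)
//   matrixStride(t) == 4 * 5 == 20   (elements per (M, N) matrix)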

// Returns the epsilon value for floating-point types except half
static inline double _get_epsilon(const ScalarType& sc_type) {
  switch (sc_type) {
    case at::ScalarType::Float:
      return static_cast<double>(std::numeric_limits<float>::epsilon());
    case at::ScalarType::Double:
      return std::numeric_limits<double>::epsilon();
    default:
      AT_ERROR("This function doesn't handle types other than float and double");
  }
}

// Validates input shapes and devices
// for linear solve methods (solve, cholesky_solve, lu_solve, triangular_solve)
static inline void linearSolveCheckInputs(const Tensor& self, const Tensor& A, const char* name) {
  TORCH_CHECK(self.device() == A.device(),
              "Expected b and A to be on the same device, but found b on ",
              self.device(), " and A on ", A.device(), " instead.");

  TORCH_CHECK(A.size(-1) == A.size(-2),
              "A must be batches of square matrices, "
              "but they are ", A.size(-1), " by ", A.size(-2), " matrices");

  TORCH_CHECK(A.size(-1) == self.size(-2),
              "Incompatible matrix sizes for ", name, ": each A "
              "matrix is ", A.size(-1), " by ", A.size(-1),
              " but each b matrix is ", self.size(-2), " by ", self.size(-1));
}
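
// Illustrative shapes that pass the checks above (assumed, not part of the
// original header): A of size (..., n, n) and b of size (..., n, k) on the same
// device, e.g. A: (2, 3, 3) with b: (2, 3, 5) for two systems with five
// right-hand sides each.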

// Validates input shapes for operations on batches of square matrices (inverse, cholesky, symeig)
static inline void squareCheckInputs(const Tensor& self) {
  TORCH_CHECK(self.size(-1) == self.size(-2),
              "A must be batches of square matrices, "
              "but they are ", self.size(-1), " by ", self.size(-2), " matrices");
}

/*
 * Given a vector of int64_t infos, obtained after a batch operation,
 * this function checks if the computation over all these batches has been
 * successful (info = 0) or not, and reports an error in the latter case.
 */
static inline void batchCheckErrors(std::vector<int64_t>& infos, const char* name, bool allow_singular=false) {
  for (size_t i = 0; i < infos.size(); i++) {
    auto info = infos[i];
    if (info < 0) {
      AT_ERROR(name, ": For batch ", i, ": Argument ", -info, " has illegal value");
    } else if (info > 0) {
      if (strstr(name, "svd")) {
        AT_ERROR(name, ": the updating process of SBDSDC did not converge (error: ", info, ")");
      } else if (strstr(name, "symeig")) {
        AT_ERROR(name, ": For batch ", i, ": the algorithm failed to converge; ", info,
                 " off-diagonal elements of an intermediate tridiagonal form did not converge to zero.");
      } else if (!allow_singular) {
        AT_ERROR(name, ": For batch ", i, ": U(", info, ",", info, ") is zero, singular U.");
      }
    }
  }
}

/*
 * This is an overloaded case of the previous function for a tensor of infos.
 */
static inline void batchCheckErrors(const Tensor& infos, const char* name, bool allow_singular=false) {
  auto batch_size = infos.numel();
  auto infos_cpu = infos.to(at::kCPU);
  auto infos_data = infos_cpu.data_ptr<int>();
  for (int64_t i = 0; i < batch_size; i++) {
    auto info = infos_data[i];
    if (info < 0) {
      AT_ERROR(name, ": For batch ", i, ": Argument ", -info, " has illegal value");
    } else if (!allow_singular && info > 0) {
      AT_ERROR(name, ": For batch ", i, ": U(", info, ",", info, ") is zero, singular U.");
    }
  }
}

/*
 * Given an info int, obtained after a single operation, this function checks
 * if the computation has been successful (info = 0) or not, and reports an
 * error in the latter case.
 */
static inline void singleCheckErrors(int64_t info, const char* name, bool allow_singular=false) {
  if (info < 0) {
    AT_ERROR(name, ": Argument ", -info, " has illegal value");
  } else if (info > 0) {
    if (strstr(name, "svd")) {
      AT_ERROR(name, ": the updating process of SBDSDC did not converge (error: ", info, ")");
    } else if (strstr(name, "symeig")) {
      AT_ERROR(name, ": the algorithm failed to converge; ", info,
               " off-diagonal elements of an intermediate tridiagonal form did not converge to zero.");
    } else if (!allow_singular) {
      AT_ERROR(name, ": U(", info, ",", info, ") is zero, singular U.");
    }
  }
}
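
// Summary of the LAPACK-style `info` convention used by the checks above
// (added for clarity, not part of the original header):
//   info == 0 -> success
//   info <  0 -> argument number -info passed to the backend routine was invalid
//   info >  0 -> numerical failure, e.g. a zero diagonal entry U(info, info) in
//                an LU/Cholesky factor, reported as a singularity unless
//                allow_singular is set.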

// Checks that every Tensor in a TensorList has the given number of dimensions
static inline void checkAllSameDim(TensorList tensors, int64_t dim) {
  for (auto &t : tensors) {
    TORCH_CHECK(t.dim() == dim, "Tensor dimension is ", t.dim(), ", expected ", dim, " instead.");
  }
}

static inline std::tuple<Tensor,Tensor> _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2, const char* name) {
  linearSolveCheckInputs(arg1, arg2, name);

  // broadcast the batch dimensions of arg1 and arg2.
  IntArrayRef arg1_batch_sizes(arg1.sizes().data(), arg1.ndimension() - 2);
  IntArrayRef arg2_batch_sizes(arg2.sizes().data(), arg2.ndimension() - 2);
  std::vector<int64_t> expand_batch_portion = infer_size(arg1_batch_sizes, arg2_batch_sizes);

  std::vector<int64_t> arg1_expand_size({expand_batch_portion});
  arg1_expand_size.insert(arg1_expand_size.end(), { arg1.size(-2), arg1.size(-1) });

  std::vector<int64_t> arg2_expand_size({expand_batch_portion});
  arg2_expand_size.insert(arg2_expand_size.end(), { arg2.size(-2), arg2.size(-1) });

  Tensor arg1_broadcasted = arg1.expand(arg1_expand_size);
  Tensor arg2_broadcasted = arg2.expand(arg2_expand_size);
  return std::make_tuple(arg1_broadcasted, arg2_broadcasted);
}
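
// Illustrative broadcast example (assumed shapes, not part of the original
// header): for arg1 = b of size (2, 1, 4, 6) and arg2 = A of size (5, 4, 4),
// the batch portions (2, 1) and (5) broadcast to (2, 5), so the function
// returns views expanded to
//   b: (2, 5, 4, 6)    A: (2, 5, 4, 4)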

// Returns the tensor `self` with the given axes permuted to the end.
static inline Tensor _move_to_end(const Tensor& self, IntArrayRef axes) {
  const std::vector<int64_t> a = axes.vec();
  const int64_t ndim = self.ndimension();
  std::vector<int64_t> perm;

  // Keep every axis not listed in `axes`, in their original order...
  for (int64_t i = 0; i < ndim; i++) {
    auto it = std::find(a.begin(), a.end(), i);
    if (it == a.end()) {
      perm.push_back(i);
    }
  }
  // ...then append the requested axes.
  for (auto i : a) {
    perm.push_back(i);
  }

  TORCH_CHECK((int64_t)perm.size() == ndim,
              "duplicate or invalid axis in 'dim' argument for tensor with ndim==", ndim);

  return self.permute(perm);
}
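
// Illustrative example (assumed shapes, not part of the original header): for a
// tensor of size (2, 3, 4, 5) and axes = {1, 3}, the permutation is
// (0, 2, 1, 3), so the result has size (2, 4, 3, 5) with the requested axes at
// the end.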

// Function to compute sizes, strides and the number of columns for the Q matrix in the QR Decomposition
static inline std::tuple<std::vector<int64_t>,
                         std::vector<int64_t>,
                         int64_t> _compute_geometry_for_Q(const Tensor& input, bool some) {
  int64_t m = input.size(-2), n = input.size(-1);
  int64_t n_columns_q;

  // We need to compute the required size of Q based on the `some` option
  auto q_sizes = input.sizes().vec();
  if (!some && m > n) {
    q_sizes[input.dim() - 1] = m;
    n_columns_q = m;
  } else {
    q_sizes[input.dim() - 1] = n;
    n_columns_q = std::min(m, n);
  }
  auto q_strides = at::detail::defaultStrides(q_sizes);

  // Q should be a column-major or a batch of column-major matrices
  // ... x m x n will have strides: ...., n, 1
  // We require: ...., 1, m
  q_strides[input.dim() - 1] = m;
  q_strides[input.dim() - 2] = 1;
  return std::make_tuple(q_sizes, q_strides, n_columns_q);
}
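
// Illustrative example (assumed shapes, not part of the original header): for a
// 5 x 3 input,
//   some == true  -> q_sizes = (5, 3), q_strides = (1, 5), n_columns_q = 3
//   some == false -> q_sizes = (5, 5), q_strides = (1, 5), n_columns_q = 5
// In both cases the strides describe a column-major Q, as required by LAPACK.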

// Function to generate empty tensors of required size, strides and dtype for the SVD operation
static inline std::tuple<Tensor, Tensor, Tensor> _create_U_S_VT(const Tensor& input, bool some, bool compute_uv) {
  auto sizes = input.sizes().vec();
  int64_t m = input.size(-2), n = input.size(-1);

  sizes[input.dim() - 1] = (compute_uv && some) ? std::min(m, n) : m;
  auto strides = at::detail::defaultStrides(sizes);
  // U should be a column-major or a batch of column-major matrices
  // ... x m x ucol will have strides: ...., ucol, 1
  // We require: ...., 1, m
  strides[input.dim() - 1] = m;
  strides[input.dim() - 2] = 1;

  Tensor U_empty;
  if (!input.is_cuda()) {
    U_empty = at::empty_strided(sizes, strides, input.options());
  } else {
    // NB: U_empty is an empty tensor created on the CPU intentionally, because magma_(d/s)gesdd
    // (which is the driver routine for the divide and conquer SVD operation)
    // takes in arrays on the CPU as input. This routine is a hybrid CPU-GPU routine that
    // moves the inputs between devices internally.
    U_empty = at::empty_strided(sizes, strides, input.options().device(at::kCPU));
  }

  sizes[input.dim() - 2] = n;
  sizes[input.dim() - 1] = n;
  // VT should be a row-major or a batch of row-major matrices
  Tensor VT_empty;
  if (!input.is_cuda()) {
    VT_empty = at::empty(sizes, input.options());
  } else {
    // NB: VT_empty is an empty tensor created on the CPU intentionally, because magma_(d/s)gesdd
    // (which is the driver routine for the divide and conquer SVD operation)
    // takes in arrays on the CPU as input. This routine is a hybrid CPU-GPU routine that
    // moves the inputs between devices internally.
    VT_empty = at::empty(sizes, input.options().device(at::kCPU));
  }

  sizes.pop_back();
  sizes[input.dim() - 2] = std::min(m, n);
  Tensor S_empty;
  ScalarType dtype = toValueType(typeMetaToScalarType(input.dtype()));
  if (!input.is_cuda()) {
    S_empty = at::empty(sizes, input.options().dtype(dtype));
  } else {
    // NB: S_empty is an empty tensor created on the CPU intentionally, because magma_(d/s)gesdd
    // (which is the driver routine for the divide and conquer SVD operation)
    // takes in arrays on the CPU as input. This routine is a hybrid CPU-GPU routine that
    // moves the inputs between devices internally.
    S_empty = at::empty(sizes, input.options().dtype(dtype).device(at::kCPU));
  }
  return std::tuple<Tensor, Tensor, Tensor>(U_empty, S_empty, VT_empty);
}
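
// Illustrative shapes (assumed, not part of the original header): for a 5 x 3
// input with compute_uv == true and some == true,
//   U:  5 x 3, column-major (strides 1, 5)
//   S:  3, allocated with the value type of the input dtype
//   VT: 3 x 3, row-major
// For CUDA inputs all three tensors are allocated on the CPU, since the MAGMA
// gesdd driver expects host arrays and moves data between devices itself.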

// Function used instead of .to so that the original strides are retained
// (.to doesn't retain strides and makes the output tensor contiguous)
static inline Tensor same_stride_to(const Tensor& original_tensor, const at::TensorOptions& options) {
  auto strided_to = at::empty_strided(original_tensor.sizes(),
                                      original_tensor.strides(),
                                      options);
  strided_to.copy_(original_tensor);
  return strided_to;
}
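
// Illustrative use (assumed, not part of the original header): copying a
// column-major factor to another dtype while keeping its layout, e.g.
//   auto dst = same_stride_to(col_major_src, col_major_src.options().dtype(at::kDouble));
//   // dst.strides() matches col_major_src.strides()
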
}} // namespace at::native