// ValidateCompressedIndicesCommon.h

#pragma once

#include <ATen/Dispatch.h>
#include <ATen/Tensor.h>
#include <ATen/Utils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/sparse/Macros.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/tensor.h>
#endif

#ifdef GPUCC
#define NAME "compressed_index_invariance_checks_cuda"
#else
#define NAME "compressed_index_invariance_checks_cpu"
#endif

#define INVARIANT_CHECK_FUNC_API static INLINE FUNCAPI void
namespace at {
namespace native {

namespace {

// NOTE: all the checks but the very last one are designed
// to work with vectors.
// To enable vectorization one would need to write a conversion
// Vec -> bool and make kernel launchers call into vectorized
// execution paths.

// All the invariants are described in
// https://pearu.github.io/bsr_tensor_invariants.html
// NOTE: in the code we also use `cidx/idx` to refer to
// `compressed_indices/plain_indices` respectively.
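//
// As a concrete example, a 3x4 CSR tensor with nnz == 4 may have:
//   crow_indices = [0, 1, 1, 4], col_indices = [0, 0, 2, 3].
// Invariant 5.1 holds (crow_indices[0] == 0), 5.2 holds
// (crow_indices[-1] == 4 == nnz), the consecutive differences
// [1, 0, 3] satisfy 5.3, every column index lies in [0, 4) (5.4/5.5),
// and each row's slice of col_indices is sorted with distinct values
// (5.6), e.g. row 2 owns col_indices[1:4] == [0, 2, 3].
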
INVARIANT_CHECK_FUNC_API
_assert(const bool cond, const char* const message) {
#ifdef GPUCC
  CUDA_KERNEL_ASSERT(cond && message);
#else
  TORCH_CHECK(cond, message);
#endif
}
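
// On CUDA builds `_assert` traps inside the kernel via the device-side
// assert (CUDA_KERNEL_ASSERT); on CPU it throws through TORCH_CHECK on
// the host.
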
enum class CDimName : bool { CRow, CCol };

// Invariant 5.1
// compressed_index[..., 0] == 0.
template <CDimName cdim_name, typename index_t>
INVARIANT_CHECK_FUNC_API _check_first_cidx_is_zero(
    const index_t& cidx,
    const index_t& zero) {
  const bool invariant = cidx == zero;
  if (cdim_name == CDimName::CRow) {
    _assert(invariant, "`crow_indices[..., 0] == 0` is not satisfied.");
  } else {
    _assert(invariant, "`ccol_indices[..., 0] == 0` is not satisfied.");
  }
}

// Invariant 5.2
// compressed_index[..., -1] == nnz.
template <CDimName cdim_name, typename index_t>
INVARIANT_CHECK_FUNC_API _check_last_cidx_is_nnz(
    const index_t& cidx,
    const index_t& nnz) {
  const bool invariant = cidx == nnz;
  if (cdim_name == CDimName::CRow) {
    _assert(invariant, "`crow_indices[..., -1] == nnz` is not satisfied.");
  } else {
    _assert(invariant, "`ccol_indices[..., -1] == nnz` is not satisfied.");
  }
}

// Invariant 5.3
// 0 <= compressed_indices[..., 1:] - compressed_indices[..., :-1] <= plain_dim.
template <CDimName cdim_name, typename index_t>
INVARIANT_CHECK_FUNC_API _check_cidx_nondecreasing_locally_bounded_sequence(
    const index_t& cidx,
    const index_t& cidx_next,
    const index_t& zero,
    const index_t& dim) {
  const auto s_cidx = cidx_next - cidx;
  const bool invariant = zero <= s_cidx && s_cidx <= dim;
  if (cdim_name == CDimName::CRow) {
    _assert(
        invariant,
        "`0 <= crow_indices[..., 1:] - crow_indices[..., :-1] <= ncols` is not satisfied.");
  } else {
    _assert(
        invariant,
        "`0 <= ccol_indices[..., 1:] - ccol_indices[..., :-1] <= nrows` is not satisfied.");
  }
}
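
// E.g. with crow_indices = [0, 1, 1, 4] from the example above, the
// consecutive differences crow_indices[1:] - crow_indices[:-1] == [1, 0, 3]
// are exactly the per-row nnz counts, so each must lie within [0, ncols].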

// Invariants 5.4 and 5.5
// 0 <= plain_index < plain_dim.
template <CDimName cdim_name, typename index_t>
INVARIANT_CHECK_FUNC_API _check_idx_bounds(
    const index_t& idx,
    const index_t& zero,
    const index_t& dim) {
  const bool invariant = zero <= idx && idx < dim;
  if (cdim_name == CDimName::CRow) {
    _assert(invariant, "`0 <= col_indices < ncols` is not satisfied.");
  } else {
    _assert(invariant, "`0 <= row_indices < nrows` is not satisfied.");
  }
}

// Invariant 5.6
// plain_indices[..., compressed_indices[..., i - 1]:compressed_indices[..., i]]
// for all i = 1, ..., compressed_dim
// are sorted and contain distinct values along the last dimension.
template <CDimName cdim_name, typename index_t>
INVARIANT_CHECK_FUNC_API _check_idx_sorted_distinct_vals_slices_with_cidx(
    const index_t* RESTRICT ptr_idx_batch,
    const index_t cidx,
    const index_t cidx_next) {
  // Note that ptr_idx_batch = &idx[batch_idx] and is contiguous.
  const auto* RESTRICT slice_begin = ptr_idx_batch + cidx;
  const auto* RESTRICT slice_end = ptr_idx_batch + cidx_next;
  for (auto* RESTRICT curr = slice_begin + 1; curr < slice_end; ++curr) {
    const auto invariant = *(curr - 1) < *curr;
    if (cdim_name == CDimName::CRow) {
      _assert(
          invariant,
          "`col_indices[..., crow_indices[..., i - 1]:crow_indices[..., i]] "
          "for all i = 1, ..., nrows "
          "are sorted and distinct along the last dimension values` "
          "is not satisfied.");
    } else {
      _assert(
          invariant,
          "`row_indices[..., ccol_indices[..., i - 1]:ccol_indices[..., i]] "
          "for all i = 1, ..., ncols "
          "are sorted and distinct along the last dimension values` "
          "is not satisfied.");
    }
  }
}

static inline int64_t indexCount(IntArrayRef sizes) {
  int64_t res = 1;
  for (const auto& s : sizes) {
    res *= s;
  }
  return res;
}
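
// For instance, indexCount({2, 3}) == 6: the number of batches in a
// 2x3 batch grid.

// No-op stand-ins for backends that do not provide a vectorized path:
// `EmptyVecKernel::launch` does nothing and `DummyVec` is simply the
// scalar type, so only the scalar overload of `KernelLauncher::launch`
// below does real work in these checks.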
template <typename func_t, typename vec_func_t>
struct EmptyVecKernel {
  static void launch(
      TensorIteratorBase& iter,
      const func_t& f,
      const vec_func_t& vec_f) {}
};

template <typename scalar_t>
using DummyVec = scalar_t;

template <
    template <typename func_t>
    class kernel_t,
    template <typename func_t, typename vec_func_t>
    class vec_kernel_t>
struct KernelLauncher {
  template <typename func_t, typename vec_func_t>
  static void launch(
      TensorIteratorBase& iter,
      const func_t& f,
      const vec_func_t& vec_f) {
    vec_kernel_t<func_t, vec_func_t>::launch(iter, f, vec_f);
  }

  template <typename func_t>
  static void launch(TensorIteratorBase& iter, const func_t& f) {
    kernel_t<func_t>::launch(iter, f);
  }
};

template <
    CDimName cdim_name,
    template <typename func_t>
    class kernel_t,
    template <typename func_t, typename vec_func_t>
    class vec_kernel_t = EmptyVecKernel,
    template <typename scalar_t> class Vec = DummyVec>
void _validate_compressed_sparse_indices_kernel(
    const Tensor& cidx,
    const Tensor& idx,
    const int64_t cdim,
    const int64_t dim,
    const int64_t nnz) {
  if (cdim_name == CDimName::CRow) {
    TORCH_CHECK(
        cidx.size(-1) == cdim + 1,
        "crow_indices have wrong shape: ",
        "crow_indices.shape[-1] = ",
        cidx.size(-1),
        " is not equal to ",
        "nrows + 1 = ",
        cdim + 1);
    TORCH_CHECK(
        idx.size(-1) == nnz,
        "col_indices have wrong shape: ",
        "col_indices.shape[-1] = ",
        idx.size(-1),
        " is not equal to ",
        "nnz = ",
        nnz);
  } else {
    TORCH_CHECK(
        cidx.size(-1) == cdim + 1,
        "ccol_indices have wrong shape: ",
        "ccol_indices.shape[-1] = ",
        cidx.size(-1),
        " is not equal to ",
        "ncols + 1 = ",
        cdim + 1);
    TORCH_CHECK(
        idx.size(-1) == nnz,
        "row_indices have wrong shape: ",
        "row_indices.shape[-1] = ",
        idx.size(-1),
        " is not equal to ",
        "nnz = ",
        nnz);
  }

  using KernelLauncher = KernelLauncher<kernel_t, vec_kernel_t>;

  // TensorIterator does not accept void lambdas, so each check returns a
  // dummy value; the 1-element `dummy` below is expanded (with stride 0)
  // to serve as the output of every iterator.
  const auto dummy = at::empty({1}, cidx.options());

  // Invariants 5.4 and 5.5
  {
    auto iter = TensorIteratorConfig()
                    .set_check_mem_overlap(false)
                    .add_owned_output(dummy.expand_as(idx))
                    .add_input(idx)
                    .build();

    AT_DISPATCH_INDEX_TYPES(idx.scalar_type(), NAME, [&iter, dim]() {
      const auto zero = index_t{0};
      KernelLauncher::launch(iter, [zero, dim] FUNCAPI(index_t idx) -> index_t {
        _check_idx_bounds<cdim_name, index_t>(idx, zero, dim);
        return 0;
      });
    });
  }

  // Invariants 5.1, 5.2, 5.3, 5.6
  {
    const auto cidx_first = cidx.slice(-1, 0, 1);
    const auto cidx_last = cidx.slice(-1, cdim, cdim + 1);

    const auto cidx_curr = cidx.slice(-1, 0, cdim);
    const auto cidx_next = cidx.slice(-1, 1, cdim + 1);

    const auto batch_dims = cidx.sizes().slice(0, cidx.dim() - 1);
    const auto batch_count = indexCount(batch_dims);
    const auto batch_idx =
        at::arange(batch_count, cidx.options()).view(batch_dims).unsqueeze_(-1);

    const auto idx_ndims = idx.dim();

    const auto cpu_options = idx.options().dtype(kLong).device(kCPU);
    Tensor idx_sizes_and_strides_cpu = at::empty({2, idx_ndims}, cpu_options);
    idx_sizes_and_strides_cpu.select(0, 0).copy_(
        at::tensor(idx.sizes(), cpu_options));
    idx_sizes_and_strides_cpu.select(0, 1).copy_(
        at::tensor(idx.strides(), cpu_options));
    const Tensor idx_sizes_and_strides =
        idx_sizes_and_strides_cpu.to(idx.device());
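
    // idx's sizes and strides are shipped through a CPU tensor onto idx's
    // device (rather than captured as host-side IntArrayRef) so that the
    // kernel lambda below can dereference them when running on CUDA.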

    auto iter = TensorIteratorConfig()
                    .set_check_mem_overlap(false)
                    .add_owned_output(dummy.expand_as(cidx_curr))
                    .add_input(cidx_first)
                    .add_input(cidx_last)
                    .add_input(cidx_curr)
                    .add_input(cidx_next)
                    .add_input(batch_idx)
                    .build();

    AT_DISPATCH_INDEX_TYPES(
        idx.scalar_type(),
        NAME,
        [&iter, &idx, dim, nnz, idx_ndims, &idx_sizes_and_strides]() {
          const auto* RESTRICT ptr_idx = idx.data_ptr<index_t>();
          const int64_t* RESTRICT idx_sizes =
              idx_sizes_and_strides.data_ptr<int64_t>();
          const int64_t* RESTRICT idx_strides = idx_sizes + idx_ndims;
          const auto zero = index_t{0};

          KernelLauncher::launch(
              iter,
              [zero, dim, nnz, idx_ndims, idx_sizes, idx_strides, ptr_idx] FUNCAPI(
                  index_t cidx_first,
                  index_t cidx_last,
                  index_t cidx_curr,
                  index_t cidx_next,
                  index_t batch_idx) -> index_t {
                // Invariant 5.1
                _check_first_cidx_is_zero<cdim_name, index_t>(cidx_first, zero);
                // Invariant 5.2
                _check_last_cidx_is_nnz<cdim_name, index_t>(cidx_last, nnz);
                // Invariant 5.3
                _check_cidx_nondecreasing_locally_bounded_sequence<
                    cdim_name,
                    index_t>(cidx_curr, cidx_next, zero, dim);
                // Invariant 5.6
                // NOTE: the implementation below is sync-less, but,
                // unfortunately, work is not guaranteed to be well-balanced
                // between different threads.
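                // Recover this batch's offset into idx from the linear
                // `batch_idx`: multiplying by the last-dim size gives the
                // linear index of the first element of the batch's slice,
                // and the loop peels off one coordinate per dimension
                // (a mixed-radix decomposition), accumulating
                // coordinate * stride.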
                int64_t idx_offset = 0;
                // assuming idx contiguity per batch:
                int64_t tmp = batch_idx * idx_sizes[idx_ndims - 1];
                for (int i = idx_ndims - 1; i >= 0; i--) {
                  int64_t div = tmp / idx_sizes[i];
                  idx_offset += (tmp - div * idx_sizes[i]) * idx_strides[i];
                  tmp = div;
                }
                const auto* RESTRICT ptr_idx_batch = ptr_idx + idx_offset;
                _check_idx_sorted_distinct_vals_slices_with_cidx<
                    cdim_name,
                    index_t>(ptr_idx_batch, cidx_curr, cidx_next);
                return 0;
              });
        });
  }
}

template <
    template <typename func_t>
    class kernel_t,
    template <typename func_t, typename vec_func_t>
    class vec_kernel_t = EmptyVecKernel,
    template <typename scalar_t> class Vec = DummyVec>
void validate_compressed_sparse_indices_kernel(
    const bool is_crow,
    const Tensor& cidx,
    const Tensor& idx,
    const int64_t cdim,
    const int64_t dim,
    const int64_t nnz) {
  if (is_crow) {
    _validate_compressed_sparse_indices_kernel<
        CDimName::CRow,
        kernel_t,
        vec_kernel_t,
        Vec>(cidx, idx, cdim, dim, nnz);
  } else {
    _validate_compressed_sparse_indices_kernel<
        CDimName::CCol,
        kernel_t,
        vec_kernel_t,
        Vec>(cidx, idx, cdim, dim, nnz);
  }
}
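
// A backend plugs in by supplying a `kernel_t` that forwards the checking
// functor to its elementwise loop. A minimal sketch of a CPU binding
// (hypothetical wrapper name; assumes ATen's `cpu_kernel` from
// <ATen/native/cpu/Loops.h>):
//
//   template <typename func_t>
//   struct CPUKernel {
//     static void launch(TensorIteratorBase& iter, const func_t& f) {
//       cpu_kernel(iter, f);
//     }
//   };
//
//   validate_compressed_sparse_indices_kernel<CPUKernel>(
//       is_crow, cidx, idx, cdim, dim, nnz);
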
} // namespace
} // namespace native
} // namespace at