/***************************************************************************************************
* Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Interface between a CUTLASS device-wide operator and CUDA.
*/
#pragma once
#if !defined(CUTLASS_ENABLE_SYCL)
#include <cuda_runtime_api.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/trace.h"
#include "cutlass/platform/platform.h"
#if ! defined(__CUDACC_RTC__)
#include <cstdio>
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
// NVRTC doesn't need definitions for these host classes
#if ((__CUDACC_VER_MAJOR__ >= 12) || \
((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))) \
&& !defined(__CUDACC_RTC__)
#define CUDA_HOST_ADAPTER_LAUNCH_ATTRIBUTES_ENABLED
#endif
#if ((__CUDACC_VER_MAJOR__ >= 12) && !defined(__CUDACC_RTC__))
#define CUDA_HOST_ADAPTER_TENSORMAP_ENABLED
#endif
// Include <cuda.h> for CUDA Driver API calls if any of these capabilities are enabled.
#if defined(CUDA_HOST_ADAPTER_LAUNCH_ATTRIBUTES_ENABLED) || \
defined(CUDA_HOST_ADAPTER_TENSORMAP_ENABLED)
#include <cuda.h>
#endif // defined(CUDA_HOST_ADAPTER_LAUNCH_ATTRIBUTES_ENABLED) ||
// defined(CUDA_HOST_ADAPTER_TENSORMAP_ENABLED)
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Macro-level guard for CUDA Host Adapter
//
#if !defined(CUTLASS_ENABLE_CUDA_HOST_ADAPTER)
#define CUTLASS_ENABLE_CUDA_HOST_ADAPTER false
#endif
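//
// Illustrative note (an assumption about typical usage, not part of the original header):
// the adapter path is usually enabled by defining this macro on the compiler command line,
// e.g. -DCUTLASS_ENABLE_CUDA_HOST_ADAPTER=true. Exact build-system wiring varies.
//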
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This class manages the runtime CUlaunchAttribute list that can be supplied to a
/// CudaHostAdapter. CudaHostLaunchAttributes is an empty struct on earlier CUDA Toolkits,
/// where CUlaunchAttribute has not yet been introduced.
struct CudaHostLaunchAttributes {
#if defined(CUDA_HOST_ADAPTER_LAUNCH_ATTRIBUTES_ENABLED)
/// A reasonable upper bound on the number of launch attributes commonly applied
static constexpr int32_t kMaximumAttributeCount = 5;
/// Launch attributes
CUlaunchAttribute launch_attributes[kMaximumAttributeCount];
int32_t attribute_count = 0;
CUTLASS_HOST_DEVICE
CudaHostLaunchAttributes(CUlaunchAttribute *launch_attributes_ = nullptr,
int32_t attribute_count_ = 0) {
CUTLASS_ASSERT(attribute_count_ >= 0 && attribute_count_ <= kMaximumAttributeCount);
for (int32_t i = 0; i < attribute_count_ && i < kMaximumAttributeCount; ++i) {
launch_attributes[i] = launch_attributes_[i];
}
attribute_count = attribute_count_;
}
CUTLASS_HOST_DEVICE
CUlaunchAttribute const* data() const {
return launch_attributes;
}
CUTLASS_HOST_DEVICE
size_t size() const {
return attribute_count;
}
#endif // (CUDA_HOST_ADAPTER_LAUNCH_ATTRIBUTES_ENABLED)
};
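/// Example (an illustrative sketch, not part of this header): populating a
/// CudaHostLaunchAttributes from driver-API launch attributes. The choice of
/// CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION is an assumption made
/// purely for demonstration.
///
///   #if defined(CUDA_HOST_ADAPTER_LAUNCH_ATTRIBUTES_ENABLED)
///   CUlaunchAttribute attr{};
///   attr.id = CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION;
///   attr.value.programmaticStreamSerializationAllowed = 1;
///   cutlass::CudaHostLaunchAttributes attrs(&attr, /*attribute_count_=*/1);
///   #endif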
/// This class abstracts interactions between the CUTLASS device-wide GEMM and CUDA. The
/// intention is to enable CUTLASS to be used with both the CUDA Runtime API and the CUDA Driver API.
struct CudaHostAdapter {
/// Maximum number of kernel handles
static constexpr int32_t kMaximumKernelCount = 4;
/// Maximum cluster size
static constexpr int MaxClusterSize = 32;
//
// Data members
//
/// Handles
void *kernel_handles[kMaximumKernelCount];
int32_t kernel_count = 0;
CudaHostLaunchAttributes launch_attributes;
//
// Methods
//
/// Ctor
CudaHostAdapter() = default;
/// Dtor
virtual ~CudaHostAdapter() = default;
/// Copy Ctor
CUTLASS_HOST_DEVICE
CudaHostAdapter(const CudaHostAdapter & rhs)
: kernel_count(rhs.kernel_count),
launch_attributes(rhs.launch_attributes) {
CUTLASS_ASSERT(rhs.kernel_count >= 0 && rhs.kernel_count <= kMaximumKernelCount);
for (int32_t i = 0; i < rhs.kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = rhs.kernel_handles[i];
}
}
/// Copy Assignment
CUTLASS_HOST_DEVICE
CudaHostAdapter& operator=(const CudaHostAdapter & rhs) {
CUTLASS_ASSERT(rhs.kernel_count >= 0 && rhs.kernel_count <= kMaximumKernelCount);
for (int32_t i = 0; i < rhs.kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = rhs.kernel_handles[i];
}
kernel_count = rhs.kernel_count;
launch_attributes = rhs.launch_attributes;
return *this;
}
/// Move ctor
CUTLASS_HOST_DEVICE
CudaHostAdapter(CudaHostAdapter && rhs)
: kernel_count(rhs.kernel_count),
launch_attributes(std::move(rhs.launch_attributes)) {
CUTLASS_ASSERT(rhs.kernel_count >= 0 && rhs.kernel_count <= kMaximumKernelCount);
for (int32_t i = 0; i < rhs.kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = rhs.kernel_handles[i];
}
}
/// Move assignment
CUTLASS_HOST_DEVICE
CudaHostAdapter& operator=(CudaHostAdapter && rhs) {
CUTLASS_ASSERT(rhs.kernel_count >= 0 && rhs.kernel_count <= kMaximumKernelCount);
for (int32_t i = 0; i < rhs.kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = rhs.kernel_handles[i];
}
kernel_count = rhs.kernel_count;
launch_attributes = std::move(rhs.launch_attributes);
return *this;
}
/// Ctor
CUTLASS_HOST_DEVICE
CudaHostAdapter(void **kernel_handles_,
int32_t kernel_count_,
CudaHostLaunchAttributes const &launch_attributes_ = { })
: kernel_count(kernel_count_),
launch_attributes(launch_attributes_) {
CUTLASS_ASSERT(kernel_count >= 0 && kernel_count <= kMaximumKernelCount);
for (int32_t i = 0; i < kernel_count && i < kMaximumKernelCount; ++i) {
kernel_handles[i] = kernel_handles_[i];
}
}
/// Returns true if the CudaHostAdapter is empty (kernel_count == 0)
CUTLASS_HOST_DEVICE
bool empty() const { return !kernel_count; }
/// Returns kernel_count
CUTLASS_HOST_DEVICE
size_t size() const { return static_cast<size_t>(kernel_count); }
/// Queries the occupancy of a kernel
virtual Status query_occupancy(
int32_t *device_sms,
int32_t *sm_occupancy,
int32_t kernel_index,
int32_t thread_count,
int32_t smem_size) const = 0;
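/// Example (an illustrative sketch of one possible override, assuming the kernel
/// handles hold CUfunction values; this is an assumption, not a requirement of
/// the interface):
///
///   Status query_occupancy(int32_t* device_sms, int32_t* sm_occupancy,
///                          int32_t kernel_index, int32_t thread_count,
///                          int32_t smem_size) const override {
///     int num_blocks = 0;
///     CUresult result = cuOccupancyMaxActiveBlocksPerMultiprocessor(
///         &num_blocks,
///         reinterpret_cast<CUfunction>(kernel_handles[kernel_index]),
///         thread_count, smem_size);
///     *sm_occupancy = num_blocks;
///     // Populating device_sms via cuDeviceGetAttribute(
///     //   CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, ...) is omitted for brevity.
///     return (result == CUDA_SUCCESS) ? Status::kSuccess : Status::kErrorInternal;
///   }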
/// Launches a kernel without using Threadblock Clusters.
virtual Status launch(
dim3 const grid_dims,
dim3 const block_dims,
size_t const smem_size,
cudaStream_t cuda_stream,
void** kernel_params,
int32_t kernel_index) const = 0;
/// Launches a kernel using the CUDA Extensible Launch API and Threadblock Clusters.
virtual Status launch(
dim3 const grid_dims,
dim3 const cluster_dims,
dim3 const block_dims,
size_t const smem_size,
cudaStream_t cuda_stream,
void** kernel_params,
int32_t kernel_index) const = 0;
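/// Example (an illustrative sketch of how an override of the clustered launch
/// might configure cuLaunchKernelEx; one possible implementation, assumed for
/// demonstration only):
///
///   CUlaunchAttribute cluster_attr{};
///   cluster_attr.id = CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION;
///   cluster_attr.value.clusterDim.x = cluster_dims.x;
///   cluster_attr.value.clusterDim.y = cluster_dims.y;
///   cluster_attr.value.clusterDim.z = cluster_dims.z;
///
///   CUlaunchConfig config{};
///   config.gridDimX = grid_dims.x;   config.gridDimY = grid_dims.y;   config.gridDimZ = grid_dims.z;
///   config.blockDimX = block_dims.x; config.blockDimY = block_dims.y; config.blockDimZ = block_dims.z;
///   config.sharedMemBytes = static_cast<unsigned int>(smem_size);
///   config.hStream = cuda_stream;
///   config.attrs = &cluster_attr;
///   config.numAttrs = 1;
///   CUresult result = cuLaunchKernelEx(
///       &config, reinterpret_cast<CUfunction>(kernel_handles[kernel_index]),
///       kernel_params, nullptr);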
#if defined(CUDA_HOST_ADAPTER_TENSORMAP_ENABLED)
/// Create a tensor map descriptor object representing an im2col memory region.
virtual CUresult tensorMapEncodeIm2col (
CUtensorMap* tensorMap,
CUtensorMapDataType tensorDataType,
cuuint32_t tensorRank,
void* globalAddress,
const cuuint64_t* globalDim,
const cuuint64_t* globalStrides,
const int* pixelBoxLowerCorner,
const int* pixelBoxUpperCorner,
cuuint32_t channelsPerPixel,
cuuint32_t pixelsPerColumn,
const cuuint32_t* elementStrides,
CUtensorMapInterleave interleave,
CUtensorMapSwizzle swizzle,
CUtensorMapL2promotion l2Promotion,
CUtensorMapFloatOOBfill oobFill) const = 0;
/// Create a tensor map descriptor object representing a tiled memory region.
virtual CUresult tensorMapEncodeTiled (
CUtensorMap* tensorMap,
CUtensorMapDataType tensorDataType,
cuuint32_t tensorRank,
void* globalAddress,
const cuuint64_t* globalDim,
const cuuint64_t* globalStrides,
const cuuint32_t* boxDim,
const cuuint32_t* elementStrides,
CUtensorMapInterleave interleave,
CUtensorMapSwizzle swizzle,
CUtensorMapL2promotion l2Promotion,
CUtensorMapFloatOOBfill oobFill) const = 0;
/// Modify an existing tensor map descriptor with an updated global address.
virtual CUresult tensorMapReplaceAddress(
CUtensorMap* tensorMap,
void* globalAddress) const = 0;
#endif // defined(CUDA_HOST_ADAPTER_TENSORMAP_ENABLED)
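/// Example (an illustrative sketch: an override of tensorMapEncodeTiled that
/// simply forwards to the CUDA Driver API; assumed for demonstration only):
///
///   CUresult tensorMapEncodeTiled(
///       CUtensorMap* tensorMap, CUtensorMapDataType tensorDataType,
///       cuuint32_t tensorRank, void* globalAddress, const cuuint64_t* globalDim,
///       const cuuint64_t* globalStrides, const cuuint32_t* boxDim,
///       const cuuint32_t* elementStrides, CUtensorMapInterleave interleave,
///       CUtensorMapSwizzle swizzle, CUtensorMapL2promotion l2Promotion,
///       CUtensorMapFloatOOBfill oobFill) const override {
///     return cuTensorMapEncodeTiled(tensorMap, tensorDataType, tensorRank,
///                                   globalAddress, globalDim, globalStrides,
///                                   boxDim, elementStrides, interleave, swizzle,
///                                   l2Promotion, oobFill);
///   }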
protected:
/**
* Fills a buffer in global memory with a byte sequence copied from host memory.
* This function can be overridden to dispatch to the appropriate cuMemsetD*Async API.
*/
virtual Status memsetDeviceImpl(
void* destination, ///< Device memory pointer to be filled
void const* fill_value, ///< Pointer (in host memory) to the fill value
size_t fill_size, ///< Size in bytes of the fill value
size_t count, ///< Number of elements of size fill_size
cudaStream_t stream) const = 0;
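/// Example (an illustrative sketch of one possible memsetDeviceImpl override,
/// dispatching on the fill size; assumed for demonstration only):
///
///   Status memsetDeviceImpl(void* destination, void const* fill_value,
///                           size_t fill_size, size_t count,
///                           cudaStream_t stream) const override {
///     CUdeviceptr dst = reinterpret_cast<CUdeviceptr>(destination);
///     CUresult result = CUDA_ERROR_INVALID_VALUE;
///     switch (fill_size) {
///       case 1: result = cuMemsetD8Async(dst,  *static_cast<uint8_t  const*>(fill_value), count, stream); break;
///       case 2: result = cuMemsetD16Async(dst, *static_cast<uint16_t const*>(fill_value), count, stream); break;
///       case 4: result = cuMemsetD32Async(dst, *static_cast<uint32_t const*>(fill_value), count, stream); break;
///     }
///     return (result == CUDA_SUCCESS) ? Status::kSuccess : Status::kErrorInternal;
///   }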
public:
/// Fills a buffer in Global Memory with a byte sequence copied from host memory
template<class FillValueType>
CUTLASS_HOST_DEVICE
Status memsetDevice(
void* destination,
FillValueType fill_value,
size_t count,
cudaStream_t stream) const {
return this->memsetDeviceImpl(
destination,
&fill_value,
sizeof(FillValueType),
count,
stream);
}
};
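/// Example (an illustrative sketch, not part of this header): a hypothetical
/// user-provided adapter and its use. `MyDriverAdapter`, `my_function`, and the
/// surrounding variables are assumptions for demonstration; the pure-virtual
/// methods must be implemented as in the sketches above.
///
///   struct MyDriverAdapter : CudaHostAdapter {
///     using CudaHostAdapter::CudaHostAdapter;
///     // Override query_occupancy(), both launch() overloads, memsetDeviceImpl(),
///     // and (when enabled) the tensor-map methods.
///   };
///
///   void* handles[] = { reinterpret_cast<void*>(my_function) };
///   MyDriverAdapter adapter(handles, /*kernel_count_=*/1);
///   Status s = adapter.memsetDevice(device_ptr, uint32_t{0}, num_elements, stream);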
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////