Commit e562068: Fix suggestions
kchristin22 committed Oct 22, 2024
1 parent 66103cf commit e562068
Showing 3 changed files with 30 additions and 17 deletions.
6 changes: 3 additions & 3 deletions include/clad/Differentiator/BuiltinDerivatives.h
@@ -85,7 +85,7 @@ ValueAndPushforward<int, int> cudaDeviceSynchronize_pushforward()

 template <typename T>
 __global__ void atomicAdd_kernel(T* destPtr, T* srcPtr, size_t N) {
-  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
+  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
        i += blockDim.x * gridDim.x)
     atomicAdd(&destPtr[i], srcPtr[i]);
 }
@@ -95,7 +95,7 @@ void cudaMemcpy_pullback(T* destPtr, T* srcPtr, size_t count,
                          cudaMemcpyKind kind, T* d_destPtr, T* d_srcPtr,
                          size_t* d_count, cudaMemcpyKind* d_kind)
     __attribute__((host)) {
-  T* aux_destPtr;
+  T* aux_destPtr = nullptr;
   if (kind == cudaMemcpyDeviceToHost) {
     *d_kind = cudaMemcpyHostToDevice;
     cudaMalloc(&aux_destPtr, count);
@@ -125,7 +125,7 @@ void cudaMemcpy_pullback(T* destPtr, T* srcPtr, size_t count,
     cudaFree(aux_destPtr);
   } else if (kind == cudaMemcpyHostToDevice) {
     // d_kind is device to host, so d_srcPtr is a host pointer
-    for (size_t i = 0; i < N; ++i)
+    for (size_t i = 0; i < N; i++)
      d_srcPtr[i] += aux_destPtr[i];
    free(aux_destPtr);
  }
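For context: the pullback of a memory copy `dest = src` reverses the copy direction and accumulates the destination's adjoint into the source's adjoint, which is what the `d_srcPtr[i] += aux_destPtr[i]` loop above does. A minimal host-only sketch of that accumulation step, with illustrative names (not Clad's actual implementation):

#include <cstddef>

// Conceptual sketch: since the primal operation is dest[i] = src[i],
// the adjoint update is d_src[i] += d_dest[i]. This mirrors the
// `d_srcPtr[i] += aux_destPtr[i]` loop in cudaMemcpy_pullback above.
void memcpy_adjoint_sketch(double* d_src, const double* d_dest, size_t N) {
  for (size_t i = 0; i < N; i++)
    d_src[i] += d_dest[i]; // accumulate the incoming adjoint into the source
}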
11 changes: 5 additions & 6 deletions lib/Differentiator/ReverseModeVisitor.cpp
@@ -611,8 +611,9 @@ Expr* getArraySizeExpr(const ArrayType* AT, ASTContext& context,
   if (!m_DiffReq.CUDAGlobalArgsIndexes.empty())
     for (auto index : m_DiffReq.CUDAGlobalArgsIndexes)
       m_CUDAGlobalArgs.emplace(m_Derivative->getParamDecl(index));
-  // If the function is a global kernel, all its parameters reside in the
-  // global memory of the GPU
+  // If the function is a global kernel, all the adjoint parameters reside in
+  // the global memory of the GPU. To facilitate the process, all the params
+  // of the kernel are added to the set.
   else if (m_DiffReq->hasAttr<clang::CUDAGlobalAttr>())
     for (auto* param : params)
       m_CUDAGlobalArgs.emplace(param);
@@ -3226,10 +3227,8 @@ Expr* getArraySizeExpr(const ArrayType* AT, ASTContext& context,
   bool dxInForward = false;
   if (auto* callExpr = dyn_cast_or_null<CallExpr>(stmtDx))
     if (auto* FD = dyn_cast<FunctionDecl>(callExpr->getCalleeDecl()))
-      if (utils::IsMemoryFunction(FD)) {
-        printf("%s\n", FD->getNameAsString().c_str());
+      if (utils::IsMemoryFunction(FD))
         dxInForward = true;
-      }
   if (stmtDx) {
     if (dxInForward)
       addToCurrentBlock(stmtDx, direction::forward);
@@ -3257,7 +3256,7 @@ Expr* getArraySizeExpr(const ArrayType* AT, ASTContext& context,

   if (size) {
     llvm::SmallVector<Expr*, 2> args;
-    if (auto BinOp = dyn_cast<BinaryOperator>(size)) {
+    if (auto* BinOp = dyn_cast<BinaryOperator>(size)) {
       if (BinOp->getOpcode() == BO_Mul) {
         Expr* lhs = BinOp->getLHS();
         Expr* rhs = BinOp->getRHS();
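The `auto*` hunk above touches the logic that recognizes allocation sizes of the form `count * sizeof(T)`. A simplified sketch of that clang AST pattern; `splitMulSize` is an illustrative helper, not Clad's API (the real logic lives in ReverseModeVisitor.cpp):

#include "clang/AST/Expr.h"

// Returns true if `size` is a multiplication such as `10 * sizeof(double)`,
// exposing the two factors so a caller can emit, e.g., calloc(count, elemSize)
// for the adjoint buffer instead of a single malloc(count * elemSize).
static bool splitMulSize(clang::Expr* size, clang::Expr*& count,
                         clang::Expr*& elemSize) {
  if (!size)
    return false;
  if (auto* BinOp = llvm::dyn_cast<clang::BinaryOperator>(size))
    if (BinOp->getOpcode() == clang::BO_Mul) {
      count = BinOp->getLHS();    // e.g. the element count `10`
      elemSize = BinOp->getRHS(); // e.g. `sizeof(double)`
      return true;
    }
  return false;
}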
30 changes: 22 additions & 8 deletions test/CUDA/GradientKernels.cu
@@ -427,25 +427,33 @@ void fn(double *out, double *in) {
 //CHECK-NEXT: }

 double fn_memory(double *out, double *in) {
-  kernel_call<<<1, 10>>>(out, in);
+  double *in_dev = nullptr;
+  cudaMalloc(&in_dev, 10 * sizeof(double));
+  cudaMemcpy(in_dev, in, 10 * sizeof(double), cudaMemcpyHostToDevice);
+  kernel_call<<<1, 10>>>(out, in_dev);
   cudaDeviceSynchronize();
-  double *out_host = (double*)malloc(10 * sizeof(double));
+  double *out_host = (double *)malloc(10 * sizeof(double));
   cudaMemcpy(out_host, out, 10 * sizeof(double), cudaMemcpyDeviceToHost);
   double res = 0;
   for (int i=0; i < 10; ++i) {
     res += out_host[i];
   }
   free(out_host);
-  cudaFree(out);
-  cudaFree(in);
+  cudaFree(in_dev);
   return res;
 }

 // CHECK: void fn_memory_grad(double *out, double *in, double *_d_out, double *_d_in) {
 //CHECK-NEXT: int _d_i = 0;
 //CHECK-NEXT: int i = 0;
 //CHECK-NEXT: clad::tape<double> _t1 = {};
-//CHECK-NEXT: kernel_call<<<1, 10>>>(out, in);
+//CHECK-NEXT: double *_d_in_dev = nullptr;
+//CHECK-NEXT: double *in_dev = nullptr;
+//CHECK-NEXT: cudaMalloc(&_d_in_dev, 10 * sizeof(double));
+//CHECK-NEXT: cudaMalloc(&in_dev, 10 * sizeof(double));
+//CHECK-NEXT: cudaMemcpy(in_dev, in, 10 * sizeof(double), cudaMemcpyHostToDevice);
+//CHECK-NEXT: kernel_call<<<1, 10>>>(out, in_dev);
 //CHECK-NEXT: cudaDeviceSynchronize();
 //CHECK-NEXT: double *_d_out_host = (double *)calloc(10, sizeof(double));
 //CHECK-NEXT: double *out_host = (double *)malloc(10 * sizeof(double));
@@ -481,10 +489,16 @@ double fn_memory(double *out, double *in) {
 //CHECK-NEXT: clad::custom_derivatives::cudaMemcpy_pullback(out_host, out, 10 * sizeof(double), cudaMemcpyDeviceToHost, _d_out_host, _d_out, &_r0, &_r1);
 //CHECK-NEXT: }
 //CHECK-NEXT: kernel_call_pullback<<<1, 10>>>(out, in, _d_out, _d_in);
+//CHECK-NEXT: {
+//CHECK-NEXT: unsigned long _r0 = 0UL;
+//CHECK-NEXT: cudaMemcpyKind _r1 = static_cast<cudaMemcpyKind>(0U);
+//CHECK-NEXT: clad::custom_derivatives::cudaMemcpy_pullback(in_dev, in, 10 * sizeof(double), cudaMemcpyHostToDevice, _d_in_dev, _d_in, &_r0, &_r1);
+//CHECK-NEXT: }
 //CHECK-NEXT: free(out_host);
 //CHECK-NEXT: free(_d_out_host);
-//CHECK-NEXT: cudaFree(out);
-//CHECK-NEXT: cudaFree(in);
+//CHECK-NEXT: cudaFree(in_dev);
+//CHECK-NEXT: cudaFree(_d_in_dev);
 //CHECK-NEXT:}

// CHECK: __attribute__((device)) void device_fn_pullback_1(double in, double val, double _d_y, double *_d_in, double *_d_val) {
@@ -797,9 +811,8 @@ int main(void) {
   INIT(dummy_in_double, dummy_out_double, val, d_in_double, d_out_double, d_val);

   auto test_memory = clad::gradient(fn_memory);
-  test_memory.execute(dummy_out_double, dummy_in_double, d_out_double, d_in_double);
-  cudaMemcpy(res, d_in_double, 10 * sizeof(double), cudaMemcpyDeviceToHost);
-  printf("%0.2f, %0.2f, %0.2f\n", res[0], res[1], res[2]); // CHECK-EXEC: 60.00, 0.00, 0.00
+  test_memory.execute(dummy_out_double, fives, d_out_double, zeros);
+  printf("%0.2f, %0.2f, %0.2f\n", zeros[0], zeros[1], zeros[2]); // CHECK-EXEC: 60.00, 0.00, 0.00

   free(res);
   free(fives);
@@ -808,6 +821,7 @@ int main(void) {
   cudaFree(d_in_double);
   cudaFree(val);
   cudaFree(d_val);
+  cudaFree(dummy_in_double);

   return 0;
 }
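For context, the updated test exercises the usual Clad entry points: `clad::gradient(fn)` builds the reverse-mode derivative and `.execute(args..., d_args...)` runs it, accumulating adjoints into the `d_` buffers. A minimal standalone sketch along the same lines; buffer names, sizes, and the wrapper function are illustrative, and `fn_memory` is the test function above:

#include <cstdio>
#include <cuda_runtime.h>
#include "clad/Differentiator/Differentiator.h"

// Sketch: differentiate fn_memory (defined in the test above) and read
// back the gradient with respect to the host input `in`.
int run_fn_memory_grad() {
  double *out = nullptr, *d_out = nullptr; // device output and its adjoint
  cudaMalloc(&out, 10 * sizeof(double));
  cudaMalloc(&d_out, 10 * sizeof(double));
  cudaMemset(d_out, 0, 10 * sizeof(double));

  double in[10], d_in[10] = {0}; // host input and its zero-initialized adjoint
  for (int i = 0; i < 10; i++)
    in[i] = 5;

  auto grad = clad::gradient(fn_memory); // reverse-mode AD of fn_memory
  grad.execute(out, in, d_out, d_in);    // d_in now holds d(res)/d(in)
  printf("%0.2f\n", d_in[0]);

  cudaFree(out);
  cudaFree(d_out);
  return 0;
}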
