Lose unnecessary sync calls to GPU in tests and add them in cudaMemcpy_pullback to mimic its blocking behavior
kchristin22 committed Oct 21, 2024
1 parent fc417d9 commit f65bbde
Showing 2 changed files with 9 additions and 16 deletions.
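Background for the change (a note on CUDA semantics, not part of the commit): a kernel launch and a subsequent cudaMemcpy issued on the default stream execute in order, and cudaMemcpy blocks the host until the copy completes, so an explicit cudaDeviceSynchronize() between them is redundant. A minimal standalone sketch of the guarantee the tests now rely on:

#include <cstdio>

__global__ void increment(int *x) { x[threadIdx.x] += 1; }

int main() {
  int host[4] = {0, 1, 2, 3};
  int *dev = nullptr;
  cudaMalloc(&dev, sizeof(host));
  cudaMemcpy(dev, host, sizeof(host), cudaMemcpyHostToDevice);

  increment<<<1, 4>>>(dev); // queued on the default stream

  // No cudaDeviceSynchronize() needed: this copy is queued after the kernel
  // on the default stream and blocks the host until the data has landed.
  cudaMemcpy(host, dev, sizeof(host), cudaMemcpyDeviceToHost);

  printf("%d %d %d %d\n", host[0], host[1], host[2], host[3]); // prints: 1 2 3 4
  cudaFree(dev);
  return 0;
}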
8 changes: 6 additions & 2 deletions include/clad/Differentiator/BuiltinDerivatives.h
@@ -103,6 +103,8 @@ void cudaMemcpy_pullback(T* destPtr, T* srcPtr, size_t count,
     *d_kind = cudaMemcpyDeviceToHost;
     aux_destPtr = (T*)malloc(count);
   }
+  cudaDeviceSynchronize(); // needed in case user uses another stream for
+                           // kernel execution besides the default one
   cudaMemcpy(aux_destPtr, d_destPtr, count, *d_kind);
   size_t N = count / sizeof(T);
   if (kind == cudaMemcpyDeviceToHost) {
@@ -116,8 +118,10 @@ void cudaMemcpy_pullback(T* destPtr, T* srcPtr, size_t count,
     size_t numBlocks = std::min(maxBlocks, (N + numThreads - 1) / numThreads);
     custom_derivatives::atomicAdd_kernel<<<numBlocks, numThreads>>>(
         d_srcPtr, aux_destPtr, N);
-    cudaDeviceSynchronize(); // needed in case user uses another stream than the
-                             // default one
+    cudaDeviceSynchronize(); // needed in case the user uses another stream for
+                             // kernel execution besides the default one, so we
+                             // need to make sure the data are updated before
+                             // continuing with the rest of the code
     cudaFree(aux_destPtr);
   } else if (kind == cudaMemcpyHostToDevice) {
     // d_kind is device to host, so d_srcPtr is a host pointer
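Why the pullback keeps an explicit sync even though cudaMemcpy is blocking: a cudaMemcpy issued on the legacy default stream does not synchronize with streams created with cudaStreamNonBlocking, so adjoint data written by a kernel on such a stream could still be in flight when the copy starts. A minimal sketch of that scenario (the kernel and variable names are illustrative, not taken from clad):

#include <cstdio>

__global__ void scale(double *x) { x[threadIdx.x] *= 2.0; }

int main() {
  double host[8] = {1, 1, 1, 1, 1, 1, 1, 1};
  double *dev = nullptr;
  cudaMalloc(&dev, sizeof(host));
  cudaMemcpy(dev, host, sizeof(host), cudaMemcpyHostToDevice);

  // A non-blocking stream does NOT synchronize with the legacy default stream.
  cudaStream_t s;
  cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
  scale<<<1, 8, 0, s>>>(dev); // queued on stream s, not the default stream

  // Without this sync, the default-stream copy below is not guaranteed to
  // wait for the kernel running on stream s.
  cudaDeviceSynchronize();

  cudaMemcpy(host, dev, sizeof(host), cudaMemcpyDeviceToHost);
  printf("%0.1f\n", host[0]); // 2.0 only because the kernel has finished

  cudaStreamDestroy(s);
  cudaFree(dev);
  return 0;
}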
17 changes: 3 additions & 14 deletions test/CUDA/GradientKernels.cu
@@ -564,7 +564,6 @@ double fn_memory(double *out, double *in) {
   else { \
     test.execute_kernel(grid, block, x, dx); \
   } \
-  cudaDeviceSynchronize(); \
   int *res = (int*)malloc(N * sizeof(int)); \
   cudaMemcpy(res, dx, N * sizeof(int), cudaMemcpyDeviceToHost); \
   for (int i = 0; i < (N - 1); i++) { \
@@ -602,7 +601,6 @@ double fn_memory(double *out, double *in) {
   } \
   int *res = (int*)malloc(N * sizeof(int)); \
   cudaMemcpy(res, dx, N * sizeof(int), cudaMemcpyDeviceToHost); \
-  cudaDeviceSynchronize(); \
   for (int i = 0; i < (N - 1); i++) { \
     printf("%d, ", res[i]); \
   } \
@@ -637,7 +635,6 @@ double fn_memory(double *out, double *in) {
   } \
   int *res = (int*)malloc(N * sizeof(int)); \
   cudaMemcpy(res, dx, N * sizeof(int), cudaMemcpyDeviceToHost); \
-  cudaDeviceSynchronize(); \
   for (int i = 0; i < (N - 1); i++) { \
     printf("%d, ", res[i]); \
   } \
@@ -672,7 +669,6 @@ double fn_memory(double *out, double *in) {
   } \
   double *res = (double*)malloc(N * sizeof(double)); \
   cudaMemcpy(res, dx, N * sizeof(double), cudaMemcpyDeviceToHost); \
-  cudaDeviceSynchronize(); \
   for (int i = 0; i < (N - 1); i++) { \
     printf("%0.2f, ", res[i]); \
   } \
@@ -748,42 +744,38 @@ int main(void) {
   auto test_device = clad::gradient(kernel_with_device_call, "out, val");
   test_device.execute_kernel(dim3(1), dim3(10, 1, 1), dummy_out_double, dummy_in_double, 5, d_out_double, d_val);
   double *res = (double*)malloc(10 * sizeof(double));
-  cudaMemcpy(res, d_val, sizeof(double), cudaMemcpyDeviceToHost);
-  cudaDeviceSynchronize();
+  cudaMemcpy(res, d_val, sizeof(double), cudaMemcpyDeviceToHost); // no need for synchronization before or after,
+                                                                  // as the cudaMemcpy call is queued after the kernel call
+                                                                  // on the default stream and the cudaMemcpy call is blocking
   printf("%0.2f\n", *res); // CHECK-EXEC: 50.00
 
   INIT(dummy_in_double, dummy_out_double, val, d_in_double, d_out_double, d_val);
 
   auto test_device_2 = clad::gradient(kernel_with_device_call_2, "out, val");
   test_device_2.execute_kernel(dim3(1), dim3(10, 1, 1), dummy_out_double, dummy_in_double, 5, d_out_double, d_val);
   cudaMemcpy(res, d_val, sizeof(double), cudaMemcpyDeviceToHost);
-  cudaDeviceSynchronize();
   printf("%0.2f\n", *res); // CHECK-EXEC: 50.00
 
   INIT(dummy_in_double, dummy_out_double, val, d_in_double, d_out_double, d_val);
 
   auto check_dup = clad::gradient(dup_kernel_with_device_call_2, "out, val"); // check that the pullback function is not regenerated
   check_dup.execute_kernel(dim3(1), dim3(10, 1, 1), dummy_out_double, dummy_in_double, 5, d_out_double, d_val);
   cudaMemcpy(res, d_val, sizeof(double), cudaMemcpyDeviceToHost);
-  cudaDeviceSynchronize();
   printf("%s\n", cudaGetErrorString(cudaGetLastError())); // CHECK-EXEC: no error
   printf("%0.2f\n", *res); // CHECK-EXEC: 50.00
 
   INIT(dummy_in_double, dummy_out_double, val, d_in_double, d_out_double, d_val);
 
   auto test_device_3 = clad::gradient(kernel_with_device_call_2, "out, in");
   test_device_3.execute_kernel(dim3(1), dim3(10, 1, 1), dummy_out_double, dummy_in_double, 5, d_out_double, d_in_double);
-  cudaDeviceSynchronize();
   cudaMemcpy(res, d_in_double, 10 * sizeof(double), cudaMemcpyDeviceToHost);
   printf("%0.2f, %0.2f, %0.2f\n", res[0], res[1], res[2]); // CHECK-EXEC: 5.00, 5.00, 5.00
 
   INIT(dummy_in_double, dummy_out_double, val, d_in_double, d_out_double, d_val);
 
   auto test_device_4 = clad::gradient(kernel_with_device_call_3);
   test_device_4.execute_kernel(dim3(1), dim3(10, 1, 1), dummy_out_double, dummy_in_double, val, d_out_double, d_in_double, d_val);
-  cudaDeviceSynchronize();
   cudaMemcpy(res, d_in_double, 10 * sizeof(double), cudaMemcpyDeviceToHost);
-  cudaDeviceSynchronize();
   printf("%0.2f, %0.2f, %0.2f\n", res[0], res[1], res[2]); // CHECK-EXEC: 5.00, 5.00, 5.00
   cudaMemcpy(res, d_val, sizeof(double), cudaMemcpyDeviceToHost);
   printf("%0.2f\n", *res); // CHECK-EXEC: 50.00
@@ -792,23 +784,20 @@ int main(void) {
 
   auto test_kernel_call = clad::gradient(fn);
   test_kernel_call.execute(dummy_out_double, dummy_in_double, d_out_double, d_in_double);
-  cudaDeviceSynchronize();
   cudaMemcpy(res, d_in_double, sizeof(double), cudaMemcpyDeviceToHost);
   printf("%0.2f\n", *res); // CHECK-EXEC: 50.00
 
   INIT(dummy_in_double, dummy_out_double, val, d_in_double, d_out_double, d_val);
 
   auto nested_device = clad::gradient(kernel_with_nested_device_call, "out, in");
   nested_device.execute_kernel(dim3(1), dim3(10, 1, 1), dummy_out_double, dummy_in_double, 5, d_out_double, d_in_double);
-  cudaDeviceSynchronize();
   cudaMemcpy(res, d_in_double, 10 * sizeof(double), cudaMemcpyDeviceToHost);
   printf("%0.2f, %0.2f, %0.2f\n", res[0], res[1], res[2]); // CHECK-EXEC: 5.00, 5.00, 5.00
 
   INIT(dummy_in_double, dummy_out_double, val, d_in_double, d_out_double, d_val);
 
   auto test_memory = clad::gradient(fn_memory);
   test_memory.execute(dummy_out_double, dummy_in_double, d_out_double, d_in_double);
-  cudaDeviceSynchronize();
   cudaMemcpy(res, d_in_double, 10 * sizeof(double), cudaMemcpyDeviceToHost);
   printf("%0.2f, %0.2f, %0.2f\n", res[0], res[1], res[2]); // CHECK-EXEC: 60.00, 0.00, 0.00
 
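One side effect of dropping the explicit syncs is worth noting: cudaDeviceSynchronize() also used to surface asynchronous kernel errors, and after this change such errors surface at the next blocking CUDA call instead, which the tests' existing cudaGetLastError() check still catches. A small sketch of that error-checking pattern (the CUDA_CHECK macro is a hypothetical helper, not from the test file):

#include <cstdio>

#define CUDA_CHECK(expr)                                           \
  do {                                                             \
    cudaError_t err = (expr);                                      \
    if (err != cudaSuccess)                                        \
      printf("%s failed: %s\n", #expr, cudaGetErrorString(err));   \
  } while (0)

__global__ void kernel(double *x) { x[threadIdx.x] += 1.0; }

int main() {
  double host[4] = {0};
  double *dev = nullptr;
  CUDA_CHECK(cudaMalloc(&dev, sizeof(host)));
  CUDA_CHECK(cudaMemcpy(dev, host, sizeof(host), cudaMemcpyHostToDevice));
  kernel<<<1, 4>>>(dev);
  CUDA_CHECK(cudaGetLastError()); // catches launch-time errors immediately
  // The blocking copy also reports any asynchronous kernel failure.
  CUDA_CHECK(cudaMemcpy(host, dev, sizeof(host), cudaMemcpyDeviceToHost));
  printf("%0.2f\n", host[0]); // expected: 1.00
  CUDA_CHECK(cudaFree(dev));
  return 0;
}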
