hgemv.py
import torch
import time
from torch.utils.cpp_extension import load
from functools import partial
from typing import Optional
torch.set_grad_enabled(False)
# Load the CUDA kernel as a python module
lib = load(name='hgemv_lib',
           sources=['hgemv.cu'],
           extra_cuda_cflags=[
               "-O3",
               "-U__CUDA_NO_HALF_OPERATORS__",
               "-U__CUDA_NO_HALF_CONVERSIONS__",
               "-U__CUDA_NO_HALF2_OPERATORS__",
               "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
               "--expt-relaxed-constexpr",
               "--expt-extended-lambda",
               "--use_fast_math"
           ],
           extra_cflags=['-std=c++17'])
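
# Note (assumption inferred from the calls below): the compiled extension is expected to
# export hgemv_k32_f16, hgemv_k128_f16x4 and hgemv_k16_f16, each taking half-precision
# tensors (a, b, out) and writing the M x 1 GEMV result into `out` in place.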

def run_benchmark(perf_func: callable,
                  a: torch.Tensor, b: torch.Tensor,
                  tag: str, out: Optional[torch.Tensor] = None,
                  warmup: int = 10, iters: int = 200,
                  show_all: bool = False):
    # Warmup: clear the output buffer first so stale values never leak into the results.
    if out is not None:
        out.fill_(0)
        for i in range(warmup):
            perf_func(a, b, out)
    else:
        for i in range(warmup):
            _ = perf_func(a, b)
    torch.cuda.synchronize()

    # Timed iterations.
    start = time.time()
    if out is not None:
        for i in range(iters):
            perf_func(a, b, out)
    else:
        for i in range(iters):
            out = perf_func(a, b)
    torch.cuda.synchronize()
    end = time.time()

    total_time = (end - start) * 1000  # ms
    mean_time = total_time / iters
    out_info = f"out_{tag}"
    out_val = out.flatten().detach().cpu().numpy().tolist()[:3]
    out_val = [round(v, 8) for v in out_val]
    print(f"{out_info:>13}: {out_val}, time:{mean_time:.8f}ms")
    if show_all:
        print(out)
    return out.clone(), mean_time
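
# A minimal correctness-check sketch (not part of the original benchmark): it assumes each
# custom kernel follows the (a, b, out) calling convention used below and compares the
# kernel's half-precision output against torch.matmul with a loose tolerance.
def check_allclose(kernel_func: callable, a: torch.Tensor, b: torch.Tensor,
                   atol: float = 1e-2, rtol: float = 1e-2) -> bool:
    out = torch.zeros((a.size(0), b.size(1)), dtype=torch.half, device=a.device)
    kernel_func(a, b, out)      # kernel writes its result into `out` in place
    ref = torch.matmul(a, b)    # PyTorch reference result
    return torch.allclose(out.float(), ref.float(), atol=atol, rtol=rtol)
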
print("-" * 80)
M, N, K = 1024, 1, 128
a = torch.randn((M, K)).cuda().half().contiguous()
b = torch.randn((K, N)).cuda().half().contiguous()
c = torch.randn((M, N)).cuda().half().contiguous()
run_benchmark(lib.hgemv_k32_f16, a, b, "k32f16", c)
run_benchmark(lib.hgemv_k128_f16x4, a, b, "k128f16x4", c)
run_benchmark(partial(torch.matmul, out=c), a, b, "f16_th")
print("-" * 80)
M, N, K = 1024, 1, 16
a = torch.randn((M, K)).cuda().half().contiguous()
b = torch.randn((K, N)).cuda().half().contiguous()
c = torch.randn((M, N)).cuda().half().contiguous()
run_benchmark(lib.hgemv_k16_f16, a, b, "k16f16", c)
run_benchmark(partial(torch.matmul, out=c), a, b, "f16_th")
print("-" * 80)
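
# Example (hypothetical) use of the correctness sketch above:
#   assert check_allclose(lib.hgemv_k16_f16, a, b), "hgemv_k16_f16 mismatch vs torch.matmul"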