Merge pull request #2372 from igchor/bench_cpu_count
[Benchmarks] support CPU counter measurements
pbalcer authored Nov 27, 2024
2 parents 53e8056 + 4a323ed commit 77ef984
Showing 8 changed files with 44 additions and 58 deletions.
scripts/benchmarks/benches/base.py (3 changes: 0 additions & 3 deletions)
@@ -57,9 +57,6 @@ def download(self, name, url, file, untar = False):
     def name(self):
         raise NotImplementedError()
 
-    def unit(self):
-        raise NotImplementedError()
-
     def lower_is_better(self):
         return True
 
scripts/benchmarks/benches/compute.py (43 changes: 29 additions & 14 deletions)
@@ -19,7 +19,7 @@ def setup(self):
         if options.sycl is None:
             return
 
-        repo_path = git_clone(self.directory, "compute-benchmarks-repo", "https://github.com/intel/compute-benchmarks.git", "c80ddec9f0b4905bcbeb0f264f710093dc70340d")
+        repo_path = git_clone(self.directory, "compute-benchmarks-repo", "https://github.com/intel/compute-benchmarks.git", "df38bc342641d7e83fbb4fe764a23d21d734e07b")
         build_path = create_build_path(self.directory, 'compute-benchmarks-build')
 
         configure_command = [
@@ -77,6 +77,13 @@ def benchmarks(self) -> list[Benchmark]:
 
         return benches
 
+def parse_unit_type(compute_unit):
+    if "[count]" in compute_unit:
+        return "instr"
+    elif "[us]" in compute_unit:
+        return "μs"
+    return "unknown"
+
 class ComputeBenchmark(Benchmark):
     def __init__(self, bench, name, test):
         self.bench = bench
@@ -90,9 +97,6 @@ def bin_args(self) -> list[str]:
     def extra_env_vars(self) -> dict:
         return {}
 
-    def unit(self):
-        return "μs"
-
     def setup(self):
         self.benchmark_bin = os.path.join(self.bench.directory, 'compute-benchmarks-build', 'bin', self.bench_name)
 
@@ -108,22 +112,32 @@ def run(self, env_vars) -> list[Result]:
         env_vars.update(self.extra_env_vars())
 
         result = self.run_bench(command, env_vars)
-        (label, mean) = self.parse_output(result)
-        return [ Result(label=self.name(), value=mean, command=command, env=env_vars, stdout=result) ]
+        parsed_results = self.parse_output(result)
+        ret = []
+        for label, mean, unit in parsed_results:
+            extra_label = " CPU count" if parse_unit_type(unit) == "CPU count" else ""
+            ret.append(Result(label=self.name() + extra_label, value=mean, command=command, env=env_vars, stdout=result, unit=parse_unit_type(unit)))
+        return ret
 
     def parse_output(self, output):
         csv_file = io.StringIO(output)
         reader = csv.reader(csv_file)
         next(reader, None)
-        data_row = next(reader, None)
-        if data_row is None:
+        results = []
+        while True:
+            data_row = next(reader, None)
+            if data_row is None:
+                break
+            try:
+                label = data_row[0]
+                mean = float(data_row[1])
+                unit = data_row[7]
+                results.append((label, mean, unit))
+            except (ValueError, IndexError) as e:
+                raise ValueError(f"Error parsing output: {e}")
+        if len(results) == 0:
             raise ValueError("Benchmark output does not contain data.")
-        try:
-            label = data_row[0]
-            mean = float(data_row[1])
-            return (label, mean)
-        except (ValueError, IndexError) as e:
-            raise ValueError(f"Error parsing output: {e}")
+        return results
 
     def teardown(self):
         return
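For reference, a minimal standalone sketch of the new parsing path (not part of the commit). The CSV header and rows below are invented for illustration; the real compute-benchmarks output differs, but the code above relies only on the label in column 0, the mean in column 1, and the unit in column 7:

    import csv
    import io

    def parse_unit_type(compute_unit):
        if "[count]" in compute_unit:
            return "instr"
        elif "[us]" in compute_unit:
            return "μs"
        return "unknown"

    # Invented two-row sample: one time measurement and one CPU counter row.
    sample = (
        "TestCase,Mean,Median,StdDev,Min,Max,Type,Unit\n"
        "api_overhead,12.5,12.1,0.4,11.9,13.7,gpu,[us]\n"
        "api_overhead,20413,20390,55,20311,20502,cpu,[count]\n"
    )

    reader = csv.reader(io.StringIO(sample))
    next(reader, None)  # skip the header row, as parse_output does
    for row in reader:
        label, mean, unit = row[0], float(row[1]), row[7]
        print(label, mean, parse_unit_type(unit))
    # prints:
    # api_overhead 12.5 μs
    # api_overhead 20413.0 instr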
@@ -249,6 +263,7 @@ def bin_args(self) -> list[str]:
             f"--memoryPlacement={self.placement}",
             "--useEvents=0",
             "--contents=Zeros",
+            "--multiplier=1",
         ]
 
 class VectorSum(ComputeBenchmark):
scripts/benchmarks/benches/llamacpp.py (5 changes: 1 addition & 4 deletions)
@@ -138,9 +138,6 @@ def __init__(self, bench):
         self.bench = bench
         super().__init__(bench.directory)
 
-    def unit(self):
-        return "token/s"
-
     def setup(self):
         self.benchmark_bin = os.path.join(self.bench.build_path, 'bin', 'llama-bench')
 
@@ -171,7 +168,7 @@ def run(self, env_vars) -> list[Result]:
         for r in parsed:
             (extra_label, mean) = r
             label = f"{self.name()} {extra_label}"
-            results.append(Result(label=label, value=mean, command=command, env=env_vars, stdout=result))
+            results.append(Result(label=label, value=mean, command=command, env=env_vars, stdout=result, unit="token/s"))
         return results
 
     def parse_output(self, output):
scripts/benchmarks/benches/result.py (2 changes: 1 addition & 1 deletion)
@@ -17,8 +17,8 @@ class Result:
     env: str
     stdout: str
     passed: bool = True
-    # values should not be set by the benchmark
     unit: str = ""
+    # values should not be set by the benchmark
     name: str = ""
     lower_is_better: bool = True
     git_hash: str = ''
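With unit moved above the comment, it now sits in the benchmark-settable part of the dataclass; name, lower_is_better, and git_hash remain runner-assigned. A hedged sketch of the new calling convention (values invented, mirroring the test.py change below):

    result = Result(label="MyBench", value=12.345, command="",
                    env={}, stdout="no output", unit="μs")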
scripts/benchmarks/benches/syclbench.py (6 changes: 2 additions & 4 deletions)
@@ -99,9 +99,6 @@ def bin_args(self) -> list[str]:
     def extra_env_vars(self) -> dict:
         return {}
 
-    def unit(self):
-        return "ms"
-
     def setup(self):
         self.benchmark_bin = os.path.join(self.directory, 'sycl-bench-build', self.bench_name)
 
@@ -134,7 +131,8 @@ def run(self, env_vars) -> list[Result]:
                               passed=(row[1]=="PASS"),
                               command=command,
                               env=env_vars,
-                              stdout=row))
+                              stdout=row,
+                              unit="ms"))
         self.done = True
         return res_list
 
scripts/benchmarks/benches/test.py (5 changes: 1 addition & 4 deletions)
@@ -49,9 +49,6 @@ def __init__(self, name, value, diff):
     def name(self):
         return self.bname
 
-    def unit(self):
-        return "ms"
-
     def lower_is_better(self):
         return True
 
@@ -61,7 +58,7 @@ def setup(self):
     def run(self, env_vars) -> list[Result]:
         random_value = self.value + random.uniform(-1 * (self.diff), self.diff)
         return [
-            Result(label=self.name(), value=random_value, command="", env={"A": "B"}, stdout="no output")
+            Result(label=self.name(), value=random_value, command="", env={"A": "B"}, stdout="no output", unit="ms")
         ]
 
     def teardown(self):
scripts/benchmarks/benches/velocity.py (35 changes: 9 additions & 26 deletions)
@@ -39,11 +39,12 @@ def benchmarks(self) -> list[Benchmark]:
         ]
 
 class VelocityBase(Benchmark):
-    def __init__(self, name: str, bin_name: str, vb: VelocityBench):
+    def __init__(self, name: str, bin_name: str, vb: VelocityBench, unit: str):
         super().__init__(vb.directory)
         self.vb = vb
         self.bench_name = name
         self.bin_name = bin_name
+        self.unit = unit
         self.code_path = os.path.join(self.vb.repo_path, self.bench_name, 'SYCL')
 
     def download_deps(self):
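To illustrate the new constructor contract, here is a hypothetical subclass (not part of this commit): each workload now declares its unit when calling VelocityBase.__init__, and the shared run() in the next hunk stamps that unit onto the Result it emits.

    class MyWorkload(VelocityBase):
        def __init__(self, vb: VelocityBench):
            # "GB/s" is an invented unit for this hypothetical workload
            super().__init__("my_workload", "my_workload_sycl", vb, "GB/s")

        def name(self):
            return "Velocity-Bench MyWorkload"

        def parse_output(self, stdout: str) -> float:
            # extract the figure of merit from stdout; workload-specific
            raise NotImplementedError()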
@@ -83,21 +84,18 @@ def run(self, env_vars) -> list[Result]:
 
         result = self.run_bench(command, env_vars)
 
-        return [ Result(label=self.name(), value=self.parse_output(result), command=command, env=env_vars, stdout=result) ]
+        return [ Result(label=self.name(), value=self.parse_output(result), command=command, env=env_vars, stdout=result, unit=self.unit) ]
 
     def teardown(self):
         return
 
 class Hashtable(VelocityBase):
     def __init__(self, vb: VelocityBench):
-        super().__init__("hashtable", "hashtable_sycl", vb)
+        super().__init__("hashtable", "hashtable_sycl", vb, "M keys/sec")
 
     def name(self):
         return "Velocity-Bench Hashtable"
 
-    def unit(self):
-        return "M keys/sec"
-
     def bin_args(self) -> list[str]:
         return ["--no-verify"]
 
@@ -114,15 +112,12 @@ def parse_output(self, stdout: str) -> float:
 
 class Bitcracker(VelocityBase):
     def __init__(self, vb: VelocityBench):
-        super().__init__("bitcracker", "bitcracker", vb)
+        super().__init__("bitcracker", "bitcracker", vb, "s")
         self.data_path = os.path.join(vb.repo_path, "bitcracker", "hash_pass")
 
     def name(self):
         return "Velocity-Bench Bitcracker"
 
-    def unit(self):
-        return "s"
-
     def bin_args(self) -> list[str]:
         return ["-f", f"{self.data_path}/img_win8_user_hash.txt",
                 "-d", f"{self.data_path}/user_passwords_60000.txt",

class SobelFilter(VelocityBase):
def __init__(self, vb: VelocityBench):
super().__init__("sobel_filter", "sobel_filter", vb)
super().__init__("sobel_filter", "sobel_filter", vb, "ms")

def download_deps(self):
self.download("sobel_filter", "https://github.com/oneapi-src/Velocity-Bench/raw/main/sobel_filter/res/sobel_filter_data.tgz?download=", "sobel_filter_data.tgz", untar=True)
@@ -146,9 +141,6 @@ def download_deps(self):
     def name(self):
         return "Velocity-Bench Sobel Filter"
 
-    def unit(self):
-        return "ms"
-
     def bin_args(self) -> list[str]:
         return ["-i", f"{self.data_path}/sobel_filter_data/silverfalls_32Kx32K.png",
                 "-n", "5"]
@@ -166,7 +158,7 @@ def parse_output(self, stdout: str) -> float:
 
 class QuickSilver(VelocityBase):
     def __init__(self, vb: VelocityBench):
-        super().__init__("QuickSilver", "qs", vb)
+        super().__init__("QuickSilver", "qs", vb, "MMS/CTT")
         self.data_path = os.path.join(vb.repo_path, "QuickSilver", "Examples", "AllScattering")
 
     def run(self, env_vars) -> list[Result]:
@@ -179,9 +171,6 @@ def run(self, env_vars) -> list[Result]:
     def name(self):
         return "Velocity-Bench QuickSilver"
 
-    def unit(self):
-        return "MMS/CTT"
-
     def lower_is_better(self):
         return False
 
@@ -200,17 +189,14 @@ def parse_output(self, stdout: str) -> float:
 
 class Easywave(VelocityBase):
     def __init__(self, vb: VelocityBench):
-        super().__init__("easywave", "easyWave_sycl", vb)
+        super().__init__("easywave", "easyWave_sycl", vb, "ms")
 
     def download_deps(self):
         self.download("easywave", "https://git.gfz-potsdam.de/id2/geoperil/easyWave/-/raw/master/data/examples.tar.gz", "examples.tar.gz", untar=True)
 
     def name(self):
         return "Velocity-Bench Easywave"
 
-    def unit(self):
-        return "ms"
-
     def bin_args(self) -> list[str]:
         return ["-grid", f"{self.data_path}/examples/e2Asean.grd",
                 "-source", f"{self.data_path}/examples/BengkuluSept2007.flt",
@@ -245,7 +231,7 @@ def parse_output(self, stdout: str) -> float:
 
 class CudaSift(VelocityBase):
     def __init__(self, vb: VelocityBench):
-        super().__init__("cudaSift", "cudaSift", vb)
+        super().__init__("cudaSift", "cudaSift", vb, "ms")
 
     def download_deps(self):
         images = os.path.join(self.vb.repo_path, self.bench_name, 'inputData')
@@ -256,9 +242,6 @@ def download_deps(self):
     def name(self):
         return "Velocity-Bench CudaSift"
 
-    def unit(self):
-        return "ms"
-
     def parse_output(self, stdout: str) -> float:
         match = re.search(r'Avg workload time = (\d+\.\d+) ms', stdout)
         if match:
scripts/benchmarks/main.py (3 changes: 1 addition & 2 deletions)
@@ -73,7 +73,7 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
         if bench_results is not None:
             for bench_result in bench_results:
                 if bench_result.passed:
-                    print(f"complete ({bench_result.label}: {bench_result.value:.3f} {benchmark.unit()}).")
+                    print(f"complete ({bench_result.label}: {bench_result.value:.3f} {bench_result.unit}).")
                 else:
                     print(f"complete ({bench_result.label}: verification FAILED)")
                 iteration_results.append(bench_result)
@@ -91,7 +91,6 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
         median_index = len(label_results) // 2
         median_result = label_results[median_index]
 
-        median_result.unit = benchmark.unit()
         median_result.name = label
         median_result.lower_is_better = benchmark.lower_is_better()
 
