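"""Benchmark driver: compiles and runs the C++ benchmarks in ./cpp_bench, runs the
matching Julia scripts in ./julia_bench, and compares the CSV output of each pair
with compare_csv.compare_csv_files, printing per-benchmark timings and the maximum
difference found."""
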
import argparse
import os
import statistics
import subprocess
import sys
import time

from compare_csv import compare_csv_files

# ANSI color codes
RED = '\033[0;31m'
GREEN = '\033[0;32m'
NC = '\033[0m'  # No Color


def remove_non_empty_dir(path):
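    """Recursively delete `path` and everything below it; do nothing if it does not exist."""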
    if os.path.exists(path):
        for root, dirs, files in os.walk(path, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                os.rmdir(os.path.join(root, name))
        os.rmdir(path)


def get_max_name_length(directory):
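    """Return the length of the longest CSV base name in `directory`, used to align the report."""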
    max_length = 0
    for file in os.listdir(directory):
        if file.endswith('.csv'):
            name_length = len(os.path.splitext(file)[0])
            if name_length > max_length:
                max_length = name_length
    return max_length


def format_difference(diff):
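    """Right-align a difference in 6 characters: scientific notation below the
    threshold, fixed-point notation otherwise, and a bare '0' for an exact match."""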
    threshold = 1e-5
    if diff != 0:
        if abs(diff) < threshold:
            return '{:.2e}'.format(diff).rjust(6)  # Scientific notation for small values
        else:
            return '{:.3f}'.format(diff).rjust(6)  # Fixed-point notation for larger values
    else:
        return '0'.rjust(6)


def run_benchmark(command, runs, precompile=False):
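    """Run `command` `runs` times and return (avg, min, max, stddev) of the timings in seconds.

    By default each run is timed with time.perf_counter(). With precompile=True the
    wall-clock time is discarded and the timing is read from the last line of the
    command's stdout, which is expected to report nanoseconds.
    """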
    times = []
    for _ in range(runs):
        start_time = time.perf_counter()
        output = subprocess.run(command, capture_output=True, text=True)
        elapsed = time.perf_counter() - start_time
        if precompile:
            out = output.stdout.splitlines()[-1]  # splitlines() drops the trailing newline, so [-1] is the last printed value
            times.append(float(out) * 1e-9)  # Convert from nanoseconds to seconds
        else:
            times.append(elapsed)

    avg_time = sum(times) / len(times)
    min_time = min(times)
    max_time = max(times)
    std_dev = statistics.stdev(times) if len(times) > 1 else 0

    return avg_time, min_time, max_time, std_dev


def main(tolerance, runs, silent, no_clean, precompile):
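    """Compile and run the C++ benchmarks, run the matching Julia benchmarks,
    compare their CSV outputs within `tolerance`, and print a colored summary."""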
    BENCHMARK_DIR = "./cpp_bench"
    JULIA_DIR = "./julia_bench"
    COMPILER = "g++"
    CFLAGS = ["-O3", "-fopenmp", "-I", "../../include/", "-I", "/usr/include/eigen3"]
    BIN_DIR = "./cpp_bin_temp"
    OUTPUT_DIR = "./csv_temp"

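    # Layout assumed above: C++ sources live in BENCHMARK_DIR, Julia scripts in
    # JULIA_DIR; BIN_DIR and OUTPUT_DIR are temporary and recreated on every run.
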
    # Clean up and create directories
    for dir_path in [BIN_DIR, OUTPUT_DIR]:
        remove_non_empty_dir(dir_path)
        os.makedirs(dir_path)

    for file in os.listdir('.'):
        if file.endswith('.csv'):
            os.remove(file)

    # Compile and run C++ benchmarks
    if not silent: print("----- Running C++ Benchmarks -----")
    cpp_times = {}
    for benchmark in os.listdir(BENCHMARK_DIR):
        if benchmark.endswith(".cpp"):
            name = os.path.splitext(benchmark)[0]
            if not silent: print(f"Compiling {name}...", end="", flush=True)
            subprocess.run([COMPILER, *CFLAGS, "-o", f"{BIN_DIR}/{name}", f"{BENCHMARK_DIR}/{benchmark}"])
            if not silent: print(" Running...", end="", flush=True)
            cpp_times[name] = run_benchmark([f"./{BIN_DIR}/{name}"], runs, precompile)
            if not silent: print(" Done.", flush=True)

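    # Each C++ benchmark is expected to write its results to <name>.csv in the
    # working directory; those files become the reference data collected below.
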
    # Move CSV files to output directory
    for file in os.listdir('.'):
        if file.endswith('.csv'):
            os.rename(file, f"{OUTPUT_DIR}/{file}")

    max_name_length = get_max_name_length(OUTPUT_DIR)

    # Run Julia benchmarks and compare
    if not silent: print("\n----- Running Julia Benchmarks -----")
    results_dict = {}
    pass_all = True
    julia_times = {}
    for csv_file in sorted(os.listdir(OUTPUT_DIR), key=lambda x: os.path.splitext(x)[0]):
        name = os.path.splitext(csv_file)[0]
        padded_name = name.ljust(max_name_length)
        if os.path.exists(f"{JULIA_DIR}/{name}.jl"):
            if not silent: print(f"Running {name}...", end="", flush=True)
            julia_times[name] = run_benchmark(["julia", f"{JULIA_DIR}/{name}.jl"], runs, precompile)

            if os.path.exists(f"./{name}.csv"):
                are_equal, _, _, _, max_diff = compare_csv_files(f"./{name}.csv", f"{OUTPUT_DIR}/{csv_file}", tolerance)
                formatted_diff = format_difference(max_diff)
                cpp_time = '{:.4f}s'.format(cpp_times[name][0]).rjust(8)
                julia_time = '{:.4f}s'.format(julia_times[name][0]).rjust(8)
                result = f"{padded_name}: {'Success' if are_equal else 'Failure'} (Max Diff: {formatted_diff}, C++: {cpp_time}, Julia: {julia_time})"
                result_color = GREEN if are_equal else RED
                results_dict[name] = f"{result_color}{result}{NC}"
                if not are_equal:
                    pass_all = False
            else:
                results_dict[name] = f"{RED}{padded_name}: No Julia output{NC}"
                pass_all = False
            if not silent: print(" Done.", flush=True)

    # Clean up
    if not no_clean:
        remove_non_empty_dir(BIN_DIR)
        remove_non_empty_dir(OUTPUT_DIR)

        for file in os.listdir('.'):
            if file.endswith('.csv'):
                os.remove(file)

    # Print results
    if not silent: print("\n----- Benchmark Results -----")
    print(f"Parameters: Tolerance = {tolerance}, Runs = {runs}, Precompile = {precompile}")
    for name in sorted(results_dict):
        print(results_dict[name])
    if pass_all:
        print(f"\n{GREEN}All benchmarks and comparisons passed.{NC}")
    else:
        print(f"\n{RED}Some benchmarks or comparisons failed.{NC}")
        sys.exit(1)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Benchmark and Compare Script')
    parser.add_argument('--tolerance', type=float, default=0, help='Tolerance for CSV comparison')
    parser.add_argument('--runs', type=int, default=1, help='Number of benchmark runs')
    parser.add_argument('--silent', action='store_true', help='Run in silent mode without printing details')
    parser.add_argument('--no-clean', action='store_true', help='Do not clean up temporary files')
    parser.add_argument('--precompile', action='store_true', help='Use precompiling for Julia benchmarks and rely on benchmark script for timing')
    args = parser.parse_args()
    main(args.tolerance, args.runs, args.silent, args.no_clean, args.precompile)
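# Example invocations (the script filename here is illustrative):
#   python benchmark_compare.py --runs 5 --tolerance 1e-6
#   python benchmark_compare.py --precompile --silent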