Closed as not planned
Description
Bug report
Bug description:
#!/usr/bin/env python3
"""
Multicore Benchmark Application
Tests CPU performance across different numbers of cores with various workloads.
"""
import time
import multiprocessing as mp
import concurrent.futures
import math
import random
import numpy as np
from typing import List, Dict, Tuple, Callable
import argparse
import json
from dataclasses import dataclass
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
@dataclass
class BenchmarkResult:
    """Store benchmark results"""
    test_name: str
    cores_used: int
    execution_time: float
    operations_per_second: float
    speedup: float
    efficiency: float
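    # speedup is measured against the 1-core run of the same test
    # (baseline_time / avg_time); efficiency is speedup / cores_used.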
class MulticoreBenchmark:
    """Main benchmark class"""
    def __init__(self, max_cores: int = None):
        self.max_cores = max_cores or mp.cpu_count()
        self.results: List[BenchmarkResult] = []

    def cpu_intensive_task(self, n: int) -> float:
        """CPU-intensive mathematical computation"""
        result = 0.0
        for i in range(n):
            result += math.sin(i) * math.cos(i) * math.sqrt(i + 1)
        return result
    def prime_check(self, n: int) -> bool:
        """Check if a number is prime (CPU intensive)"""
        if n < 2:
            return False
        if n == 2:
            return True
        if n % 2 == 0:
            return False
        for i in range(3, int(math.sqrt(n)) + 1, 2):
            if n % i == 0:
                return False
        return True
    def matrix_multiplication(self, size: int) -> np.ndarray:
        """Matrix multiplication benchmark"""
        np.random.seed(42)  # For reproducible results
        a = np.random.rand(size, size)
        b = np.random.rand(size, size)
        return np.dot(a, b)

    def fibonacci(self, n: int) -> int:
        """Recursive Fibonacci (CPU intensive)"""
        if n <= 1:
            return n
        return self.fibonacci(n - 1) + self.fibonacci(n - 2)
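    # NOTE: fibonacci() is defined but never called by any benchmark below.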
    def monte_carlo_pi(self, samples: int) -> float:
        """Monte Carlo Pi estimation"""
        random.seed(42)
        inside_circle = 0
        for _ in range(samples):
            x, y = random.random(), random.random()
            if x*x + y*y <= 1:
                inside_circle += 1
        return 4 * inside_circle / samples
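    # NOTE: monte_carlo_pi() reseeds with a fixed value on every call, so
    # parallel workers all draw the identical sample stream and return the
    # same estimate; averaging them adds no statistical accuracy.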
    def run_cpu_benchmark(self, cores: int, iterations: int = 1000000) -> float:
        """Run CPU-intensive benchmark with specified number of cores"""
        work_per_core = iterations // cores
        if cores == 1:
            # Single-process execution
            start_time = time.time()
            self.cpu_intensive_task(iterations)
            return time.time() - start_time
        else:
            # Multi-process execution
            start_time = time.time()
            with ProcessPoolExecutor(max_workers=cores) as executor:
                futures = [executor.submit(self.cpu_intensive_task, work_per_core)
                           for _ in range(cores)]
                concurrent.futures.wait(futures)
            return time.time() - start_time
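    # NOTE: processes (not threads) are used because these workloads are
    # CPU-bound and threads would contend on the GIL; on Windows each
    # worker is spawned and re-imports this module, so pool startup cost
    # is included in the measured time.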
    def run_prime_benchmark(self, cores: int, max_number: int = 100000) -> float:
        """Run prime checking benchmark"""
        numbers = list(range(2, max_number))
        if cores == 1:
            start_time = time.time()
            primes = [n for n in numbers if self.prime_check(n)]
            end_time = time.time()
        else:
            start_time = time.time()
            with ProcessPoolExecutor(max_workers=cores) as executor:
                primes = [n for n, is_prime in
                          zip(numbers, executor.map(self.prime_check, numbers))
                          if is_prime]
            end_time = time.time()
        return end_time - start_time
    def run_matrix_benchmark(self, cores: int, matrix_size: int = 200) -> float:
        """Run matrix multiplication benchmark"""
        num_matrices = cores * 2
        if cores == 1:
            start_time = time.time()
            for _ in range(num_matrices):
                self.matrix_multiplication(matrix_size)
            end_time = time.time()
        else:
            start_time = time.time()
            with ProcessPoolExecutor(max_workers=cores) as executor:
                futures = [executor.submit(self.matrix_multiplication, matrix_size)
                           for _ in range(num_matrices)]
                concurrent.futures.wait(futures)
            end_time = time.time()
        return end_time - start_time
    def run_monte_carlo_benchmark(self, cores: int, total_samples: int = 10000000) -> float:
        """Run Monte Carlo Pi estimation benchmark"""
        samples_per_core = total_samples // cores
        if cores == 1:
            start_time = time.time()
            pi_estimate = self.monte_carlo_pi(total_samples)
            end_time = time.time()
        else:
            start_time = time.time()
            with ProcessPoolExecutor(max_workers=cores) as executor:
                futures = [executor.submit(self.monte_carlo_pi, samples_per_core)
                           for _ in range(cores)]
                results = [future.result() for future in futures]
                pi_estimate = sum(results) / len(results)
            end_time = time.time()
        return end_time - start_time
    def run_benchmark_suite(self, test_name: str, benchmark_func: Callable,
                            baseline_time: float = None) -> List[BenchmarkResult]:
        """Run a benchmark across different core counts"""
        results = []
        print(f"\n{'='*50}")
        print(f"Running {test_name} Benchmark")
        print(f"{'='*50}")
        for cores in range(1, self.max_cores + 1):
            print(f"Testing with {cores} core(s)... ", end="", flush=True)
            # Run benchmark multiple times and take average
            times = []
            for _ in range(3):
                exec_time = benchmark_func(cores)
                times.append(exec_time)
            avg_time = sum(times) / len(times)
            # Calculate metrics
            if baseline_time is None:
                baseline_time = avg_time
            speedup = baseline_time / avg_time
            efficiency = speedup / cores
            ops_per_second = 1 / avg_time if avg_time > 0 else 0
            result = BenchmarkResult(
                test_name=test_name,
                cores_used=cores,
                execution_time=avg_time,
                operations_per_second=ops_per_second,
                speedup=speedup,
                efficiency=efficiency
            )
            results.append(result)
            self.results.append(result)
            print(f"Time: {avg_time:.3f}s, Speedup: {speedup:.2f}x, Efficiency: {efficiency:.2f}")
        return results
    def print_summary(self):
        """Print benchmark summary"""
        print(f"\n{'='*80}")
        print("BENCHMARK SUMMARY")
        print(f"{'='*80}")
        # Group results by test name
        test_groups = {}
        for result in self.results:
            if result.test_name not in test_groups:
                test_groups[result.test_name] = []
            test_groups[result.test_name].append(result)
        for test_name, test_results in test_groups.items():
            print(f"\n{test_name}:")
            print(f"{'Cores':<6} {'Time(s)':<10} {'Speedup':<10} {'Efficiency':<12}")
            print("-" * 40)
            for result in test_results:
                print(f"{result.cores_used:<6} {result.execution_time:<10.3f} "
                      f"{result.speedup:<10.2f} {result.efficiency:<12.2f}")
    def save_results(self, filename: str):
        """Save results to JSON file"""
        data = {
            'system_info': {
                'max_cores': self.max_cores,
                'timestamp': time.time()
            },
            'results': [
                {
                    'test_name': r.test_name,
                    'cores_used': r.cores_used,
                    'execution_time': r.execution_time,
                    'operations_per_second': r.operations_per_second,
                    'speedup': r.speedup,
                    'efficiency': r.efficiency
                }
                for r in self.results
            ]
        }
        with open(filename, 'w') as f:
            json.dump(data, f, indent=2)
        print(f"\nResults saved to {filename}")
    def run_all_benchmarks(self):
        """Run all benchmark tests"""
        print(f"Starting multicore benchmark suite on {self.max_cores} cores")
        print(f"System: {mp.cpu_count()} CPU cores detected")
        # Run different benchmark types
        benchmarks = [
            ("CPU Intensive", self.run_cpu_benchmark),
            ("Prime Numbers", self.run_prime_benchmark),
            ("Matrix Multiplication", self.run_matrix_benchmark),
            ("Monte Carlo Pi", self.run_monte_carlo_benchmark)
        ]
        for name, func in benchmarks:
            self.run_benchmark_suite(name, func)
        self.print_summary()
def main():
    """Main function"""
    parser = argparse.ArgumentParser(description='Multicore Benchmark Application')
    parser.add_argument('--max-cores', type=int, default=None,
                        help='Maximum number of cores to test (default: all available)')
    parser.add_argument('--output', type=str, default='benchmark_results.json',
                        help='Output file for results (default: benchmark_results.json)')
    parser.add_argument('--test', type=str, choices=['cpu', 'prime', 'matrix', 'monte'],
                        help='Run specific test only')
    args = parser.parse_args()
    # Create benchmark instance
    benchmark = MulticoreBenchmark(max_cores=args.max_cores)
    if args.test:
        # Run specific test
        test_map = {
            'cpu': ("CPU Intensive", benchmark.run_cpu_benchmark),
            'prime': ("Prime Numbers", benchmark.run_prime_benchmark),
            'matrix': ("Matrix Multiplication", benchmark.run_matrix_benchmark),
            'monte': ("Monte Carlo Pi", benchmark.run_monte_carlo_benchmark)
        }
        name, func = test_map[args.test]
        benchmark.run_benchmark_suite(name, func)
    else:
        # Run all benchmarks
        benchmark.run_all_benchmarks()
    # Save results
    benchmark.save_results(args.output)
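# On Windows, multiprocessing defaults to the "spawn" start method, which
# re-imports this module in every worker process; keeping the entry point
# behind the __main__ guard below prevents recursive process creation.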
if __name__ == "__main__":
    main()
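For reference, a typical invocation looks like this (the script filename is illustrative, not taken from the report):

    python multicore_benchmark.py --max-cores 4 --output benchmark_results.json
    python multicore_benchmark.py --test monte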
CPython versions tested on:
3.14
Operating systems tested on:
Windows