Initial commit

This commit is contained in:
2025-11-20 22:58:11 -05:00
commit 6d75c8e94e
51 changed files with 5141 additions and 0 deletions

141
tests/unit/test_analysis.py Normal file
View File

@@ -0,0 +1,141 @@
"""Tests for analysis and benchmarking tools."""
import pytest
from src.analysis.analyzer import MazeAnalyzer
from src.analysis.benchmark import Benchmark
class TestMazeAnalyzer:
    """Unit tests covering MazeAnalyzer's maze-statistics API."""

    def test_analyze_returns_complete_data(self, medium_maze):
        """analyze() must expose every documented metric key."""
        report = MazeAnalyzer.analyze(medium_maze)
        expected_keys = {
            'dimensions', 'total_cells', 'algorithm', 'generation_time_ms',
            'seed', 'dead_ends', 'dead_end_percentage', 'longest_path_length',
            'longest_path_start', 'longest_path_end', 'average_branching_factor',
        }
        # Every expected key must be present in the report.
        missing = expected_keys - set(report)
        assert not missing

    def test_dead_ends_count(self, small_maze):
        """Dead-end count lies within [0, total cell count]."""
        total = small_maze.rows * small_maze.cols
        count = MazeAnalyzer.count_dead_ends(small_maze)
        assert 0 <= count <= total

    def test_dead_end_percentage(self, medium_maze):
        """Dead-end percentage is a valid percentage value."""
        report = MazeAnalyzer.analyze(medium_maze)
        assert 0 <= report['dead_end_percentage'] <= 100

    def test_longest_path(self, small_maze):
        """find_longest_path() reports a non-negative length plus endpoints."""
        path_info = MazeAnalyzer.find_longest_path(small_maze)
        for key in ('length', 'start', 'end'):
            assert key in path_info
        assert path_info['length'] >= 0

    def test_branching_factor(self, medium_maze):
        """Average branching factor stays within the feasible range."""
        factor = MazeAnalyzer.calculate_branching_factor(medium_maze)
        # A grid cell has at most 4 neighbours, so the factor is in [1, 4].
        assert 1.0 <= factor <= 4.0

    def test_total_cells(self, medium_maze):
        """Reported cell total equals rows * cols of the maze."""
        report = MazeAnalyzer.analyze(medium_maze)
        assert report['total_cells'] == medium_maze.rows * medium_maze.cols
class TestBenchmark:
    """Unit tests covering the Benchmark timing harness."""

    def test_benchmark_generators_runs(self):
        """Generator benchmark completes and labels its output."""
        report = Benchmark.benchmark_generators(
            sizes=[(5, 5), (10, 10)], iterations=2, seed=42,
        )
        assert 'benchmark_type' in report
        assert report['benchmark_type'] == 'generators'
        assert 'results' in report
        assert report['results']

    def test_benchmark_solvers_runs(self):
        """Solver benchmark completes and labels its output."""
        report = Benchmark.benchmark_solvers(
            sizes=[(5, 5), (10, 10)], iterations=2, seed=42,
        )
        assert 'benchmark_type' in report
        assert report['benchmark_type'] == 'solvers'
        assert 'results' in report
        assert report['results']

    def test_quick_benchmark(self):
        """quick_benchmark() returns both generator and solver sections."""
        report = Benchmark.quick_benchmark()
        for section in ('generators', 'solvers'):
            assert section in report

    def test_benchmark_generator_results_structure(self):
        """Each generator result row carries the full timing schema."""
        report = Benchmark.benchmark_generators(
            sizes=[(5, 5)], iterations=2, seed=42,
        )
        timing_keys = (
            'algorithm', 'size', 'avg_time_ms', 'min_time_ms', 'max_time_ms',
        )
        for row in report['results']:
            for key in timing_keys:
                assert key in row
            assert row['avg_time_ms'] >= 0

    def test_benchmark_solver_results_structure(self):
        """Each solver result row carries timing plus path-length stats."""
        report = Benchmark.benchmark_solvers(
            sizes=[(5, 5)], iterations=2, seed=42,
        )
        for row in report['results']:
            for key in ('algorithm', 'size', 'avg_time_ms', 'avg_path_length'):
                assert key in row
            assert row['avg_time_ms'] >= 0
            # A solved maze always yields a path of at least one cell.
            assert row['avg_path_length'] > 0

    def test_benchmark_multiple_sizes(self):
        """Benchmark emits one result per (algorithm, size) combination."""
        sizes = [(5, 5), (10, 10)]
        report = Benchmark.benchmark_generators(
            sizes=sizes, iterations=2, seed=42,
        )
        expected_rows = len(Benchmark.GENERATORS) * len(sizes)
        assert len(report['results']) == expected_rows