Add "Writing-Tests"
parent
3b9f342ef1
commit
ea76c22954
|
|
@ -0,0 +1,512 @@
|
|||
# Writing Tests

Guide to creating automated tests for McRogueFace using Python and the automation API.

## Quick Reference

**Test Location:** `tests/` directory (NOT `build/tests/` - that directory gets shipped!)

**Test Types:**
1. **Direct Execution** - No game loop; immediate results
2. **Timer-Based** - Requires rendering and the game loop

**Key Tools:**
- `mcrfpy.automation` - Screenshot and input automation
- `--headless` flag - Run without a display
- `--exec` flag - Execute a specific script

**Format:** See the "Testing Guidelines" section of CLAUDE.md

---

## Test Type 1: Direct Execution

For tests that don't need rendering or game state.

### Template

```python
#!/usr/bin/env python3
"""Test description goes here."""

import mcrfpy
import sys

def test_feature():
    # Setup
    obj = mcrfpy.SomeClass()

    # Test
    obj.property = 42

    # Verify
    assert obj.property == 42, f"Expected 42, got {obj.property}"

    # More tests...
    return True

if __name__ == "__main__":
    try:
        if test_feature():
            print("PASS")
            sys.exit(0)
        else:
            print("FAIL")
            sys.exit(1)
    except Exception as e:
        print(f"FAIL: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
```

### Running

```bash
cd build
./mcrogueface --headless --exec ../tests/test_myfeature.py
```

### Example: Testing Vector Operations

```python
import mcrfpy
import sys

def test_vector():
    # Create vector
    v = mcrfpy.Vector(3.0, 4.0)

    # Test properties
    assert v.x == 3.0
    assert v.y == 4.0

    # Test tuple access
    assert v[0] == 3.0
    assert v[1] == 4.0

    # Test modification
    v.x = 10.0
    assert v.x == 10.0

    print("All vector tests passed")
    return True

if __name__ == "__main__":
    if test_vector():
        print("PASS")
        sys.exit(0)
    else:
        print("FAIL")
        sys.exit(1)
```

---

## Test Type 2: Timer-Based Tests

For tests requiring rendering, screenshots, or the game loop.

### Template

```python
#!/usr/bin/env python3
"""Test description - requires game loop."""

import mcrfpy
from mcrfpy import automation
import sys

# Setup scene BEFORE game loop starts
mcrfpy.createScene("test")

# Create UI elements
frame = mcrfpy.Frame(100, 100, 200, 150)
frame.fill_color = mcrfpy.Color(255, 0, 0)
mcrfpy.sceneUI("test").append(frame)

# Switch to test scene
mcrfpy.setScene("test")

def run_test(runtime_ms):
    """Timer callback - runs AFTER game loop starts."""

    # Take screenshot
    automation.screenshot("test_output.png")

    # Perform interactions
    automation.click(150, 150)  # Click on frame

    # Verify results
    # ... check state, take more screenshots, etc ...

    # Report results
    print("PASS")
    sys.exit(0)  # MUST exit!

# Schedule test to run after game loop starts
mcrfpy.setTimer("test_runner", run_test, 100)  # 100ms delay
```

### Running

```bash
cd build
./mcrogueface --headless --exec ../tests/test_rendering.py
```

**Important:** Timer callbacks are essential! Screenshots only work after rendering starts.

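A minimal illustration of this note (hedged; exactly when the first frame is available depends on your build): a capture at module scope runs before anything has rendered, while one inside a timer callback runs after the loop has started drawing. Only the documented `screenshot` and `setTimer` calls are used; the callback and timer names are illustrative.

```python
import sys
import mcrfpy
from mcrfpy import automation

# Module scope runs before the first frame is rendered, so a capture here
# is not expected to show your scene:
# automation.screenshot("too_early.png")

def capture_after_render(runtime_ms):
    # Timer callback: the game loop has started and has drawn at least once
    automation.screenshot("rendered.png")
    print("PASS")
    sys.exit(0)

mcrfpy.setTimer("capture", capture_after_render, 100)
```
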
### Example: Testing Click Events

```python
import mcrfpy
from mcrfpy import automation
import sys

# Track clicks
clicks_received = []

def on_frame_click(x, y, button, state):
    clicks_received.append((x, y, button, state))

# Setup scene
mcrfpy.createScene("test")

frame = mcrfpy.Frame(100, 100, 200, 150)
frame.fill_color = mcrfpy.Color(0, 255, 0)
frame.click = on_frame_click

mcrfpy.sceneUI("test").append(frame)
mcrfpy.setScene("test")

def run_test(runtime_ms):
    # Simulate click on frame center
    automation.click(200, 175)

    # Give it a frame to process
    mcrfpy.setTimer("verify", verify_results, 32)

def verify_results(runtime_ms):
    # Check click was received
    assert len(clicks_received) > 0, "No clicks received!"

    x, y, button, state = clicks_received[0]
    assert button == 1, f"Wrong button: {button}"
    assert state == True, "Wrong state"

    print(f"Click received at ({x}, {y})")
    print("PASS")
    sys.exit(0)

mcrfpy.setTimer("test", run_test, 100)
```

---

## Automation API

### Screenshots

```python
from mcrfpy import automation

# Take screenshot
automation.screenshot("output.png")

# Screenshots saved to current directory
# Use for visual regression testing
```

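When a test takes several captures, unique filenames keep repeated runs from overwriting each other. A small sketch using only the standard library around the documented `screenshot()` call; the `capture` helper and its naming scheme are illustrative, not part of the API.

```python
import time
from mcrfpy import automation

def capture(label):
    # Produces e.g. "before_click_1700000000000.png"; format is our choice
    filename = f"{label}_{int(time.time() * 1000)}.png"
    automation.screenshot(filename)
    return filename

before = capture("before_click")
```
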
### Mouse Input

```python
from mcrfpy import automation

# Click at position
automation.click(x, y, button=1)  # button: 1=left, 2=middle, 3=right

# Move mouse
automation.move(x, y)
```

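A short usage sketch combining the two calls above, e.g. when a hover handler should fire before the click lands (coordinates are arbitrary):

```python
from mcrfpy import automation

automation.move(300, 200)             # position the cursor over the target first
automation.click(300, 200, button=1)  # then left-click the same spot
```
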
### Keyboard Input

```python
from mcrfpy import automation

# Key press
automation.keypress(key_code, pressed=True)

# Key release
automation.keypress(key_code, pressed=False)
```

**Key codes:** See `mcrfpy.Key.*` constants

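A hedged sketch of a full key tap built from the two calls above, typically invoked from inside a timer callback once the loop is running. `mcrfpy.Key.SPACE` is an assumed constant name; check the actual `mcrfpy.Key.*` identifiers in your build, and if your handler needs a frame between down and up, split the two calls across chained timers as in the click example.

```python
import mcrfpy
from mcrfpy import automation

def tap(key_code):
    automation.keypress(key_code, pressed=True)   # key down
    automation.keypress(key_code, pressed=False)  # key up

tap(mcrfpy.Key.SPACE)  # hypothetical constant; substitute one from your build
```
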
---

## Test-Driven Development (TDD)

### TDD Workflow

1. **Write failing test** - Demonstrates the bug/missing feature
2. **Run test** - Verify it fails
3. **Implement fix** - Make the minimum change to pass
4. **Run test** - Verify it passes
5. **Refactor** - Clean up if needed
6. **Run test** - Verify it still passes

### Example: TDD for New Feature

**Step 1: Write failing test**

`tests/test_sprite_rotation.py`:
```python
import mcrfpy

def test_sprite_rotation():
    sprite = mcrfpy.Sprite("test.png", 0, 0)

    # This should work but doesn't yet
    sprite.rotation = 90
    assert sprite.rotation == 90

    return True

if __name__ == "__main__":
    try:
        if test_sprite_rotation():
            print("PASS")
        else:
            print("FAIL")
    except Exception as e:
        print(e)
        print("FAIL")
```

**Step 2: Run test - it fails**

```bash
$ ./mcrogueface --headless --exec tests/test_sprite_rotation.py
AttributeError: 'Sprite' object has no attribute 'rotation'
FAIL
```

**Step 3: Implement feature**

Add a `rotation` property to `src/UISprite.cpp` (see [[Adding-Python-Bindings]]).

**Step 4: Run test - it passes**

```bash
$ ./mcrogueface --headless --exec tests/test_sprite_rotation.py
PASS
```

---

## Testing Best Practices

### DO:

✅ **Test one thing at a time**
```python
def test_grid_walkable_setter():
    grid = mcrfpy.Grid(10, 10, 16, 16)
    grid.walkable((5, 5), True)
    # Test only the walkable setter, nothing else
```

✅ **Use descriptive test names**
```python
# Good
def test_entity_moves_to_valid_position(): ...

# Bad
def test1(): ...
```

✅ **Clean up after tests**
```python
def test_feature():
    # Setup
    obj = create_test_object()

    try:
        # Test
        obj.do_thing()
    finally:
        # Cleanup
        obj.cleanup()
```

✅ **Test edge cases**
```python
def test_grid_bounds():
    grid = mcrfpy.Grid(10, 10, 16, 16)

    # Test boundaries
    grid.at((0, 0))  # Min
    grid.at((9, 9))  # Max

    # Test out of bounds
    try:
        grid.at((10, 10))
        assert False, "Should have raised exception"
    except ValueError:
        pass  # Expected
```

### DON'T:

❌ **Test multiple unrelated things**
```python
# Bad - testing grid, entity, AND animation
def test_everything():
    grid = mcrfpy.Grid(10, 10, 16, 16)
    entity = mcrfpy.Entity(5, 5, 0)
    mcrfpy.animate(entity, "x", 10, 1000, "linear")
    # Too much!
```

❌ **Forget to exit in timer-based tests**
```python
def run_test(runtime_ms):
    automation.screenshot("test.png")
    print("PASS")
    # MISSING: sys.exit(0) - the test will hang!
```

❌ **Rely on timing**
```python
import time

# Bad - fragile
time.sleep(0.5)  # Hope the animation finished
assert entity.x == 100

# Good - poll the state with a timeout
timeout = 0
while entity.x < 100 and timeout < 1000:
    time.sleep(0.016)
    timeout += 16
assert entity.x == 100
```

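Blocking with `time.sleep` is also awkward in timer-based tests, where the callback runs inside the game loop. A hedged, non-blocking alternative is to chain timer callbacks the same way the click-event example does. This assumes `setTimer` fires once per registration and that a timer name can be re-registered; the `done` flag and the `"poll"` timer name are illustrative.

```python
import sys
import mcrfpy

done = False        # your test sets this to True when the awaited state is reached
elapsed = 0         # milliseconds waited so far
POLL_MS = 16
TIMEOUT_MS = 1000

def poll(runtime_ms):
    global elapsed
    if done:
        print("PASS")
        sys.exit(0)
    elapsed += POLL_MS
    if elapsed >= TIMEOUT_MS:
        print("FAIL: timed out waiting for state")
        sys.exit(1)
    mcrfpy.setTimer("poll", poll, POLL_MS)  # schedule the next check

mcrfpy.setTimer("poll", poll, POLL_MS)
```
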
---

## Common Testing Patterns

### Pattern 1: Property Round-Trip

```python
def test_property_roundtrip():
    obj = mcrfpy.Frame(0, 0, 100, 100)

    # Set various values
    test_values = [0, 50, 100, 255, 127]

    for value in test_values:
        obj.x = value
        assert obj.x == value, f"Failed for {value}"
```

### Pattern 2: Visual Regression

```python
def run_test(runtime_ms):
    # Render scene
    automation.screenshot("current.png")

    # Compare with golden image
    # (Manual comparison or use image diff tool)

    print("Check current.png against golden/expected.png")
    sys.exit(0)
```

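The comparison itself can be automated when rendering is deterministic. A hedged sketch using only the standard library: hash the capture and a stored golden image, and fail on any difference. The `golden/expected.png` path follows the example above; if output varies between runs, swap in a perceptual image-diff tool instead of a byte-for-byte hash.

```python
import hashlib
import sys
import mcrfpy
from mcrfpy import automation

def file_sha256(path):
    with open(path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest()

def run_test(runtime_ms):
    automation.screenshot("current.png")
    # Compare on a later tick in case the capture is written asynchronously
    mcrfpy.setTimer("compare", compare, 100)

def compare(runtime_ms):
    if file_sha256("current.png") == file_sha256("golden/expected.png"):
        print("PASS")
        sys.exit(0)
    print("FAIL: current.png differs from golden/expected.png")
    sys.exit(1)

mcrfpy.setTimer("test", run_test, 100)
```
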
### Pattern 3: Exception Testing

```python
def test_invalid_input():
    grid = mcrfpy.Grid(10, 10, 16, 16)

    # Test exception is raised
    try:
        grid.at((-1, -1))  # Invalid coords
        assert False, "Should have raised ValueError"
    except ValueError as e:
        assert "out of bounds" in str(e).lower()
```

### Pattern 4: State Machine Testing

```python
def test_entity_states():
    entity = mcrfpy.Entity(0, 0, 0)

    # Initial state
    assert entity.state == "idle"

    # Transition
    entity.attack()
    assert entity.state == "attacking"

    # Complete action
    entity.finish_attack()
    assert entity.state == "idle"
```

---

## Debugging Failed Tests

### Get More Information

```python
def test_feature():
    # obj is created in the test's setup code
    try:
        obj.do_thing()
    except Exception as e:
        print(f"Exception: {e}")
        print(f"Object state: {obj}")
        print(f"Type: {type(obj)}")
        raise  # Re-raise to fail the test
```

### Visual Debugging

```python
def run_test(runtime_ms):
    # Take screenshot before action
    automation.screenshot("before.png")

    # Perform action
    do_something()

    # Take screenshot after
    automation.screenshot("after.png")

    # Now you can compare the two visually
```

### Print Debugging

```python
def test_complex_operation():
    print("Starting test...")

    obj = create_object()
    print(f"Created: {obj}")

    result = obj.process()
    print(f"Result: {result}")

    assert result == expected  # 'expected' is the known-good value for this test
```

---

## Related Documentation

- CLAUDE.md - Testing guidelines section
- [[Performance-Optimization-Workflow]] - Creating benchmarks
- `tests/` directory - Example tests

**Testing Pattern Examples:**
- `tests/test_grid_operations.py` - Direct execution
- `tests/issue_78_middle_click_test.py` - Timer-based with automation