Agents python-testing-patterns
Implement comprehensive testing strategies with pytest, fixtures, mocking, and test-driven development. Use when writing Python tests, setting up test suites, or implementing testing best practices.
git clone https://github.com/wshobson/agents
T=$(mktemp -d) && git clone --depth=1 https://github.com/wshobson/agents "$T" && mkdir -p ~/.claude/skills && cp -r "$T/plugins/python-development/skills/python-testing-patterns" ~/.claude/skills/wshobson-agents-python-testing-patterns && rm -rf "$T"
plugins/python-development/skills/python-testing-patterns/SKILL.md — Python Testing Patterns
Comprehensive guide to implementing robust testing strategies in Python using pytest, fixtures, mocking, parameterization, and test-driven development practices.
When to Use This Skill
- Writing unit tests for Python code
- Setting up test suites and test infrastructure
- Implementing test-driven development (TDD)
- Creating integration tests for APIs and services
- Mocking external dependencies and services
- Testing async code and concurrent operations
- Setting up continuous testing in CI/CD
- Implementing property-based testing
- Testing database operations
- Debugging failing tests
Core Concepts
1. Test Types
- Unit Tests: Test individual functions/classes in isolation
- Integration Tests: Test interaction between components
- Functional Tests: Test complete features end-to-end
- Performance Tests: Measure speed and resource usage
2. Test Structure (AAA Pattern)
- Arrange: Set up test data and preconditions
- Act: Execute the code under test
- Assert: Verify the results
3. Test Coverage
- Measure what code is exercised by tests
- Identify untested code paths
- Aim for meaningful coverage, not just high percentages
4. Test Isolation
- Tests should be independent
- No shared state between tests
- Each test should clean up after itself
Quick Start
# test_example.py

def add(a, b):
    """Return the sum of two numbers."""
    return a + b


def test_add():
    """Basic test example: two positives sum correctly."""
    assert add(2, 3) == 5


def test_add_negative():
    """Test with negative numbers: opposites cancel."""
    assert add(-1, 1) == 0

# Run with: pytest test_example.py
Fundamental Patterns
Pattern 1: Basic pytest Tests
# test_calculator.py
import pytest


class Calculator:
    """Simple calculator for testing."""

    def add(self, a: float, b: float) -> float:
        """Return a + b."""
        return a + b

    def subtract(self, a: float, b: float) -> float:
        """Return a - b."""
        return a - b

    def multiply(self, a: float, b: float) -> float:
        """Return a * b."""
        return a * b

    def divide(self, a: float, b: float) -> float:
        """Return a / b; reject a zero divisor."""
        if b == 0:
            raise ValueError("Cannot divide by zero")
        return a / b


def test_addition():
    """Test addition across positive, mixed-sign, and zero operands."""
    calc = Calculator()
    assert calc.add(2, 3) == 5
    assert calc.add(-1, 1) == 0
    assert calc.add(0, 0) == 0


def test_subtraction():
    """Test subtraction, including a negative result."""
    calc = Calculator()
    assert calc.subtract(5, 3) == 2
    assert calc.subtract(0, 5) == -5


def test_multiplication():
    """Test multiplication, including the zero case."""
    calc = Calculator()
    assert calc.multiply(3, 4) == 12
    assert calc.multiply(0, 5) == 0


def test_division():
    """Test division with integer and fractional results."""
    calc = Calculator()
    assert calc.divide(6, 3) == 2
    assert calc.divide(5, 2) == 2.5


def test_division_by_zero():
    """Dividing by zero raises ValueError with the documented message."""
    calc = Calculator()
    with pytest.raises(ValueError, match="Cannot divide by zero"):
        calc.divide(5, 0)
Pattern 2: Fixtures for Setup and Teardown
# test_database.py
import pytest
from typing import Generator


class Database:
    """Simple database class."""

    def __init__(self, connection_string: str):
        self.connection_string = connection_string
        self.connected = False

    def connect(self):
        """Connect to database."""
        self.connected = True

    def disconnect(self):
        """Disconnect from database."""
        self.connected = False

    def query(self, sql: str) -> list:
        """Execute query; requires a prior connect()."""
        if not self.connected:
            raise RuntimeError("Not connected")
        return [{"id": 1, "name": "Test"}]


@pytest.fixture
def db() -> Generator[Database, None, None]:
    """Fixture that provides a connected database and tears it down."""
    database = Database("sqlite:///:memory:")   # setup
    database.connect()
    yield database                              # test body runs here
    database.disconnect()                       # teardown


def test_database_query(db):
    """Test database query via the fixture-provided connection."""
    rows = db.query("SELECT * FROM users")
    assert len(rows) == 1
    assert rows[0]["name"] == "Test"


@pytest.fixture(scope="session")
def app_config():
    """Session-scoped fixture - created once per test session."""
    return {
        "database_url": "postgresql://localhost/test",
        "api_key": "test-key",
        "debug": True,
    }


@pytest.fixture(scope="module")
def api_client(app_config):
    """Module-scoped fixture - created once per test module."""
    client = {"config": app_config, "session": "active"}   # expensive resource
    yield client
    client["session"] = "closed"                           # cleanup


def test_api_client(api_client):
    """Test using the module-scoped api client fixture."""
    assert api_client["session"] == "active"
    assert api_client["config"]["debug"] is True
Pattern 3: Parameterized Tests
# test_validation.py
import pytest


def is_valid_email(email: str) -> bool:
    """Loosely validate an email address.

    Requires a non-empty local part, a single "@", and a dot in the
    domain. Deliberately simple for the example; real code should use a
    dedicated validator.
    """
    parts = email.split("@")
    if len(parts) != 2:
        return False
    local, domain = parts
    # bool(local) rejects addresses like "@example.com"; the naive
    # '"@" in email' check would wrongly accept them (empty local part).
    return bool(local) and "." in domain


@pytest.mark.parametrize("email,expected", [
    ("user@example.com", True),
    ("test.user@domain.co.uk", True),
    ("invalid.email", False),
    ("@example.com", False),
    ("user@domain", False),
    ("", False),
])
def test_email_validation(email, expected):
    """Test email validation with various inputs."""
    assert is_valid_email(email) == expected


@pytest.mark.parametrize("a,b,expected", [
    (2, 3, 5),
    (0, 0, 0),
    (-1, 1, 0),
    (100, 200, 300),
    (-5, -5, -10),
])
def test_addition_parameterized(a, b, expected):
    """Test addition with multiple parameter sets."""
    from test_calculator import Calculator
    calc = Calculator()
    assert calc.add(a, b) == expected


# Using pytest.param for special cases
@pytest.mark.parametrize("value,expected", [
    pytest.param(1, True, id="positive"),
    pytest.param(0, False, id="zero"),
    pytest.param(-1, False, id="negative"),
])
def test_is_positive(value, expected):
    """Test with custom test IDs."""
    assert (value > 0) == expected
Pattern 4: Mocking with unittest.mock
# test_api_client.py
import pytest
from unittest.mock import Mock, patch, MagicMock
import requests


class APIClient:
    """Thin wrapper around requests for a JSON users API."""

    def __init__(self, base_url: str):
        self.base_url = base_url

    def get_user(self, user_id: int) -> dict:
        """Fetch user from API and return the decoded JSON body."""
        response = requests.get(f"{self.base_url}/users/{user_id}")
        response.raise_for_status()
        return response.json()

    def create_user(self, data: dict) -> dict:
        """Create new user and return the decoded JSON body."""
        response = requests.post(f"{self.base_url}/users", json=data)
        response.raise_for_status()
        return response.json()


def test_get_user_success():
    """A successful API call returns the decoded payload."""
    client = APIClient("https://api.example.com")
    fake_response = Mock()
    fake_response.json.return_value = {"id": 1, "name": "John Doe"}
    fake_response.raise_for_status.return_value = None
    with patch("requests.get", return_value=fake_response) as mock_get:
        user = client.get_user(1)
        assert user["id"] == 1
        assert user["name"] == "John Doe"
        mock_get.assert_called_once_with("https://api.example.com/users/1")


def test_get_user_not_found():
    """A 404 from raise_for_status propagates to the caller."""
    client = APIClient("https://api.example.com")
    fake_response = Mock()
    fake_response.raise_for_status.side_effect = requests.HTTPError("404 Not Found")
    with patch("requests.get", return_value=fake_response):
        with pytest.raises(requests.HTTPError):
            client.get_user(999)


@patch("requests.post")
def test_create_user(mock_post):
    """Test user creation using the decorator patching syntax."""
    client = APIClient("https://api.example.com")
    mock_post.return_value.json.return_value = {"id": 2, "name": "Jane Doe"}
    mock_post.return_value.raise_for_status.return_value = None
    payload = {"name": "Jane Doe", "email": "jane@example.com"}
    result = client.create_user(payload)
    assert result["id"] == 2
    mock_post.assert_called_once()
    assert mock_post.call_args.kwargs["json"] == payload
Pattern 5: Testing Exceptions
# test_exceptions.py
import pytest


def divide(a: float, b: float) -> float:
    """Divide a by b.

    Raises:
        TypeError: if either argument is not an int or float.
        ZeroDivisionError: if b is zero.
    """
    # Validate types first so divide("10", 0) reports the more accurate
    # TypeError instead of ZeroDivisionError.
    if not isinstance(a, (int, float)) or not isinstance(b, (int, float)):
        raise TypeError("Arguments must be numbers")
    if b == 0:
        raise ZeroDivisionError("Division by zero")
    return a / b


def test_zero_division():
    """Test exception is raised for division by zero."""
    with pytest.raises(ZeroDivisionError):
        divide(10, 0)


def test_zero_division_with_message():
    """Test exception message."""
    with pytest.raises(ZeroDivisionError, match="Division by zero"):
        divide(5, 0)


def test_type_error():
    """Test type error exception."""
    with pytest.raises(TypeError, match="must be numbers"):
        divide("10", 5)


def test_exception_info():
    """Test accessing exception info."""
    with pytest.raises(ValueError) as exc_info:
        int("not a number")
    assert "invalid literal" in str(exc_info.value)
For advanced patterns including async testing, monkeypatching, temporary files, conftest setup, property-based testing, database testing, CI/CD integration, and configuration files, see references/advanced-patterns.md
Test Design Principles
One Behavior Per Test
Each test should verify exactly one behavior. This makes failures easy to diagnose and tests easy to maintain.
# BAD - one test exercising several behaviors; a failure is hard to localize
def test_user_service():
    user = service.create_user(data)
    assert user.id is not None
    assert user.email == data["email"]
    updated = service.update_user(user.id, {"name": "New"})
    assert updated.name == "New"


# GOOD - each test pins down exactly one behavior
def test_create_user_assigns_id():
    user = service.create_user(data)
    assert user.id is not None


def test_create_user_stores_email():
    user = service.create_user(data)
    assert user.email == data["email"]


def test_update_user_changes_name():
    user = service.create_user(data)
    updated = service.update_user(user.id, {"name": "New"})
    assert updated.name == "New"
Test Error Paths
Always test failure cases, not just happy paths.
def test_get_user_raises_not_found():
    # The error should name the missing id so failures are diagnosable.
    with pytest.raises(UserNotFoundError) as exc_info:
        service.get_user("nonexistent-id")
    assert "nonexistent-id" in str(exc_info.value)


def test_create_user_rejects_invalid_email():
    # Malformed addresses must be rejected at creation time.
    with pytest.raises(ValueError, match="Invalid email format"):
        service.create_user({"email": "not-an-email"})
Testing Best Practices
Test Organization
# tests/ # __init__.py # conftest.py # Shared fixtures # test_unit/ # Unit tests # test_models.py # test_utils.py # test_integration/ # Integration tests # test_api.py # test_database.py # test_e2e/ # End-to-end tests # test_workflows.py
Test Naming Convention
A common pattern:
test_<unit>_<scenario>_<expected_outcome>. Adapt to your team's preferences.
# Pattern: test_<unit>_<scenario>_<expected>
def test_create_user_with_valid_data_returns_user(): ...
def test_create_user_with_duplicate_email_raises_conflict(): ...
def test_get_user_with_unknown_id_returns_none(): ...


# Good test names - clear and descriptive
def test_user_creation_with_valid_data():
    """Clear name describes what is being tested."""
    pass


def test_login_fails_with_invalid_password():
    """Name describes expected behavior."""
    pass


def test_api_returns_404_for_missing_resource():
    """Specific about inputs and expected outcomes."""
    pass


# Bad test names - avoid these
def test_1():  # Not descriptive
    pass


def test_user():  # Too vague
    pass


def test_function():  # Doesn't explain what's tested
    pass
Testing Retry Behavior
Verify that retry logic works correctly using mock side effects.
# Fixed: the original used pytest.raises without importing pytest.
import pytest
from unittest.mock import Mock


def test_retries_on_transient_error():
    """Test that service retries on transient failures."""
    client = Mock()
    # Fail twice, then succeed
    client.request.side_effect = [
        ConnectionError("Failed"),
        ConnectionError("Failed"),
        {"status": "ok"},
    ]
    service = ServiceWithRetry(client, max_retries=3)
    result = service.fetch()
    assert result == {"status": "ok"}
    assert client.request.call_count == 3


def test_gives_up_after_max_retries():
    """Test that service stops retrying after max attempts."""
    client = Mock()
    client.request.side_effect = ConnectionError("Failed")
    service = ServiceWithRetry(client, max_retries=3)
    with pytest.raises(ConnectionError):
        service.fetch()
    assert client.request.call_count == 3


def test_does_not_retry_on_permanent_error():
    """Test that permanent errors are not retried."""
    client = Mock()
    client.request.side_effect = ValueError("Invalid input")
    service = ServiceWithRetry(client, max_retries=3)
    with pytest.raises(ValueError):
        service.fetch()
    # Only called once - no retry for ValueError
    assert client.request.call_count == 1
Mocking Time with Freezegun
Use freezegun to control time in tests for predictable time-dependent behavior.
from freezegun import freeze_time
from datetime import datetime, timedelta


@freeze_time("2026-01-15 10:00:00")
def test_token_expiry():
    """A one-hour token expires exactly one hour after the frozen clock."""
    token = create_token(expires_in_seconds=3600)
    assert token.expires_at == datetime(2026, 1, 15, 11, 0, 0)


@freeze_time("2026-01-15 10:00:00")
def test_is_expired_returns_false_before_expiry():
    """Within the validity window the token reports not expired."""
    token = create_token(expires_in_seconds=3600)
    assert not token.is_expired()


@freeze_time("2026-01-15 12:00:00")
def test_is_expired_returns_true_after_expiry():
    """Past the expiry timestamp the token reports expired."""
    token = Token(expires_at=datetime(2026, 1, 15, 11, 30, 0))
    assert token.is_expired()


def test_with_time_travel():
    """Exercise behavior across time using the freeze_time context."""
    with freeze_time("2026-01-01") as frozen_time:
        item = create_item()
        assert item.created_at == datetime(2026, 1, 1)
        # Move forward in time
        frozen_time.move_to("2026-01-15")
        assert item.age_days == 14
Test Markers
# test_markers.py
# Fixed: the original used os.name without importing os.
import os

import pytest


@pytest.mark.slow
def test_slow_operation():
    """Mark slow tests."""
    import time
    time.sleep(2)


@pytest.mark.integration
def test_database_integration():
    """Mark integration tests."""
    pass


@pytest.mark.skip(reason="Feature not implemented yet")
def test_future_feature():
    """Skip tests temporarily."""
    pass


@pytest.mark.skipif(os.name == "nt", reason="Unix only test")
def test_unix_specific():
    """Conditional skip."""
    pass


@pytest.mark.xfail(reason="Known bug #123")
def test_known_bug():
    """Mark expected failures."""
    assert False


# Run with:
# pytest -m slow         # Run only slow tests
# pytest -m "not slow"   # Skip slow tests
# pytest -m integration  # Run integration tests
Coverage Reporting
# Install coverage support
pip install pytest-cov

# Run tests with coverage
pytest --cov=myapp tests/

# Generate HTML report
pytest --cov=myapp --cov-report=html tests/

# Fail if coverage below threshold
pytest --cov=myapp --cov-fail-under=80 tests/

# Show missing lines
pytest --cov=myapp --cov-report=term-missing tests/
For advanced patterns (async testing, monkeypatching, property-based testing, database testing, CI/CD integration, and configuration), see references/advanced-patterns.md