Everything-claude-code-zh python-testing
使用pytest的Python测试策略,包括TDD方法、夹具、模拟、参数化和覆盖率要求。
install
source · Clone the upstream repo
git clone https://github.com/xu-xiang/everything-claude-code-zh
Claude Code · Install into ~/.claude/skills/
T=$(mktemp -d) && git clone --depth=1 https://github.com/xu-xiang/everything-claude-code-zh "$T" && mkdir -p ~/.claude/skills && cp -r "$T/docs/zh-CN/skills/python-testing" ~/.claude/skills/xu-xiang-everything-claude-code-zh-python-testing-c763d7 && rm -rf "$T"
manifest:
docs/zh-CN/skills/python-testing/SKILL.md · source content
Python 测试模式
使用 pytest、TDD 方法论和最佳实践的 Python 应用程序全面测试策略。
何时激活
- 编写新的 Python 代码(遵循 TDD:红、绿、重构)
- 为 Python 项目设计测试套件
- 审查 Python 测试覆盖率
- 设置测试基础设施
核心测试理念
测试驱动开发 (TDD)
始终遵循 TDD 循环:
- 红:为期望的行为编写一个失败的测试
- 绿:编写最少的代码使测试通过
- 重构:在保持测试通过的同时改进代码
# Step 1: Write failing test (RED) def test_add_numbers(): result = add(2, 3) assert result == 5 # Step 2: Write minimal implementation (GREEN) def add(a, b): return a + b # Step 3: Refactor if needed (REFACTOR)
覆盖率要求
- 目标:80%+ 代码覆盖率
- 关键路径:需要 100% 覆盖率
- 使用 pytest --cov 来测量覆盖率
pytest --cov=mypackage --cov-report=term-missing --cov-report=html
pytest 基础
基本测试结构
import pytest def test_addition(): """Test basic addition.""" assert 2 + 2 == 4 def test_string_uppercase(): """Test string uppercasing.""" text = "hello" assert text.upper() == "HELLO" def test_list_append(): """Test list append.""" items = [1, 2, 3] items.append(4) assert 4 in items assert len(items) == 4
断言
# Equality assert result == expected # Inequality assert result != unexpected # Truthiness assert result # Truthy assert not result # Falsy assert result is True # Exactly True assert result is False # Exactly False assert result is None # Exactly None # Membership assert item in collection assert item not in collection # Comparisons assert result > 0 assert 0 <= result <= 100 # Type checking assert isinstance(result, str) # Exception testing (preferred approach) with pytest.raises(ValueError): raise ValueError("error message") # Check exception message with pytest.raises(ValueError, match="invalid input"): raise ValueError("invalid input provided") # Check exception attributes with pytest.raises(ValueError) as exc_info: raise ValueError("error message") assert str(exc_info.value) == "error message"
夹具
基本夹具使用
import pytest @pytest.fixture def sample_data(): """Fixture providing sample data.""" return {"name": "Alice", "age": 30} def test_sample_data(sample_data): """Test using the fixture.""" assert sample_data["name"] == "Alice" assert sample_data["age"] == 30
带设置/拆卸的夹具
@pytest.fixture def database(): """Fixture with setup and teardown.""" # Setup db = Database(":memory:") db.create_tables() db.insert_test_data() yield db # Provide to test # Teardown db.close() def test_database_query(database): """Test database operations.""" result = database.query("SELECT * FROM users") assert len(result) > 0
夹具作用域
# Function scope (default) - runs for each test @pytest.fixture def temp_file(): with open("temp.txt", "w") as f: yield f os.remove("temp.txt") # Module scope - runs once per module @pytest.fixture(scope="module") def module_db(): db = Database(":memory:") db.create_tables() yield db db.close() # Session scope - runs once per test session @pytest.fixture(scope="session") def shared_resource(): resource = ExpensiveResource() yield resource resource.cleanup()
带参数的夹具
@pytest.fixture(params=[1, 2, 3]) def number(request): """Parameterized fixture.""" return request.param def test_numbers(number): """Test runs 3 times, once for each parameter.""" assert number > 0
使用多个夹具
@pytest.fixture def user(): return User(id=1, name="Alice") @pytest.fixture def admin(): return User(id=2, name="Admin", role="admin") def test_user_admin_interaction(user, admin): """Test using multiple fixtures.""" assert admin.can_manage(user)
自动使用夹具
@pytest.fixture(autouse=True) def reset_config(): """Automatically runs before every test.""" Config.reset() yield Config.cleanup() def test_without_fixture_call(): # reset_config runs automatically assert Config.get_setting("debug") is False
使用 Conftest.py 共享夹具
# tests/conftest.py import pytest @pytest.fixture def client(): """Shared fixture for all tests.""" app = create_app(testing=True) with app.test_client() as client: yield client @pytest.fixture def auth_headers(client): """Generate auth headers for API testing.""" response = client.post("/api/login", json={ "username": "test", "password": "test" }) token = response.json["token"] return {"Authorization": f"Bearer {token}"}
参数化
基本参数化
@pytest.mark.parametrize("input,expected", [ ("hello", "HELLO"), ("world", "WORLD"), ("PyThOn", "PYTHON"), ]) def test_uppercase(input, expected): """Test runs 3 times with different inputs.""" assert input.upper() == expected
多参数
@pytest.mark.parametrize("a,b,expected", [ (2, 3, 5), (0, 0, 0), (-1, 1, 0), (100, 200, 300), ]) def test_add(a, b, expected): """Test addition with multiple inputs.""" assert add(a, b) == expected
带 ID 的参数化
@pytest.mark.parametrize("input,expected", [ ("valid@email.com", True), ("invalid", False), ("@no-domain.com", False), ], ids=["valid-email", "missing-at", "missing-domain"]) def test_email_validation(input, expected): """Test email validation with readable test IDs.""" assert is_valid_email(input) is expected
参数化夹具
@pytest.fixture(params=["sqlite", "postgresql", "mysql"]) def db(request): """Test against multiple database backends.""" if request.param == "sqlite": return Database(":memory:") elif request.param == "postgresql": return Database("postgresql://localhost/test") elif request.param == "mysql": return Database("mysql://localhost/test") def test_database_operations(db): """Test runs 3 times, once for each database.""" result = db.query("SELECT 1") assert result is not None
标记器和测试选择
自定义标记器
# Mark slow tests @pytest.mark.slow def test_slow_operation(): time.sleep(5) # Mark integration tests @pytest.mark.integration def test_api_integration(): response = requests.get("https://api.example.com") assert response.status_code == 200 # Mark unit tests @pytest.mark.unit def test_unit_logic(): assert calculate(2, 3) == 5
运行特定测试
# Run only fast tests pytest -m "not slow" # Run only integration tests pytest -m integration # Run integration or slow tests pytest -m "integration or slow" # Run tests marked as unit but not slow pytest -m "unit and not slow"
在 pytest.ini 中配置标记器
[pytest] markers = slow: marks tests as slow integration: marks tests as integration tests unit: marks tests as unit tests django: marks tests as requiring Django
模拟和补丁
模拟函数
from unittest.mock import patch, Mock @patch("mypackage.external_api_call") def test_with_mock(api_call_mock): """Test with mocked external API.""" api_call_mock.return_value = {"status": "success"} result = my_function() api_call_mock.assert_called_once() assert result["status"] == "success"
模拟返回值
@patch("mypackage.Database.connect") def test_database_connection(connect_mock): """Test with mocked database connection.""" connect_mock.return_value = MockConnection() db = Database() db.connect() connect_mock.assert_called_once_with("localhost")
模拟异常
@patch("mypackage.api_call") def test_api_error_handling(api_call_mock): """Test error handling with mocked exception.""" api_call_mock.side_effect = ConnectionError("Network error") with pytest.raises(ConnectionError): api_call() api_call_mock.assert_called_once()
模拟上下文管理器
@patch("builtins.open", new_callable=mock_open) def test_file_reading(mock_file): """Test file reading with mocked open.""" mock_file.return_value.read.return_value = "file content" result = read_file("test.txt") mock_file.assert_called_once_with("test.txt", "r") assert result == "file content"
使用 Autospec
@patch("mypackage.DBConnection", autospec=True) def test_autospec(db_mock): """Test with autospec to catch API misuse.""" db = db_mock.return_value db.query("SELECT * FROM users") # This would fail if DBConnection doesn't have query method db_mock.assert_called_once()
模拟类实例
class TestUserService: @patch("mypackage.UserRepository") def test_create_user(self, repo_mock): """Test user creation with mocked repository.""" repo_mock.return_value.save.return_value = User(id=1, name="Alice") service = UserService(repo_mock.return_value) user = service.create_user(name="Alice") assert user.name == "Alice" repo_mock.return_value.save.assert_called_once()
模拟属性
@pytest.fixture def mock_config(): """Create a mock with a property.""" config = Mock() type(config).debug = PropertyMock(return_value=True) type(config).api_key = PropertyMock(return_value="test-key") return config def test_with_mock_config(mock_config): """Test with mocked config properties.""" assert mock_config.debug is True assert mock_config.api_key == "test-key"
测试异步代码
使用 pytest-asyncio 进行异步测试
import pytest @pytest.mark.asyncio async def test_async_function(): """Test async function.""" result = await async_add(2, 3) assert result == 5 @pytest.mark.asyncio async def test_async_with_fixture(async_client): """Test async with async fixture.""" response = await async_client.get("/api/users") assert response.status_code == 200
异步夹具
@pytest.fixture async def async_client(): """Async fixture providing async test client.""" app = create_app() async with app.test_client() as client: yield client @pytest.mark.asyncio async def test_api_endpoint(async_client): """Test using async fixture.""" response = await async_client.get("/api/data") assert response.status_code == 200
模拟异步函数
@pytest.mark.asyncio @patch("mypackage.async_api_call") async def test_async_mock(api_call_mock): """Test async function with mock.""" api_call_mock.return_value = {"status": "ok"} result = await my_async_function() api_call_mock.assert_awaited_once() assert result["status"] == "ok"
测试异常
测试预期异常
def test_divide_by_zero(): """Test that dividing by zero raises ZeroDivisionError.""" with pytest.raises(ZeroDivisionError): divide(10, 0) def test_custom_exception(): """Test custom exception with message.""" with pytest.raises(ValueError, match="invalid input"): validate_input("invalid")
测试异常属性
def test_exception_with_details(): """Test exception with custom attributes.""" with pytest.raises(CustomError) as exc_info: raise CustomError("error", code=400) assert exc_info.value.code == 400 assert "error" in str(exc_info.value)
测试副作用
测试文件操作
import tempfile import os def test_file_processing(): """Test file processing with temp file.""" with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt') as f: f.write("test content") temp_path = f.name try: result = process_file(temp_path) assert result == "processed: test content" finally: os.unlink(temp_path)
使用 pytest 的 tmp_path 夹具进行测试
def test_with_tmp_path(tmp_path): """Test using pytest's built-in temp path fixture.""" test_file = tmp_path / "test.txt" test_file.write_text("hello world") result = process_file(str(test_file)) assert result == "hello world" # tmp_path automatically cleaned up
使用 tmpdir 夹具进行测试
def test_with_tmpdir(tmpdir): """Test using pytest's tmpdir fixture.""" test_file = tmpdir.join("test.txt") test_file.write("data") result = process_file(str(test_file)) assert result == "data"
测试组织
目录结构
tests/ ├── conftest.py # Shared fixtures ├── __init__.py ├── unit/ # Unit tests │ ├── __init__.py │ ├── test_models.py │ ├── test_utils.py │ └── test_services.py ├── integration/ # Integration tests │ ├── __init__.py │ ├── test_api.py │ └── test_database.py └── e2e/ # End-to-end tests ├── __init__.py └── test_user_flow.py
测试类
class TestUserService: """Group related tests in a class.""" @pytest.fixture(autouse=True) def setup(self): """Setup runs before each test in this class.""" self.service = UserService() def test_create_user(self): """Test user creation.""" user = self.service.create_user("Alice") assert user.name == "Alice" def test_delete_user(self): """Test user deletion.""" user = User(id=1, name="Bob") self.service.delete_user(user) assert not self.service.user_exists(1)
最佳实践
应该做
- 遵循 TDD:在代码之前编写测试(红-绿-重构)
- 测试单一事物:每个测试应验证一个单一行为
- 使用描述性名称:test_user_login_with_invalid_credentials_fails
- 使用夹具:用夹具消除重复
- 模拟外部依赖:不要依赖外部服务
- 测试边界情况:空输入、None 值、边界条件
- 目标 80%+ 覆盖率:关注关键路径
- 保持测试快速:使用标记来分离慢速测试
不要做
- 不要测试实现:测试行为,而非内部实现
- 不要在测试中使用复杂的条件语句:保持测试简单
- 不要忽略测试失败:所有测试必须通过
- 不要测试第三方代码:相信库能正常工作
- 不要在测试之间共享状态:测试应该是独立的
- 不要在测试中捕获异常:使用 pytest.raises
- 不要使用 print 语句:使用断言和 pytest 输出
- 不要编写过于脆弱的测试:避免过度具体的模拟
常见模式
测试 API 端点 (FastAPI/Flask)
@pytest.fixture def client(): app = create_app(testing=True) return app.test_client() def test_get_user(client): response = client.get("/api/users/1") assert response.status_code == 200 assert response.json["id"] == 1 def test_create_user(client): response = client.post("/api/users", json={ "name": "Alice", "email": "alice@example.com" }) assert response.status_code == 201 assert response.json["name"] == "Alice"
测试数据库操作
@pytest.fixture def db_session(): """Create a test database session.""" session = Session(bind=engine) session.begin_nested() yield session session.rollback() session.close() def test_create_user(db_session): user = User(name="Alice", email="alice@example.com") db_session.add(user) db_session.commit() retrieved = db_session.query(User).filter_by(name="Alice").first() assert retrieved.email == "alice@example.com"
测试类方法
class TestCalculator: @pytest.fixture def calculator(self): return Calculator() def test_add(self, calculator): assert calculator.add(2, 3) == 5 def test_divide_by_zero(self, calculator): with pytest.raises(ZeroDivisionError): calculator.divide(10, 0)
pytest 配置
pytest.ini
[pytest] testpaths = tests python_files = test_*.py python_classes = Test* python_functions = test_* addopts = --strict-markers --disable-warnings --cov=mypackage --cov-report=term-missing --cov-report=html markers = slow: marks tests as slow integration: marks tests as integration tests unit: marks tests as unit tests
pyproject.toml
[tool.pytest.ini_options] testpaths = ["tests"] python_files = ["test_*.py"] python_classes = ["Test*"] python_functions = ["test_*"] addopts = [ "--strict-markers", "--cov=mypackage", "--cov-report=term-missing", "--cov-report=html", ] markers = [ "slow: marks tests as slow", "integration: marks tests as integration tests", "unit: marks tests as unit tests", ]
运行测试
# Run all tests pytest # Run specific file pytest tests/test_utils.py # Run specific test pytest tests/test_utils.py::test_function # Run with verbose output pytest -v # Run with coverage pytest --cov=mypackage --cov-report=html # Run only fast tests pytest -m "not slow" # Run until first failure pytest -x # Run and stop on N failures pytest --maxfail=3 # Run last failed tests pytest --lf # Run tests with pattern pytest -k "test_user" # Run with debugger on failure pytest --pdb
快速参考
| 模式 | 用法 |
|---|---|
| pytest.raises | 测试预期异常 |
| @pytest.fixture | 创建可重用的测试夹具 |
| @pytest.mark.parametrize | 使用多个输入运行测试 |
| @pytest.mark.slow | 标记慢速测试 |
| pytest -m "not slow" | 跳过慢速测试 |
| @patch / Mock | 模拟函数和类 |
| tmp_path 夹具 | 自动临时目录 |
| pytest --cov | 生成覆盖率报告 |
| assert | 简单且可读的断言 |
记住:测试也是代码。保持它们干净、可读且可维护。好的测试能发现错误;优秀的测试能预防错误。