
当前大多数接口自动化框架存在三大瓶颈:新接口用例补齐慢、失败排查靠人工、断言维护成本高。
我们能否让系统自己:补用例、析失败、修断言?
答案是:可以,通过 MCP 将 AI 能力嵌入自动化框架。
[接口自动化框架(如 pytest + requests)]
        ↑
[MCP Client:按需调用智能能力]
        ↓
[MCP Server]
 ├── generate_test_cases       # 智能补 Case
 ├── analyze_failure_reason    # 失败根因分析
 ├── suggest_assertion_fix     # 断言修复建议
 └── validate_schema_change    # 接口变更影响评估
所有智能能力 可插拔、可审计、可关闭,不影响原有流程。
class GenerateTestCasesInput(BaseModel):
    """Input payload for the ``generate_test_cases`` MCP tool."""
    openapi_spec: dict = Field(..., description="OpenAPI 3.0 规范 JSON")
    endpoint_path: str  # path key inside the spec's "paths" mapping, e.g. "/orders/{id}"
    method: str  # HTTP method; lower-cased before the spec lookup

class TestCase(BaseModel):
    """One generated test case, ready to be rendered into a pytest function."""
    name: str  # human-readable test name
    request: dict  # {url, method, headers, json}
    expected_status: int  # expected HTTP status code
    assertions: list[str]  # e.g. "response.json()['code'] == 200"

@mcp.tool()
def generate_test_cases(input: GenerateTestCasesInput) -> list[TestCase]:
    """Generate candidate test cases for one endpoint of an OpenAPI spec.

    Looks up the endpoint's operation object, builds a prompt from its
    parameters and request-body schema, asks the fine-tuned test
    generation model, parses the model output into ``TestCase`` objects,
    and drops any case whose request looks dangerous.

    Raises:
        KeyError: if ``endpoint_path``/``method`` is not present in the spec.
    """
    spec = input.openapi_spec
    path_info = spec["paths"][input.endpoint_path][input.method.lower()]

    # Declared parameters (query/path/header), if any.
    params = path_info.get("parameters", [])

    # Bug fix: the original indexed ["content"]["application/json"]
    # unconditionally and raised KeyError for operations whose requestBody
    # carries no JSON content type; fall back to None instead.
    body_schema = (
        path_info.get("requestBody", {})
        .get("content", {})
        .get("application/json", {})
        .get("schema")
    )

    # Ask the dedicated test-generation model (fine-tuned CodeLlama).
    prompt = build_test_generation_prompt(path_info, params, body_schema)
    llm_output = test_gen_llm(prompt)

    # Parse the raw model output into structured TestCase objects.
    cases = parse_llm_output_to_test_cases(llm_output)

    # Safety net: filter out high-risk operations (e.g. DELETE /user/{id}).
    return [c for c in cases if not is_dangerous(c.request)]
# ci_pipeline.py
def on_new_api_deployed(openapi_url: str):
    """CI hook: when a new API version ships, auto-generate pytest cases
    for every newly added endpoint and open a merge request for review.

    Relies on module-level helpers/state: ``last_known_spec``,
    ``mcp_client``, ``detect_new_endpoints``, ``write_pytest_file``,
    ``create_merge_request``.
    """
    # Bug fix: requests.get without a timeout can hang the CI job forever;
    # also fail fast on non-2xx instead of parsing an error body as a spec.
    resp = requests.get(openapi_url, timeout=30)
    resp.raise_for_status()
    spec = resp.json()

    new_endpoints = detect_new_endpoints(spec, last_known_spec)

    for path, methods in new_endpoints.items():
        for method in methods:
            cases = mcp_client.call_tool("generate_test_cases", {
                "openapi_spec": spec,
                "endpoint_path": path,
                "method": method,
            })
            # Render the generated cases into a pytest file.
            write_pytest_file(path, method, cases)

    # Submit an MR; a test-dev engineer reviews before merge.
    create_merge_request("feat/auto-gen-cases-for-new-api")
✅ 收益:新接口上线当天,基础正向/反向 Case 自动覆盖,漏测率 ↓ 90%。
输入:失败的请求 + 响应 + 上下文(时间、环境、最近变更)
@mcp.tool()
def analyze_failure_reason(input: FailureContext) -> AnalysisResult:
    # Root-cause a failed request by checking, roughly in order of cost:
    # known issues -> resource metrics -> DB health -> recent code changes.
    # NOTE(review): the "..." argument lists below are elided in this
    # listing and must be filled in with real AnalysisResult fields.

    # Step 1: known-issue match against the knowledge base.
    known_issue = query_knowledge_base(input.error_message)
    if known_issue:
        return AnalysisResult(
            root_cause="known_issue",
            suggestion=known_issue.solution,
            confidence=0.95
        )

    # Step 2: correlate with monitoring metrics around the failure
    # (window: timestamp-300 .. timestamp+60 — presumably seconds).
    metrics = get_prometheus_metrics(
        service=input.service,
        start=input.timestamp - 300,
        end=input.timestamp + 60
    )
    if metrics["cpu_usage"] > 90:
        return AnalysisResult(root_cause="resource_exhaustion", ...)

    # Step 3: check DB health when the error message implicates the database.
    if "DB timeout" in input.error_message:
        db_health = check_db_health(input.db_instance)
        if not db_health.healthy:
            return AnalysisResult(root_cause="database_unavailable", ...)

    # Step 4: correlate with commits from the last hour touching this endpoint.
    recent_commits = get_recent_commits(input.service, input.timestamp - 3600)
    for commit in recent_commits:
        if input.endpoint in commit.changed_files:
            return AnalysisResult(
                root_cause="recent_code_change",
                suspect_commit=commit.sha,
                author=commit.author
            )

    # Nothing matched: escalate to a human.
    return AnalysisResult(root_cause="unknown", need_human=True)
# conftest.py
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Pytest hook: on every failed test call, attach an AI root-cause
    analysis to the report and auto-retry environment-related failures."""
    outcome = yield  # let pytest build the report first (hookwrapper protocol)
    rep = outcome.get_result()
    if rep.when == "call" and rep.failed:
        # Build failure context (failed request/response, env, timing).
        ctx = build_failure_context(item, call.excinfo)
        analysis = mcp_client.call_tool("analyze_failure_reason", ctx)

        # Attach the analysis as an extra section of the test report.
        rep.sections.append(("AI 分析", json.dumps(analysis.dict(), indent=2)))

        # Environment-level causes are transient: mark flaky to rerun twice.
        # NOTE(review): adding the marker at report time assumes the rerun
        # plugin re-reads markers — confirm against pytest-rerunfailures.
        if analysis.root_cause in ["resource_exhaustion", "network_fluctuation"]:
            item.add_marker(pytest.mark.flaky(reruns=2))
📊 效果:失败报告中直接显示:“可能原因:数据库主从延迟,建议检查 binlog 同步状态”。
场景:接口返回新增 discount_amount 字段,原断言 assert total == 100 失败。
@mcp.tool()
def suggest_assertion_fix(input: AssertionFixInput) -> AssertionFixOutput:
    """Propose a fixed assertion when a response gained a discount field.

    Example: the API starts returning ``discount_amount``, so the old
    assertion ``response.json()['total'] == 100`` now fails because
    total dropped to 90 while discount_amount is 10.
    """
    old_assertion = input.old_assertion      # e.g. "response.json()['total'] == 100"
    actual_response = input.actual_response  # e.g. {'total': 90, 'discount_amount': 10}

    # Parse the old assertion into the asserted field and expected value.
    field, expected = parse_assertion(old_assertion)  # field='total', expected=100

    # Bug fix: only propose this fix when the response actually carries a
    # discount_amount that explains the delta.  The original used
    # .get("discount_amount", 0), which also "detected" a discount field
    # whenever the value still matched (delta 0) with no such field present.
    if field in actual_response and "discount_amount" in actual_response:
        actual_value = actual_response[field]
        if abs(actual_value - expected) == actual_response["discount_amount"]:
            new_assertion = f"{old_assertion} or response.json()['total'] + response.json()['discount_amount'] == {expected}"
            return AssertionFixOutput(
                fixed_assertion=new_assertion,
                reason="检测到 discount_amount 字段,总价应包含折扣"
            )

    return AssertionFixOutput(fixed_assertion=None, reason="无法自动修复")
# In the CI pipeline: when the analysis classifies the failure as an
# assertion mismatch, ask the MCP server for a fix and open a PR with it.
# NOTE(review): the "{...}" payload is elided in this listing.
if analysis.root_cause == "assertion_mismatch":
    fix = mcp_client.call_tool("suggest_assertion_fix", {...})
    if fix.fixed_assertion:
        # Open a PR replacing the stale assertion line in the test file.
        create_pr_with_fix(test_file, old_line, fix.fixed_assertion)
        comment_on_issue(f"AI 建议修复断言: {fix.reason}")
✅ 收益:30% 的断言失败可自动修复,减少无效告警。
传统做法:所有失败都发企业微信。
智能做法:
# alert_router.py
def should_alert(analysis: AnalysisResult) -> bool:
    """Decide whether a failure analysis warrants an immediate alert.

    Known issues and environment noise are only recorded; a high-confidence
    code defect alerts immediately; everything else goes to the daily digest.
    """
    cause = analysis.root_cause
    if cause in ("known_issue", "environment_fluctuation"):
        # Recorded, never alerted.
        return False
    # Immediate alert only for a confident code defect; all remaining
    # cases are rolled up into the daily summary instead.
    return cause == "code_defect" and analysis.confidence > 0.8
📉 结果:告警量下降 60%,工程师不再“告警疲劳”。
| 能力 | 引入前 | 引入后 | 提升 |
|---|---|---|---|
| 新接口 Case 覆盖时效 | 2-3 天 | < 1 小时 | ↑ 48x |
| 失败分析平均耗时 | 25 分钟/次 | 2 分钟/次 | ↓ 92% |
| 无效告警占比 | 75% | 20% | ↓ 73% |
| 断言维护成本 | 8 人日/月 | 2 人日/月 | ↓ 75% |
接口自动化 + MCP 的本质是:把 AI 能力以可插拔、可审计、可关闭的工具形式嵌入既有流程,而非替代流程本身。
这才是可持续、可治理、可放大的智能测试。