fix: Make OpenAPI /docs path work again
* The main fix is in eval_metrics.py
* Removed some deprecated paths in the FastAPI server
* Added a unit test to catch future breakages
* Bump fastapi version to be 0.124.1 to capture the fix in https://github.com/fastapi/fastapi/pull/14482
* Removed the upper-bound restriction on fastapi version which was used to temporarily fix the issue

Fixes #3173

Co-authored-by: Liang Wu <wuliang@google.com>

PiperOrigin-RevId: 861867708
commit 65cbf4b35c (parent 4ee125a038), committed by Copybara-Service
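For reviewers who want to sanity-check the fix locally, here is a minimal verification sketch. It assumes `app` is the FastAPI instance served by the ADK web server; how that instance is constructed is elided here, so the helper below is illustrative rather than the exact ADK API.

from fastapi.testclient import TestClient


def check_docs_endpoints(app):
  """Sketch: both OpenAPI endpoints should respond once the schema builds."""
  client = TestClient(app)
  # /openapi.json must serialize the full schema without errors.
  assert client.get("/openapi.json").status_code == 200
  # /docs serves the Swagger UI page generated from that schema.
  assert client.get("/docs").status_code == 200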
@@ -30,7 +30,7 @@ dependencies = [
   "anyio>=4.9.0, <5.0.0",  # For MCP Session Manager
   "authlib>=1.6.6, <2.0.0",  # For RestAPI Tool
   "click>=8.1.8, <9.0.0",  # For CLI tools
-  "fastapi>=0.115.0, <0.124.0",  # FastAPI framework
+  "fastapi>=0.124.1, <1.0.0",  # FastAPI framework
   "google-api-python-client>=2.157.0, <3.0.0",  # Google API client discovery
   "google-auth>=2.47.0",  # Google Auth library
   "google-cloud-aiplatform[agent_engines]>=1.132.0, <2.0.0",  # For VertexAI integrations, e.g. example store.
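With the upper bound removed, environments may resolve to any FastAPI release at or above 0.124.1. A quick sketch for confirming the installed version satisfies the new floor (standard library only, no ADK assumptions):

from importlib.metadata import version

installed = version("fastapi")


def as_tuple(v: str) -> tuple[int, ...]:
  # Compare numerically rather than lexically (e.g. "0.124.10" > "0.124.2").
  return tuple(int(part) for part in v.split(".")[:3] if part.isdigit())


assert as_tuple(installed) >= (0, 124, 1), f"fastapi {installed} is older than 0.124.1"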
@@ -946,27 +946,6 @@ class AdkWebServer:
             detail=str(ve),
         ) from ve

-    @deprecated(
-        "Please use create_eval_set instead. This will be removed in future"
-        " releases."
-    )
-    @app.post(
-        "/apps/{app_name}/eval_sets/{eval_set_id}",
-        response_model_exclude_none=True,
-        tags=[TAG_EVALUATION],
-    )
-    async def create_eval_set_legacy(
-        app_name: str,
-        eval_set_id: str,
-    ):
-      """Creates an eval set, given the id."""
-      await create_eval_set(
-          app_name=app_name,
-          create_eval_set_request=CreateEvalSetRequest(
-              eval_set=EvalSet(eval_set_id=eval_set_id, eval_cases=[])
-          ),
-      )
-
     @app.get(
         "/apps/{app_name}/eval-sets",
         response_model_exclude_none=True,
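For context, the legacy underscore paths above were registered as separate handlers wrapped in `@deprecated(...)`. If a deprecated alias ever needs to be kept in the future, FastAPI can flag a single route as deprecated in the generated schema instead of duplicating handlers. A small self-contained sketch (not ADK code; names are illustrative):

from fastapi import FastAPI

app = FastAPI()


@app.post("/apps/{app_name}/eval-sets/{eval_set_id}", deprecated=True)
async def create_eval_set_alias(app_name: str, eval_set_id: str) -> dict:
  # Shown as "deprecated" in /docs and /openapi.json, but still callable.
  return {"app_name": app_name, "eval_set_id": eval_set_id}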
@@ -982,19 +961,6 @@ class AdkWebServer:

       return ListEvalSetsResponse(eval_set_ids=eval_sets)

-    @deprecated(
-        "Please use list_eval_sets instead. This will be removed in future"
-        " releases."
-    )
-    @app.get(
-        "/apps/{app_name}/eval_sets",
-        response_model_exclude_none=True,
-        tags=[TAG_EVALUATION],
-    )
-    async def list_eval_sets_legacy(app_name: str) -> list[str]:
-      list_eval_sets_response = await list_eval_sets(app_name)
-      return list_eval_sets_response.eval_set_ids
-
     @app.post(
         "/apps/{app_name}/eval-sets/{eval_set_id}/add-session",
         response_model_exclude_none=True,
@@ -1142,22 +1108,6 @@ class AdkWebServer:
       except NotFoundError as nfe:
         raise HTTPException(status_code=404, detail=str(nfe)) from nfe

-    @deprecated(
-        "Please use run_eval instead. This will be removed in future releases."
-    )
-    @app.post(
-        "/apps/{app_name}/eval_sets/{eval_set_id}/run_eval",
-        response_model_exclude_none=True,
-        tags=[TAG_EVALUATION],
-    )
-    async def run_eval_legacy(
-        app_name: str, eval_set_id: str, req: RunEvalRequest
-    ) -> list[RunEvalResult]:
-      run_eval_response = await run_eval(
-          app_name=app_name, eval_set_id=eval_set_id, req=req
-      )
-      return run_eval_response.run_eval_results
-
     @app.post(
         "/apps/{app_name}/eval-sets/{eval_set_id}/run",
         response_model_exclude_none=True,
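Callers of the removed POST /apps/{app_name}/eval_sets/{eval_set_id}/run_eval path need to move to the hyphenated POST /apps/{app_name}/eval-sets/{eval_set_id}/run route kept above. A hedged client-side sketch, assuming the new route accepts the same RunEvalRequest-shaped payload the old tests used; the base URL, app name, and IDs are placeholders:

import requests  # any HTTP client works; requests is used here for brevity

base_url = "http://localhost:8000"          # placeholder
app_name, eval_set_id = "my_app", "my_set"  # placeholders

resp = requests.post(
    f"{base_url}/apps/{app_name}/eval-sets/{eval_set_id}/run",
    json={
        "eval_ids": ["my_eval_case"],
        "eval_metrics": [
            {"metric_name": "tool_trajectory_avg_score", "threshold": 0.5}
        ],
    },
)
resp.raise_for_status()
print(resp.json())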
@@ -1251,28 +1201,6 @@ class AdkWebServer:
       except ValidationError as ve:
         raise HTTPException(status_code=500, detail=str(ve)) from ve

-    @deprecated(
-        "Please use get_eval_result instead. This will be removed in future"
-        " releases."
-    )
-    @app.get(
-        "/apps/{app_name}/eval_results/{eval_result_id}",
-        response_model_exclude_none=True,
-        tags=[TAG_EVALUATION],
-    )
-    async def get_eval_result_legacy(
-        app_name: str,
-        eval_result_id: str,
-    ) -> EvalSetResult:
-      try:
-        return self.eval_set_results_manager.get_eval_set_result(
-            app_name, eval_result_id
-        )
-      except ValueError as ve:
-        raise HTTPException(status_code=404, detail=str(ve)) from ve
-      except ValidationError as ve:
-        raise HTTPException(status_code=500, detail=str(ve)) from ve
-
     @app.get(
         "/apps/{app_name}/eval-results",
         response_model_exclude_none=True,
@@ -1285,19 +1213,6 @@ class AdkWebServer:
       )
       return ListEvalResultsResponse(eval_result_ids=eval_result_ids)

-    @deprecated(
-        "Please use list_eval_results instead. This will be removed in future"
-        " releases."
-    )
-    @app.get(
-        "/apps/{app_name}/eval_results",
-        response_model_exclude_none=True,
-        tags=[TAG_EVALUATION],
-    )
-    async def list_eval_results_legacy(app_name: str) -> list[str]:
-      list_eval_results_response = await list_eval_results(app_name)
-      return list_eval_results_response.eval_result_ids
-
     @app.get(
         "/apps/{app_name}/metrics-info",
         response_model_exclude_none=True,
@@ -267,16 +267,6 @@ class EvalMetric(EvalBaseModel):
       ),
   )

-  judge_model_options: Optional[JudgeModelOptions] = Field(
-      deprecated=True,
-      default=None,
-      description=(
-          "[DEPRECATED] This field is deprecated in favor of `criterion`."
-          " Depending on the metric you may want to one of the sub-classes of"
-          " BaseCriterion."
-      ),
-  )
-
   criterion: Optional[BaseCriterion] = Field(
       default=None, description="""Evaluation criterion used by the metric."""
   )
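The removed judge_model_options field relied on Pydantic's `deprecated=True` field flag, which propagates into the model's JSON schema and therefore into the OpenAPI schema FastAPI generates. A self-contained sketch of that mechanism on an illustrative model (not the real EvalMetric; plain dicts stand in for JudgeModelOptions and BaseCriterion):

from typing import Optional

from pydantic import BaseModel, Field


class Metric(BaseModel):
  # Hypothetical stand-ins for JudgeModelOptions / BaseCriterion.
  judge_model_options: Optional[dict] = Field(default=None, deprecated=True)
  criterion: Optional[dict] = Field(
      default=None, description="Evaluation criterion used by the metric."
  )


# The deprecated flag shows up on the property schema (Pydantic v2.7+),
# which is what OpenAPI generation consumes.
schema = Metric.model_json_schema()
assert schema["properties"]["judge_model_options"].get("deprecated") is True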
@@ -1113,29 +1113,6 @@ def test_save_artifact_returns_500_on_unexpected_error(
   assert response.json()["detail"] == "unexpected failure"


-def test_create_eval_set(test_app, test_session_info):
-  """Test creating an eval set."""
-  url = f"/apps/{test_session_info['app_name']}/eval_sets/test_eval_set_id"
-  response = test_app.post(url)
-
-  # Verify the response
-  assert response.status_code == 200
-
-
-def test_list_eval_sets(test_app, create_test_eval_set):
-  """Test get eval set."""
-  info = create_test_eval_set
-  url = f"/apps/{info['app_name']}/eval_sets"
-  response = test_app.get(url)
-
-  # Verify the response
-  assert response.status_code == 200
-  data = response.json()
-  assert isinstance(data, list)
-  assert len(data) == 1
-  assert data[0] == "test_eval_set_id"
-
-
 def test_get_eval_set_result_not_found(test_app):
   """Test getting an eval set result that doesn't exist."""
   url = "/apps/test_app_name/eval_results/test_eval_result_id_not_found"
@@ -1143,65 +1120,6 @@ def test_get_eval_set_result_not_found(test_app):
   assert response.status_code == 404


-def test_run_eval(test_app, create_test_eval_set):
-  """Test running an eval."""
-
-  # Helper function to verify eval case result.
-  def verify_eval_case_result(actual_eval_case_result):
-    expected_eval_case_result = {
-        "evalSetId": "test_eval_set_id",
-        "evalId": "test_eval_case_id",
-        "finalEvalStatus": 1,
-        "overallEvalMetricResults": [{
-            "metricName": "tool_trajectory_avg_score",
-            "threshold": 0.5,
-            "score": 1.0,
-            "evalStatus": 1,
-            "details": {},
-        }],
-    }
-    for k, v in expected_eval_case_result.items():
-      assert actual_eval_case_result[k] == v
-
-  info = create_test_eval_set
-  url = f"/apps/{info['app_name']}/eval_sets/test_eval_set_id/run_eval"
-  payload = {
-      "eval_ids": ["test_eval_case_id"],
-      "eval_metrics": [
-          {"metric_name": "tool_trajectory_avg_score", "threshold": 0.5}
-      ],
-  }
-  response = test_app.post(url, json=payload)
-
-  # Verify the response
-  assert response.status_code == 200
-
-  data = response.json()
-  assert len(data) == 1
-  verify_eval_case_result(data[0])
-
-  # Verify the eval set result is saved via get_eval_result endpoint.
-  url = f"/apps/{info['app_name']}/eval_results/{info['app_name']}_test_eval_set_id_eval_result"
-  response = test_app.get(url)
-  assert response.status_code == 200
-  data = response.json()
-  assert isinstance(data, dict)
-  assert data["evalSetId"] == "test_eval_set_id"
-  assert (
-      data["evalSetResultId"]
-      == f"{info['app_name']}_test_eval_set_id_eval_result"
-  )
-  assert len(data["evalCaseResults"]) == 1
-  verify_eval_case_result(data["evalCaseResults"][0])
-
-  # Verify the eval set result is saved via list_eval_results endpoint.
-  url = f"/apps/{info['app_name']}/eval_results"
-  response = test_app.get(url)
-  assert response.status_code == 200
-  data = response.json()
-  assert data == [f"{info['app_name']}_test_eval_set_id_eval_result"]
-
-
 def test_list_metrics_info(test_app):
   """Test listing metrics info."""
   url = "/apps/test_app/metrics-info"
@@ -1233,6 +1151,13 @@ def test_debug_trace(test_app):
   logger.info("Debug trace test completed successfully")


+def test_openapi_json_schema_accessible(test_app):
+  """Test that the OpenAPI /openapi.json endpoint is accessible."""
+  response = test_app.get("/openapi.json")
+  assert response.status_code == 200
+  logger.info("OpenAPI /openapi.json endpoint is accessible")
+
+
 def test_get_event_graph_returns_dot_src_for_app_agent():
   """Ensure graph endpoint unwraps App instances before building the graph."""
   from google.adk.cli.adk_web_server import AdkWebServer
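The new test only exercises /openapi.json. If broader coverage is wanted, a natural follow-up would also hit /docs, the path named in the commit title. A sketch reusing the same test_app fixture, assuming the default docs URL is not disabled on the server:

def test_docs_page_accessible(test_app):
  """Sketch: the Swagger UI page should render once the schema is valid."""
  response = test_app.get("/docs")
  assert response.status_code == 200
  assert "text/html" in response.headers["content-type"]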