refactor: Extract reusable private methods

Co-authored-by: Xiang (Sean) Zhou <seanzhougoogle@google.com>
PiperOrigin-RevId: 871128393
This commit is contained in:
Xiang (Sean) Zhou
2026-02-16 22:51:59 -08:00
committed by Copybara-Service
parent 4e2d6159ae
commit 706f9fe74d
4 changed files with 384 additions and 334 deletions
File diff suppressed because it is too large Load Diff
+39 -27
View File
@@ -34,6 +34,43 @@ from .base_llm_flow import BaseLlmFlow
logger = logging.getLogger('google_adk.' + __name__)
def _create_request_processors():
  """Build the default ordered request-processor pipeline for a single-agent flow.

  Returns:
    A list of request processors, ordered so that later processors can
    rely on the work done by earlier ones.
  """
  processors = [
      basic.request_processor,
      auth_preprocessor.request_processor,
      request_confirmation.request_processor,
      instructions.request_processor,
      identity.request_processor,
      contents.request_processor,
  ]
  # Configures the context cache and looks up any existing cache metadata.
  processors.append(context_cache_processor.request_processor)
  # Extracts previous_interaction_id so stateful conversations can be
  # resumed through the Interactions API.
  processors.append(interactions_processor.request_processor)
  # Some NL Planning implementations mark planning contents as thoughts in
  # the post processor; those marks must be removed here, so NL Planning
  # has to run after the contents processor.
  processors.append(_nl_planning.request_processor)
  # Code execution mutates the contents (to optimize data files), so it
  # also runs after the contents processor.
  processors.append(_code_execution.request_processor)
  # Adds a system instruction and set_model_response when both
  # output_schema and tools are present.
  processors.append(_output_schema_processor.request_processor)
  return processors
def _create_response_processors():
  """Build the default response-processor pipeline for a single-agent flow.

  Returns:
    A list of response processors applied to model output, in order.
  """
  processors = [
      _nl_planning.response_processor,
      _code_execution.response_processor,
  ]
  return processors
class SingleFlow(BaseLlmFlow):
"""SingleFlow is the LLM flow that handles tool calls.
@@ -43,30 +80,5 @@ class SingleFlow(BaseLlmFlow):
def __init__(self):
super().__init__()
self.request_processors += [
basic.request_processor,
auth_preprocessor.request_processor,
request_confirmation.request_processor,
instructions.request_processor,
identity.request_processor,
contents.request_processor,
# Context cache processor sets up cache config and finds existing cache metadata
context_cache_processor.request_processor,
# Interactions processor extracts previous_interaction_id for stateful
# conversations via the Interactions API
interactions_processor.request_processor,
# Some implementations of NL Planning mark planning contents as thoughts
# in the post processor. Since these need to be unmarked, NL Planning
# should be after contents.
_nl_planning.request_processor,
# Code execution should be after the contents as it mutates the contents
# to optimize data files.
_code_execution.request_processor,
# Output schema processor add system instruction and set_model_response
# when both output_schema and tools are present.
_output_schema_processor.request_processor,
]
self.response_processors += [
_nl_planning.response_processor,
_code_execution.response_processor,
]
self.request_processors += _create_request_processors()
self.response_processors += _create_response_processors()
+6 -18
View File
@@ -31,6 +31,7 @@ from google.adk.auth.auth_credential import OAuth2Auth
from google.adk.auth.auth_preprocessor import TOOLSET_AUTH_CREDENTIAL_ID_PREFIX
from google.adk.auth.auth_tool import AuthConfig
from google.adk.auth.auth_tool import AuthToolArguments
from google.adk.flows.llm_flows.base_llm_flow import _resolve_toolset_auth
from google.adk.flows.llm_flows.base_llm_flow import BaseLlmFlow
from google.adk.flows.llm_flows.base_llm_flow import TOOLSET_AUTH_CREDENTIAL_ID_PREFIX as FLOW_PREFIX
from google.adk.flows.llm_flows.functions import build_auth_request_event
@@ -119,14 +120,6 @@ class TestResolveToolsetAuth:
agent.tools = []
return agent
@pytest.fixture
def flow(self):
"""Create a BaseLlmFlow instance for testing."""
# BaseLlmFlow is abstract, but we can still test _resolve_toolset_auth
flow = Mock(spec=BaseLlmFlow)
flow._resolve_toolset_auth = BaseLlmFlow._resolve_toolset_auth
return flow
@pytest.mark.asyncio
async def test_no_tools_returns_no_events(
self, mock_invocation_context, mock_agent
@@ -134,9 +127,8 @@ class TestResolveToolsetAuth:
"""Test that no events are yielded when agent has no tools."""
mock_agent.tools = []
flow = BaseLlmFlow.__new__(BaseLlmFlow)
events = []
async for event in flow._resolve_toolset_auth(
async for event in _resolve_toolset_auth(
mock_invocation_context, mock_agent
):
events.append(event)
@@ -152,9 +144,8 @@ class TestResolveToolsetAuth:
toolset = MockToolset(auth_config=None)
mock_agent.tools = [toolset]
flow = BaseLlmFlow.__new__(BaseLlmFlow)
events = []
async for event in flow._resolve_toolset_auth(
async for event in _resolve_toolset_auth(
mock_invocation_context, mock_agent
):
events.append(event)
@@ -184,9 +175,8 @@ class TestResolveToolsetAuth:
mock_manager.get_auth_credential = AsyncMock(return_value=mock_credential)
MockCredentialManager.return_value = mock_manager
flow = BaseLlmFlow.__new__(BaseLlmFlow)
events = []
async for event in flow._resolve_toolset_auth(
async for event in _resolve_toolset_auth(
mock_invocation_context, mock_agent
):
events.append(event)
@@ -213,9 +203,8 @@ class TestResolveToolsetAuth:
mock_manager.get_auth_credential = AsyncMock(return_value=None)
MockCredentialManager.return_value = mock_manager
flow = BaseLlmFlow.__new__(BaseLlmFlow)
events = []
async for event in flow._resolve_toolset_auth(
async for event in _resolve_toolset_auth(
mock_invocation_context, mock_agent
):
events.append(event)
@@ -258,9 +247,8 @@ class TestResolveToolsetAuth:
mock_manager.get_auth_credential = AsyncMock(return_value=None)
MockCredentialManager.return_value = mock_manager
flow = BaseLlmFlow.__new__(BaseLlmFlow)
events = []
async for event in flow._resolve_toolset_auth(
async for event in _resolve_toolset_auth(
mock_invocation_context, mock_agent
):
events.append(event)
@@ -19,6 +19,7 @@ from unittest.mock import AsyncMock
from google.adk.agents.llm_agent import Agent
from google.adk.events.event import Event
from google.adk.flows.llm_flows.base_llm_flow import _handle_after_model_callback
from google.adk.flows.llm_flows.base_llm_flow import BaseLlmFlow
from google.adk.models.google_llm import Gemini
from google.adk.models.llm_request import LlmRequest
@@ -285,7 +286,7 @@ async def test_handle_after_model_callback_grounding_with_no_callbacks(
)
flow = BaseLlmFlowForTesting()
result = await flow._handle_after_model_callback(
result = await _handle_after_model_callback(
invocation_context, llm_response, event
)
@@ -342,7 +343,7 @@ async def test_handle_after_model_callback_grounding_with_callback_override(
)
flow = BaseLlmFlowForTesting()
result = await flow._handle_after_model_callback(
result = await _handle_after_model_callback(
invocation_context, llm_response, event
)
@@ -404,7 +405,7 @@ async def test_handle_after_model_callback_grounding_with_plugin_override(
)
flow = BaseLlmFlowForTesting()
result = await flow._handle_after_model_callback(
result = await _handle_after_model_callback(
invocation_context, llm_response, event
)
@@ -461,13 +462,13 @@ async def test_handle_after_model_callback_caches_canonical_tools():
flow = BaseLlmFlowForTesting()
# Call _handle_after_model_callback multiple times with the same context
result1 = await flow._handle_after_model_callback(
result1 = await _handle_after_model_callback(
invocation_context, llm_response, event
)
result2 = await flow._handle_after_model_callback(
result2 = await _handle_after_model_callback(
invocation_context, llm_response, event
)
result3 = await flow._handle_after_model_callback(
result3 = await _handle_after_model_callback(
invocation_context, llm_response, event
)