diff --git a/src/google/adk/utils/output_schema_utils.py b/src/google/adk/utils/output_schema_utils.py
index 7c494f92..bb31d098 100644
--- a/src/google/adk/utils/output_schema_utils.py
+++ b/src/google/adk/utils/output_schema_utils.py
@@ -30,6 +30,17 @@ from .variant_utils import GoogleLLMVariant


 def can_use_output_schema_with_tools(model: Union[str, BaseLlm]) -> bool:
   """Returns True if output schema with tools is supported."""
+  # LiteLLM handles tools + response_format compatibility per-provider:
+  # - Providers with native support (OpenAI, Azure): both passed directly
+  # - Providers without (Fireworks): auto-converted to json_tool_call +
+  #   tool_choice enforcement
+  # This is strictly more reliable than the SetModelResponseTool
+  # prompt-based workaround.
+  from ..models.lite_llm import LiteLlm
+
+  if isinstance(model, LiteLlm):
+    return True
+
   model_string = model if isinstance(model, str) else model.model
   return (
diff --git a/tests/unittests/utils/test_output_schema_utils.py b/tests/unittests/utils/test_output_schema_utils.py
index fc2f6fb5..cf759c99 100644
--- a/tests/unittests/utils/test_output_schema_utils.py
+++ b/tests/unittests/utils/test_output_schema_utils.py
@@ -15,6 +15,7 @@
 from google.adk.models.anthropic_llm import Claude
 from google.adk.models.google_llm import Gemini
+from google.adk.models.lite_llm import LiteLlm
 from google.adk.utils.output_schema_utils import can_use_output_schema_with_tools
 import pytest


@@ -37,6 +38,11 @@ import pytest
        (Claude(model="claude-3.7-sonnet"), "1", False),
        (Claude(model="claude-3.7-sonnet"), "0", False),
        (Claude(model="claude-3.7-sonnet"), None, False),
+        (LiteLlm(model="openai/gpt-4o"), "1", True),
+        (LiteLlm(model="openai/gpt-4o"), "0", True),
+        (LiteLlm(model="openai/gpt-4o"), None, True),
+        (LiteLlm(model="anthropic/claude-3.7-sonnet"), None, True),
+        (LiteLlm(model="fireworks_ai/llama-v3p1-70b"), None, True),
    ],
)
def test_can_use_output_schema_with_tools(
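
Illustration (not part of the patch): a minimal sketch of how the updated check behaves, based only on the added short-circuit and the parametrized expectations in the test file above. It assumes no surrounding configuration overrides these results.

    from google.adk.models.anthropic_llm import Claude
    from google.adk.models.lite_llm import LiteLlm
    from google.adk.utils.output_schema_utils import (
        can_use_output_schema_with_tools,
    )

    # Any LiteLlm-wrapped model now short-circuits to True: LiteLLM reconciles
    # tools + response_format per provider, so ADK no longer needs the
    # SetModelResponseTool prompt-based workaround for these models.
    assert can_use_output_schema_with_tools(LiteLlm(model="openai/gpt-4o"))
    assert can_use_output_schema_with_tools(
        LiteLlm(model="fireworks_ai/llama-v3p1-70b")
    )

    # Non-LiteLlm models fall through to the pre-existing variant-based logic;
    # per the test table, Claude models still return False.
    assert not can_use_output_schema_with_tools(
        Claude(model="claude-3.7-sonnet")
    )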