From 89df5fcf883b599cf7bfe40bde35b8d86ab0146b Mon Sep 17 00:00:00 2001 From: George Weale Date: Mon, 2 Mar 2026 10:39:54 -0800 Subject: [PATCH] feat: Enable output schema with tools for LiteLlm models LiteLlm provides built-in handling for tool and response format compatibility across different providers, allowing output schemas to be used reliably with tools for any LiteLlm instance. Close #3969 Co-authored-by: George Weale PiperOrigin-RevId: 877471153 --- src/google/adk/utils/output_schema_utils.py | 11 +++++++++++ tests/unittests/utils/test_output_schema_utils.py | 6 ++++++ 2 files changed, 17 insertions(+) diff --git a/src/google/adk/utils/output_schema_utils.py b/src/google/adk/utils/output_schema_utils.py index 7c494f92..bb31d098 100644 --- a/src/google/adk/utils/output_schema_utils.py +++ b/src/google/adk/utils/output_schema_utils.py @@ -30,6 +30,17 @@ from .variant_utils import GoogleLLMVariant def can_use_output_schema_with_tools(model: Union[str, BaseLlm]) -> bool: """Returns True if output schema with tools is supported.""" + # LiteLLM handles tools + response_format compatibility per-provider: + # - Providers with native support (OpenAI, Azure): both passed directly + # - Providers without (Fireworks): auto-converted to json_tool_call + + # tool_choice enforcement + # This is strictly more reliable than the SetModelResponseTool + # prompt-based workaround.
+ from ..models.lite_llm import LiteLlm + + if isinstance(model, LiteLlm): + return True + model_string = model if isinstance(model, str) else model.model return ( diff --git a/tests/unittests/utils/test_output_schema_utils.py b/tests/unittests/utils/test_output_schema_utils.py index fc2f6fb5..cf759c99 100644 --- a/tests/unittests/utils/test_output_schema_utils.py +++ b/tests/unittests/utils/test_output_schema_utils.py @@ -15,6 +15,7 @@ from google.adk.models.anthropic_llm import Claude from google.adk.models.google_llm import Gemini +from google.adk.models.lite_llm import LiteLlm from google.adk.utils.output_schema_utils import can_use_output_schema_with_tools import pytest @@ -37,6 +38,11 @@ import pytest (Claude(model="claude-3.7-sonnet"), "1", False), (Claude(model="claude-3.7-sonnet"), "0", False), (Claude(model="claude-3.7-sonnet"), None, False), + (LiteLlm(model="openai/gpt-4o"), "1", True), + (LiteLlm(model="openai/gpt-4o"), "0", True), + (LiteLlm(model="openai/gpt-4o"), None, True), + (LiteLlm(model="anthropic/claude-3.7-sonnet"), None, True), + (LiteLlm(model="fireworks_ai/llama-v3p1-70b"), None, True), ], ) def test_can_use_output_schema_with_tools(