feat: Enable output schema with tools for LiteLlm models

LiteLLM provides built-in handling of tool and response-format compatibility across different providers, allowing output schemas to be used reliably together with tools for any LiteLlm instance.

Closes #3969

Co-authored-by: George Weale <gweale@google.com>
PiperOrigin-RevId: 877471153
This commit is contained in:
George Weale
2026-03-02 10:39:54 -08:00
committed by Copybara-Service
parent f9c104faf7
commit 89df5fcf88
2 changed files with 17 additions and 0 deletions
@@ -30,6 +30,17 @@ from .variant_utils import GoogleLLMVariant
def can_use_output_schema_with_tools(model: Union[str, BaseLlm]) -> bool:
"""Returns True if output schema with tools is supported."""
# LiteLLM handles tools + response_format compatibility per-provider:
# - Providers with native support (OpenAI, Azure): both passed directly
# - Providers without (Fireworks): auto-converted to json_tool_call +
# tool_choice enforcement
# This is strictly more reliable than the SetModelResponseTool
# prompt-based workaround.
from ..models.lite_llm import LiteLlm
if isinstance(model, LiteLlm):
return True
model_string = model if isinstance(model, str) else model.model
return (
@@ -15,6 +15,7 @@
from google.adk.models.anthropic_llm import Claude
from google.adk.models.google_llm import Gemini
from google.adk.models.lite_llm import LiteLlm
from google.adk.utils.output_schema_utils import can_use_output_schema_with_tools
import pytest
@@ -37,6 +38,11 @@ import pytest
(Claude(model="claude-3.7-sonnet"), "1", False),
(Claude(model="claude-3.7-sonnet"), "0", False),
(Claude(model="claude-3.7-sonnet"), None, False),
(LiteLlm(model="openai/gpt-4o"), "1", True),
(LiteLlm(model="openai/gpt-4o"), "0", True),
(LiteLlm(model="openai/gpt-4o"), None, True),
(LiteLlm(model="anthropic/claude-3.7-sonnet"), None, True),
(LiteLlm(model="fireworks_ai/llama-v3p1-70b"), None, True),
],
)
def test_can_use_output_schema_with_tools(