feat: Add ADK examples for litellm with add_function_to_prompt

Add examples for https://github.com/google/adk-python/issues/1273

PiperOrigin-RevId: 775352677
This commit is contained in:
Genquan Duan
2025-06-24 13:07:57 -07:00
committed by Copybara-Service
parent abc89d2c81
commit f33e0903b2
5 changed files with 183 additions and 3 deletions
@@ -0,0 +1,16 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import agent
@@ -0,0 +1,78 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from google.adk import Agent
from google.adk.models.lite_llm import LiteLlm
from langchain_core.utils.function_calling import convert_to_openai_function
def roll_die(sides: int) -> int:
  """Simulate rolling a fair die with the given number of sides.

  Args:
    sides: The integer number of sides the die has.

  Returns:
    An integer of the result of rolling the die.
  """
  # Pick uniformly from the faces 1..sides.
  return random.randrange(sides) + 1
def check_prime(number: int) -> str:
  """Check if a given number is prime.

  Args:
    number: The input number to check.

  Returns:
    A str indicating the number is prime or not.
  """
  # Numbers below 2 are not prime by definition.
  if number <= 1:
    return f"{number} is not prime."
  # A composite number must have a divisor no larger than its square root.
  has_divisor = any(
      number % candidate == 0
      for candidate in range(2, int(number**0.5) + 1)
  )
  if has_divisor:
    return f"{number} is not prime."
  return f"{number} is prime."
root_agent = Agent(
model=LiteLlm(
model="vertex_ai/meta/llama-4-maverick-17b-128e-instruct-maas",
# If the model is not trained with functions and you would like to
# enable function calling, you can add functions to the models, and the
# functions will be added to the prompts during inferences.
functions=[
convert_to_openai_function(roll_die),
convert_to_openai_function(check_prime),
],
),
name="data_processing_agent",
description="""You are a helpful assistant.""",
instruction="""
You are a helpful assistant, and call tools optionally.
If call tools, the tool format should be in json, and the tool arguments should be parsed from users inputs.
""",
tools=[
roll_die,
check_prime,
],
)
@@ -0,0 +1,81 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import time
import agent
from dotenv import load_dotenv
from google.adk import Runner
from google.adk.artifacts import InMemoryArtifactService
from google.adk.cli.utils import logs
from google.adk.sessions import InMemorySessionService
from google.adk.sessions import Session
from google.genai import types
# Load environment variables (e.g. API credentials); override=True lets the
# .env file win over values already present in the environment.
load_dotenv(override=True)
# Send ADK logs to a temp folder so this example's runs are easy to inspect.
logs.log_to_tmp_folder()
async def main():
  """Run a short scripted conversation against the example agent.

  Creates an in-memory session, sends three prompts through the Runner, and
  prints every text / tool-call / tool-response part the agent emits, plus
  total wall-clock time.
  """
  app_name = 'my_app'
  user_id_1 = 'user1'
  session_service = InMemorySessionService()
  artifact_service = InMemoryArtifactService()
  runner = Runner(
      app_name=app_name,
      agent=agent.root_agent,
      artifact_service=artifact_service,
      session_service=session_service,
  )
  session_11 = await session_service.create_session(
      app_name=app_name, user_id=user_id_1
  )

  async def run_prompt(session: Session, new_message: str):
    """Send one user message and print the streamed agent events."""
    content = types.Content(
        role='user', parts=[types.Part.from_text(text=new_message)]
    )
    print('** User says:', content.model_dump(exclude_none=True))
    async for event in runner.run_async(
        user_id=user_id_1,
        session_id=session.id,
        new_message=content,
    ):
      # Fix: some events carry no content (e.g. state-only updates), so the
      # original unconditional `event.content.parts` access could raise
      # AttributeError. Skip such events instead of crashing.
      if not event.content or not event.content.parts:
        continue
      # NOTE(review): only the first part is inspected, matching the original
      # behavior; multi-part events would need a loop here.
      part = event.content.parts[0]
      if part.text:
        print(f'** {event.author}: {part.text}')
      if part.function_call:
        print(f'** {event.author} calls tool: {part.function_call}')
      if part.function_response:
        print(
            f'** {event.author} gets tool response: {part.function_response}'
        )

  start_time = time.time()
  print('Start time:', start_time)
  print('------------------------------------')
  await run_prompt(session_11, 'Hi, introduce yourself.')
  await run_prompt(session_11, 'Roll a die with 100 sides.')
  await run_prompt(session_11, 'Check if it is prime.')
  end_time = time.time()
  print('------------------------------------')
  print('End time:', end_time)
  print('Total time:', end_time - start_time)


if __name__ == '__main__':
  asyncio.run(main())
+8
View File
@@ -29,6 +29,7 @@ from typing import Tuple
from typing import Union
from google.genai import types
import litellm
from litellm import acompletion
from litellm import ChatCompletionAssistantMessage
from litellm import ChatCompletionAssistantToolCall
@@ -53,6 +54,9 @@ from .base_llm import BaseLlm
from .llm_request import LlmRequest
from .llm_response import LlmResponse
# This will add functions to prompts if functions are provided.
litellm.add_function_to_prompt = True
logger = logging.getLogger("google_adk." + __name__)
_NEW_LINE = "\n"
@@ -662,6 +666,10 @@ class LiteLlm(BaseLlm):
messages, tools, response_format = _get_completion_inputs(llm_request)
if "functions" in self._additional_args:
# LiteLLM does not support both tools and functions together.
tools = None
completion_args = {
"model": self.model,
"messages": messages,
@@ -17,11 +17,8 @@ from google.adk.models.lite_llm import LiteLlm
from google.genai import types
from google.genai.types import Content
from google.genai.types import Part
import litellm
import pytest
litellm.add_function_to_prompt = True
_TEST_MODEL_NAME = "vertex_ai/meta/llama-3.1-405b-instruct-maas"
_SYSTEM_PROMPT = """