feat: Support static instructions

Static instructions:
Always added to system instructions for context caching

Dynamic instructions:
Added to system instructions when no static instruction exists (for backward compatibility), OR inserted before last batch of continuous user content when static instructions exist

PiperOrigin-RevId: 809170679
This commit is contained in:
Xiang (Sean) Zhou
2025-09-19 13:46:00 -07:00
committed by Copybara-Service
parent f4e1fd962e
commit 9be9cc2fee
7 changed files with 893 additions and 23 deletions
+57 -4
View File
@@ -134,7 +134,18 @@ class LlmAgent(BaseAgent):
"""The config type for this agent."""
instruction: Union[str, InstructionProvider] = ''
"""Instructions for the LLM model, guiding the agent's behavior."""
"""Dynamic instructions for the LLM model, guiding the agent's behavior.
These instructions can contain placeholders like {variable_name} that will be
resolved at runtime using session state and context.
**Behavior depends on static_instruction:**
- If static_instruction is None: instruction goes to system_instruction
- If static_instruction is set: instruction goes to user content in the request
This allows for context caching optimization where static content (static_instruction)
comes first in the prompt, followed by dynamic content (instruction).
"""
global_instruction: Union[str, InstructionProvider] = ''
"""Instructions for all the agents in the entire agent tree.
@@ -145,6 +156,48 @@ class LlmAgent(BaseAgent):
or personality.
"""
static_instruction: Optional[types.Content] = None
"""Static instruction content sent literally as system instruction at the beginning.
This field is for content that never changes and doesn't contain placeholders.
It's sent directly to the model without any processing or variable substitution.
This field is primarily for context caching optimization. Static instructions
are sent as system instruction at the beginning of the request, allowing
for improved performance when the static portion remains unchanged. Live API
has its own cache mechanism, thus this field doesn't work with Live API.
**Impact on instruction field:**
- When static_instruction is None: instruction → system_instruction
- When static_instruction is set: instruction → user content (after static content)
**Context Caching:**
- **Implicit Cache**: Automatic caching by model providers (no config needed)
- **Explicit Cache**: Cache explicitly created by user for instructions, tools and contents
See below for more information on Implicit Cache and Explicit Cache:
Gemini API: https://ai.google.dev/gemini-api/docs/caching?lang=python
Vertex API: https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-overview
Setting static_instruction alone does NOT enable caching automatically.
For explicit caching control, configure context_cache_config at App level.
**Content Support:**
Can contain text, files, binaries, or any combination as types.Content
supports multiple part types (text, inline_data, file_data, etc.).
**Example:**
```python
static_instruction = types.Content(
role='user',
parts=[
types.Part(text='You are a helpful assistant.'),
types.Part(file_data=types.FileData(...))
]
)
```
"""
tools: list[ToolUnion] = Field(default_factory=list)
"""Tools available to this agent."""
@@ -462,9 +515,7 @@ class LlmAgent(BaseAgent):
):
result = ''.join(
part.text
for part in event.content.parts
if part.text and not part.thought
[part.text if part.text else '' for part in event.content.parts]
)
if self.output_schema:
# If the result from the final chunk is just whitespace or empty,
@@ -600,6 +651,8 @@ class LlmAgent(BaseAgent):
kwargs['model'] = config.model
if config.instruction:
kwargs['instruction'] = config.instruction
if config.static_instruction:
kwargs['static_instruction'] = config.static_instruction
if config.disallow_transfer_to_parent:
kwargs['disallow_transfer_to_parent'] = config.disallow_transfer_to_parent
if config.disallow_transfer_to_peers:
+19 -1
View File
@@ -53,7 +53,25 @@ class LlmAgentConfig(BaseAgentConfig):
),
)
# Dynamic instruction text; routing depends on whether static_instruction
# is also set (see LlmAgent.instruction for the full contract).
instruction: str = Field(
    description=(
        'Required. LlmAgent.instruction. Dynamic instructions with'
        ' placeholder support. Behavior: if static_instruction is None, goes'
        ' to system_instruction; if static_instruction is set, goes to user'
        ' content after static content.'
    ),
)
# Literal, never-templated content placed first for context caching.
static_instruction: Optional[types.Content] = Field(
    default=None,
    description=(
        'Optional. LlmAgent.static_instruction. Static content sent literally'
        ' at position 0 without placeholder processing. When set, changes'
        ' instruction behavior to go to user content instead of'
        ' system_instruction. Supports context caching and rich content'
        ' (text, files, binaries).'
    ),
)
disallow_transfer_to_parent: Optional[bool] = Field(
default=None,
@@ -58,6 +58,11 @@ class _ContentLlmRequestProcessor(BaseLlmRequestProcessor):
agent.name,
)
# Add dynamic instructions to the last user content if static instructions exist
await _add_dynamic_instructions_to_user_content(
invocation_context, llm_request
)
# Maintain async generator behavior
if False: # Ensures it behaves as a generator
yield # This is a no-op but maintains generator structure
@@ -557,3 +562,49 @@ def _is_live_model_audio_event(event: Event) -> bool:
if part.file_data and part.file_data.mime_type == 'audio/pcm':
return True
return False
async def _add_dynamic_instructions_to_user_content(
    invocation_context: InvocationContext, llm_request: LlmRequest
) -> None:
  """Inserts resolved dynamic instructions as user content.

  Only applies when the agent also defines a static instruction; in that
  case the static instruction occupies the system-instruction slot and the
  dynamic instruction is injected as a user-role Content just before the
  trailing run of user content in the request.
  """
  from ...agents.readonly_context import ReadonlyContext
  from ...utils import instructions_utils

  agent = invocation_context.agent
  collected: list[str] = []

  # Dynamic instructions are routed into contents only when a static
  # instruction exists (otherwise the instructions processor already put
  # them into system_instruction).
  if agent.static_instruction and agent.instruction:
    raw_si, bypass_state_injection = await agent.canonical_instruction(
        ReadonlyContext(invocation_context)
    )
    resolved = (
        raw_si
        if bypass_state_injection
        else await instructions_utils.inject_session_state(
            raw_si, ReadonlyContext(invocation_context)
        )
    )
    if resolved:  # Skip empty instructions entirely.
      collected.append(resolved)

  if not collected:
    return

  # Locate the insertion point: scan backwards for the first non-user
  # content and insert right after it. If every entry is user content (or
  # the list is empty), insert at the front.
  insert_at = 0
  for idx in range(len(llm_request.contents) - 1, -1, -1):
    if llm_request.contents[idx].role != 'user':
      insert_at = idx + 1
      break

  llm_request.contents.insert(
      insert_at,
      types.Content(
          role='user',
          parts=[types.Part(text=text) for text in collected],
      ),
  )
+12 -11
View File
@@ -16,16 +16,13 @@
from __future__ import annotations
import re
from typing import AsyncGenerator
from typing import Generator
from typing import TYPE_CHECKING
from typing_extensions import override
from ...agents.readonly_context import ReadonlyContext
from ...events.event import Event
from ...sessions.state import State
from ...utils import instructions_utils
from ._base_llm_processor import BaseLlmRequestProcessor
@@ -50,10 +47,8 @@ class _InstructionsLlmRequestProcessor(BaseLlmRequestProcessor):
root_agent: BaseAgent = agent.root_agent
# Appends global instructions if set.
if (
isinstance(root_agent, LlmAgent) and root_agent.global_instruction
): # not empty str
# Handle global instructions
if isinstance(root_agent, LlmAgent) and root_agent.global_instruction:
raw_si, bypass_state_injection = (
await root_agent.canonical_global_instruction(
ReadonlyContext(invocation_context)
@@ -66,8 +61,14 @@ class _InstructionsLlmRequestProcessor(BaseLlmRequestProcessor):
)
llm_request.append_instructions([si])
# Appends agent instructions if set.
if agent.instruction: # not empty str
# Handle static_instruction - add via append_instructions
if agent.static_instruction:
llm_request.append_instructions(agent.static_instruction)
# Handle instruction based on whether static_instruction exists
if agent.instruction and not agent.static_instruction:
# Only add to system instructions if no static instruction exists
# If static instruction exists, content processor will handle it
raw_si, bypass_state_injection = await agent.canonical_instruction(
ReadonlyContext(invocation_context)
)
@@ -79,8 +80,8 @@ class _InstructionsLlmRequestProcessor(BaseLlmRequestProcessor):
llm_request.append_instructions([si])
# Maintain async generator behavior
if False: # Ensures it behaves as a generator
yield # This is a no-op but maintains generator structure
return
yield # This line ensures it behaves as a generator but is never reached
request_processor = _InstructionsLlmRequestProcessor()
+65 -7
View File
@@ -14,7 +14,9 @@
from __future__ import annotations
import logging
from typing import Optional
from typing import Union
from google.genai import types
from pydantic import BaseModel
@@ -86,17 +88,73 @@ class LlmRequest(BaseModel):
cache_metadata: Optional[CacheMetadata] = None
"""Cache metadata from previous requests, used for cache management."""
def append_instructions(
    self, instructions: Union[list[str], types.Content]
) -> None:
  """Appends instructions to the system instruction.

  Args:
    instructions: The instructions to append. Can be:
      - list[str]: strings concatenated onto the system instruction.
      - types.Content: Content whose text parts are extracted and
        concatenated onto the system instruction.

  Note: Only text content is supported; the model API requires
  system_instruction to be a string, so non-text parts in a Content are
  ignored. Joins always use a blank line ('\n\n') as separator.

  Raises:
    TypeError: If instructions is neither list[str] nor types.Content.
  """

  def _append_text(new_text: str) -> None:
    # Concatenate onto an existing string system instruction; warn and
    # skip when the existing value is some unsupported non-string type.
    if not self.config.system_instruction:
      self.config.system_instruction = new_text
    elif isinstance(self.config.system_instruction, str):
      self.config.system_instruction += '\n\n' + new_text
    else:
      logging.warning(
          'Cannot append to system_instruction of unsupported type: %s. '
          'Only string system_instruction is supported.',
          type(self.config.system_instruction),
      )

  if isinstance(instructions, types.Content):
    # TODO: Handle non-text contents in instruction by putting non-text
    # parts into llm_request.contents and adding a reference in the system
    # instruction that references the contents.
    text_parts = [part.text for part in instructions.parts if part.text]
    if text_parts:  # No-op when there is no text content to append.
      _append_text('\n\n'.join(text_parts))
    return

  if isinstance(instructions, list) and all(
      isinstance(inst, str) for inst in instructions
  ):
    if instructions:  # Empty list is a no-op.
      _append_text('\n\n'.join(instructions))
    return

  raise TypeError('instructions must be list[str] or types.Content')
def append_tools(self, tools: list[BaseTool]) -> None:
"""Appends tools to the request.
@@ -138,4 +196,4 @@ class LlmRequest(BaseModel):
"""
self.config.response_schema = base_model
self.config.response_mime_type = 'application/json'
self.config.response_mime_type = "application/json"
@@ -0,0 +1,272 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for static instruction functionality."""
from google.adk.agents.invocation_context import InvocationContext
from google.adk.agents.llm_agent import LlmAgent
from google.adk.agents.run_config import RunConfig
from google.adk.flows.llm_flows.contents import _add_dynamic_instructions_to_user_content
from google.adk.flows.llm_flows.instructions import request_processor
from google.adk.models.llm_request import LlmRequest
from google.adk.sessions.in_memory_session_service import InMemorySessionService
from google.genai import types
import pytest
async def _create_invocation_context(agent: LlmAgent) -> InvocationContext:
  """Builds an InvocationContext for *agent* backed by a fresh in-memory session."""
  svc = InMemorySessionService()
  new_session = await svc.create_session(
      app_name='test_app', user_id='test_user'
  )
  return InvocationContext(
      invocation_id='test_invocation_id',
      agent=agent,
      session=new_session,
      session_service=svc,
      run_config=RunConfig(),
      branch='main',
  )
@pytest.mark.parametrize('llm_backend', ['GOOGLE_AI', 'VERTEX'])
class TestStaticInstructions:
  """Tests for routing of static vs. dynamic instructions.

  NOTE(review): llm_backend presumably selects the backend environment via a
  fixture defined elsewhere — confirm; the test bodies below do not branch
  on it.
  """

  def test_static_instruction_field_exists(self, llm_backend):
    """Test that static_instruction field exists and works with types.Content."""
    static_content = types.Content(
        role='user', parts=[types.Part(text='This is a static instruction')]
    )
    agent = LlmAgent(name='test_agent', static_instruction=static_content)
    assert agent.static_instruction == static_content

  def test_static_instruction_supports_multiple_parts(self, llm_backend):
    """Test that static_instruction supports multiple parts including files."""
    static_content = types.Content(
        role='user',
        parts=[
            types.Part(text='Here is the document:'),
            types.Part(
                inline_data=types.Blob(
                    data=b'fake_file_content', mime_type='text/plain'
                )
            ),
            types.Part(text='Please analyze this document.'),
        ],
    )
    agent = LlmAgent(name='test_agent', static_instruction=static_content)
    assert agent.static_instruction == static_content
    assert len(agent.static_instruction.parts) == 3

  def test_static_instruction_outputs_placeholders_literally(self, llm_backend):
    """Test that static instructions output placeholders literally without processing."""
    static_content = types.Content(
        role='user',
        parts=[
            types.Part(text='Hello {name}, you have {count} messages'),
        ],
    )
    agent = LlmAgent(name='test_agent', static_instruction=static_content)
    # Placeholders are NOT resolved for static instructions.
    assert '{name}' in agent.static_instruction.parts[0].text
    assert '{count}' in agent.static_instruction.parts[0].text

  @pytest.mark.asyncio
  async def test_static_instruction_added_to_contents(self, llm_backend):
    """Test that static instructions are added to llm_request.config.system_instruction."""
    # Name is historical: the assertions below show static instructions land
    # in system_instruction, not in contents.
    static_content = types.Content(
        role='user', parts=[types.Part(text='Static instruction content')]
    )
    agent = LlmAgent(name='test_agent', static_instruction=static_content)
    invocation_context = await _create_invocation_context(agent)
    llm_request = LlmRequest()
    # Run the instruction processor
    async for _ in request_processor.run_async(invocation_context, llm_request):
      pass
    # Static instruction should be added to system instructions, not contents
    assert len(llm_request.contents) == 0
    assert llm_request.config.system_instruction == 'Static instruction content'

  @pytest.mark.asyncio
  async def test_dynamic_instruction_without_static_goes_to_system(
      self, llm_backend
  ):
    """Test that dynamic instructions go to system when no static instruction exists."""
    agent = LlmAgent(
        name='test_agent', instruction='Dynamic instruction content'
    )
    invocation_context = await _create_invocation_context(agent)
    llm_request = LlmRequest()
    # Run the instruction processor
    async for _ in request_processor.run_async(invocation_context, llm_request):
      pass
    # Dynamic instruction should be added to system instructions
    assert (
        llm_request.config.system_instruction == 'Dynamic instruction content'
    )
    assert len(llm_request.contents) == 0

  @pytest.mark.asyncio
  async def test_dynamic_instruction_with_static_not_in_system(
      self, llm_backend
  ):
    """Test that dynamic instructions don't go to system when static instruction exists."""
    static_content = types.Content(
        role='user', parts=[types.Part(text='Static instruction content')]
    )
    agent = LlmAgent(
        name='test_agent',
        instruction='Dynamic instruction content',
        static_instruction=static_content,
    )
    invocation_context = await _create_invocation_context(agent)
    llm_request = LlmRequest()
    # Run the instruction processor
    async for _ in request_processor.run_async(invocation_context, llm_request):
      pass
    # Static instruction should be in system instructions
    assert len(llm_request.contents) == 0
    assert llm_request.config.system_instruction == 'Static instruction content'

  @pytest.mark.asyncio
  async def test_dynamic_instructions_added_to_user_content(self, llm_backend):
    """Test that dynamic instructions are added to user content when static exists."""
    static_content = types.Content(
        role='user', parts=[types.Part(text='Static instruction')]
    )
    agent = LlmAgent(
        name='test_agent',
        instruction='Dynamic instruction',
        static_instruction=static_content,
    )
    invocation_context = await _create_invocation_context(agent)
    llm_request = LlmRequest()
    # Add some existing user content
    llm_request.contents = [
        types.Content(role='user', parts=[types.Part(text='Hello world')])
    ]
    # Run the content processor function
    await _add_dynamic_instructions_to_user_content(
        invocation_context, llm_request
    )
    # Dynamic instruction should be inserted before the last continuous batch of user content
    assert len(llm_request.contents) == 2
    assert llm_request.contents[0].role == 'user'
    assert len(llm_request.contents[0].parts) == 1
    assert llm_request.contents[0].parts[0].text == 'Dynamic instruction'
    assert llm_request.contents[1].role == 'user'
    assert len(llm_request.contents[1].parts) == 1
    assert llm_request.contents[1].parts[0].text == 'Hello world'

  @pytest.mark.asyncio
  async def test_dynamic_instructions_create_user_content_when_none_exists(
      self, llm_backend
  ):
    """Test that dynamic instructions create user content when none exists."""
    static_content = types.Content(
        role='user', parts=[types.Part(text='Static instruction')]
    )
    agent = LlmAgent(
        name='test_agent',
        instruction='Dynamic instruction',
        static_instruction=static_content,
    )
    invocation_context = await _create_invocation_context(agent)
    llm_request = LlmRequest()
    # No existing content
    # Run the content processor function
    await _add_dynamic_instructions_to_user_content(
        invocation_context, llm_request
    )
    # Dynamic instruction should create new user content
    assert len(llm_request.contents) == 1
    assert llm_request.contents[0].role == 'user'
    assert len(llm_request.contents[0].parts) == 1
    assert llm_request.contents[0].parts[0].text == 'Dynamic instruction'

  @pytest.mark.asyncio
  async def test_no_dynamic_instructions_when_no_static(self, llm_backend):
    """Test that no dynamic instructions are added to content when no static instructions exist."""
    agent = LlmAgent(name='test_agent', instruction='Dynamic instruction only')
    invocation_context = await _create_invocation_context(agent)
    llm_request = LlmRequest()
    # Add some existing user content
    original_content = types.Content(
        role='user', parts=[types.Part(text='Hello world')]
    )
    llm_request.contents = [original_content]
    # Run the content processor function
    await _add_dynamic_instructions_to_user_content(
        invocation_context, llm_request
    )
    # Content should remain unchanged
    assert len(llm_request.contents) == 1
    assert llm_request.contents[0].role == 'user'
    assert len(llm_request.contents[0].parts) == 1
    assert llm_request.contents[0].parts[0].text == 'Hello world'

  @pytest.mark.asyncio
  async def test_static_instruction_with_files_and_text(self, llm_backend):
    """Test that static instruction can contain files and text together."""
    static_content = types.Content(
        role='user',
        parts=[
            types.Part(text='Analyze this image:'),
            types.Part(
                inline_data=types.Blob(
                    data=b'fake_image_data', mime_type='image/png'
                )
            ),
            types.Part(text='Focus on the key elements.'),
        ],
    )
    agent = LlmAgent(name='test_agent', static_instruction=static_content)
    invocation_context = await _create_invocation_context(agent)
    llm_request = LlmRequest()
    # Run the instruction processor
    async for _ in request_processor.run_async(invocation_context, llm_request):
      pass
    # Static instruction should extract only text parts and concatenate them
    assert len(llm_request.contents) == 0
    assert (
        llm_request.config.system_instruction
        == 'Analyze this image:\n\nFocus on the key elements.'
    )
+417
View File
@@ -156,6 +156,194 @@ def test_append_tools_consolidates_declarations_in_single_tool():
assert 'third_tool' in request.tools_dict
def test_append_instructions_with_string_list():
  """Test that append_instructions works with list of strings (existing behavior)."""
  req = LlmRequest()
  # A fresh request carries no system instruction.
  assert req.config.system_instruction is None
  req.append_instructions(['First instruction', 'Second instruction'])
  # Strings are joined with a blank line; contents stay untouched.
  assert (
      req.config.system_instruction
      == 'First instruction\n\nSecond instruction'
  )
  assert not req.contents
def test_append_instructions_with_string_list_multiple_calls():
  """Test multiple calls to append_instructions with string lists."""
  req = LlmRequest()
  req.append_instructions(['First instruction'])
  assert req.config.system_instruction == 'First instruction'
  # A second call appends to the existing value with a blank-line separator.
  req.append_instructions(['Second instruction', 'Third instruction'])
  assert req.config.system_instruction == (
      'First instruction\n\nSecond instruction\n\nThird instruction'
  )
def test_append_instructions_with_content():
"""Test that append_instructions works with types.Content (new behavior)."""
request = LlmRequest()
# Create a Content object
content = types.Content(
role='user', parts=[types.Part(text='This is content-based instruction')]
)
# Append content
request.append_instructions(content)
# Should be set as system_instruction
assert len(request.contents) == 0
assert request.config.system_instruction == content
def test_append_instructions_with_content_multiple_calls():
"""Test multiple calls to append_instructions with Content objects."""
request = LlmRequest()
# Add some existing content first
existing_content = types.Content(
role='user', parts=[types.Part(text='Existing content')]
)
request.contents.append(existing_content)
# First Content instruction
content1 = types.Content(
role='user', parts=[types.Part(text='First instruction')]
)
request.append_instructions(content1)
# Should be set as system_instruction, existing content unchanged
assert len(request.contents) == 1
assert request.contents[0] == existing_content
assert request.config.system_instruction == content1
# Second Content instruction
content2 = types.Content(
role='user', parts=[types.Part(text='Second instruction')]
)
request.append_instructions(content2)
# Second Content should be merged with first in system_instruction
assert len(request.contents) == 1
assert request.contents[0] == existing_content
assert isinstance(request.config.system_instruction, types.Content)
assert len(request.config.system_instruction.parts) == 2
assert request.config.system_instruction.parts[0].text == 'First instruction'
assert request.config.system_instruction.parts[1].text == 'Second instruction'
def test_append_instructions_with_content_multipart():
"""Test append_instructions with Content containing multiple parts."""
request = LlmRequest()
# Create Content with multiple parts (text and potentially files)
content = types.Content(
role='user',
parts=[
types.Part(text='Text instruction'),
types.Part(text='Additional text part'),
],
)
request.append_instructions(content)
assert len(request.contents) == 0
assert request.config.system_instruction == content
assert len(request.config.system_instruction.parts) == 2
assert request.config.system_instruction.parts[0].text == 'Text instruction'
assert (
request.config.system_instruction.parts[1].text == 'Additional text part'
)
def test_append_instructions_mixed_string_and_content():
"""Test mixing string list and Content instructions."""
request = LlmRequest()
# First add string instructions
request.append_instructions(['String instruction'])
assert request.config.system_instruction == 'String instruction'
# Then add Content instruction
content = types.Content(
role='user', parts=[types.Part(text='Content instruction')]
)
request.append_instructions(content)
# String and Content should be merged in system_instruction
assert len(request.contents) == 0
assert isinstance(request.config.system_instruction, types.Content)
assert len(request.config.system_instruction.parts) == 2
assert request.config.system_instruction.parts[0].text == 'String instruction'
assert (
request.config.system_instruction.parts[1].text == 'Content instruction'
)
def test_append_instructions_empty_string_list():
  """Test append_instructions with empty list of strings."""
  req = LlmRequest()
  # An empty list is a no-op for both system instruction and contents.
  req.append_instructions([])
  assert req.config.system_instruction is None
  assert not req.contents
def test_append_instructions_invalid_input():
  """Test append_instructions with invalid input types."""
  req = LlmRequest()
  expected = 'instructions must be list\\[str\\] or types.Content'
  # A bare string, a non-iterable, and a list mixing valid and invalid
  # element types must all be rejected with the same TypeError.
  for bad_input in ('single string', 123, ['valid string', 123]):
    with pytest.raises(TypeError, match=expected):
      req.append_instructions(bad_input)
def test_append_instructions_content_preserves_role_and_parts():
  """Test that Content objects have text extracted regardless of role or parts."""
  req = LlmRequest()
  # A non-user role is processed exactly like any other Content.
  payload = types.Content(
      role='system',
      parts=[
          types.Part(text='System instruction'),
          types.Part(text='Additional system part'),
      ],
  )
  req.append_instructions(payload)
  assert not req.contents
  assert req.config.system_instruction == (
      'System instruction\n\nAdditional system part'
  )
async def _create_tool_context() -> ToolContext:
"""Helper to create a ToolContext for testing."""
session_service = InMemorySessionService()
@@ -308,3 +496,232 @@ def test_multiple_append_tools_calls_consolidate():
assert 'dummy_tool' in request.tools_dict
assert 'another_tool' in request.tools_dict
assert 'third_tool' in request.tools_dict
# Updated tests for simplified string-only append_instructions behavior
def test_append_instructions_with_content():
  """Test that append_instructions extracts text from types.Content."""
  req = LlmRequest()
  payload = types.Content(
      role='user', parts=[types.Part(text='This is content-based instruction')]
  )
  req.append_instructions(payload)
  # Text is extracted into the system instruction; contents stay untouched.
  assert not req.contents
  assert (
      req.config.system_instruction == 'This is content-based instruction'
  )
def test_append_instructions_with_content_multiple_calls():
  """Test multiple calls to append_instructions with Content objects."""
  req = LlmRequest()
  # Pre-populate contents to verify they are never modified.
  preexisting = types.Content(
      role='user', parts=[types.Part(text='Existing content')]
  )
  req.contents.append(preexisting)
  req.append_instructions(
      types.Content(role='user', parts=[types.Part(text='First instruction')])
  )
  # First call: extracted text becomes the system instruction.
  assert req.contents == [preexisting]
  assert req.config.system_instruction == 'First instruction'
  req.append_instructions(
      types.Content(role='user', parts=[types.Part(text='Second instruction')])
  )
  # Second call: extracted text is appended to the existing string.
  assert req.contents == [preexisting]
  assert req.config.system_instruction == (
      'First instruction\n\nSecond instruction'
  )
def test_append_instructions_with_content_multipart():
  """Test append_instructions with Content containing multiple text parts."""
  req = LlmRequest()
  payload = types.Content(
      role='user',
      parts=[
          types.Part(text='Text instruction'),
          types.Part(text='Additional text part'),
      ],
  )
  req.append_instructions(payload)
  # All text parts are joined with blank lines.
  assert not req.contents
  assert req.config.system_instruction == (
      'Text instruction\n\nAdditional text part'
  )
def test_append_instructions_mixed_string_and_content():
  """Test mixing string list and Content instructions."""
  req = LlmRequest()
  req.append_instructions(['String instruction'])
  assert req.config.system_instruction == 'String instruction'
  req.append_instructions(
      types.Content(role='user', parts=[types.Part(text='Content instruction')])
  )
  # Content text is appended onto the previously set string.
  assert not req.contents
  assert req.config.system_instruction == (
      'String instruction\n\nContent instruction'
  )
def test_append_instructions_content_extracts_text_only():
  """Test that Content objects have text extracted regardless of role."""
  req = LlmRequest()
  payload = types.Content(
      role='system',  # A non-user role is still processed identically.
      parts=[
          types.Part(text='System instruction'),
          types.Part(text='Additional system part'),
      ],
  )
  req.append_instructions(payload)
  # Only the text is extracted and concatenated.
  assert not req.contents
  assert req.config.system_instruction == (
      'System instruction\n\nAdditional system part'
  )
def test_append_instructions_content_with_non_text_parts():
  """Test that non-text parts in Content are ignored."""
  req = LlmRequest()
  payload = types.Content(
      role='user',
      parts=[
          types.Part(text='Text instruction'),
          types.Part(
              inline_data=types.Blob(data=b'file_data', mime_type='text/plain')
          ),
          types.Part(text='More text'),
      ],
  )
  req.append_instructions(payload)
  # The inline-data part is skipped; only the text parts survive.
  assert req.config.system_instruction == 'Text instruction\n\nMore text'
def test_append_instructions_content_no_text_parts():
  """Test that Content with no text parts does nothing."""
  req = LlmRequest()
  req.config.system_instruction = 'Initial'
  payload = types.Content(
      role='user',
      parts=[
          types.Part(
              inline_data=types.Blob(data=b'file_data', mime_type='text/plain')
          ),
      ],
  )
  req.append_instructions(payload)
  # Nothing to extract, so the existing instruction is untouched.
  assert req.config.system_instruction == 'Initial'
def test_append_instructions_content_empty_text_parts():
  """Test that Content with empty text parts are skipped."""
  req = LlmRequest()
  payload = types.Content(
      role='user',
      parts=[
          types.Part(text='Valid text'),
          types.Part(text=''),  # Falsy text is skipped.
          types.Part(text=None),  # Missing text is skipped too.
          types.Part(text='More valid text'),
      ],
  )
  req.append_instructions(payload)
  # Only the non-empty text parts are joined.
  assert req.config.system_instruction == 'Valid text\n\nMore valid text'
def test_append_instructions_warning_unsupported_system_instruction_type(
    caplog,
):
  """Test that warnings are logged for unsupported system_instruction types."""
  import logging

  req = LlmRequest()
  # Force an unsupported (non-string) system_instruction value.
  req.config.system_instruction = {'unsupported': 'dict'}
  with caplog.at_level(logging.WARNING):
    # Appending Content should log a warning and leave the value alone.
    req.append_instructions(
        types.Content(role='user', parts=[types.Part(text='Test')])
    )
    assert req.config.system_instruction == {'unsupported': 'dict'}
    # Appending strings should behave the same way.
    req.append_instructions(['Test string'])
    assert req.config.system_instruction == {'unsupported': 'dict'}
  warning_records = [
      record for record in caplog.records if record.levelname == 'WARNING'
  ]
  assert len(warning_records) >= 1
  assert (
      'Cannot append to system_instruction of unsupported type' in caplog.text
  )