Types of Callbacks¶
The framework provides different types of callbacks that trigger at various stages of an agent's execution. Understanding when each callback fires and what context it receives is key to using them effectively.
Agent Lifecycle Callbacks¶
These callbacks are available on any agent that inherits from BaseAgent (including LlmAgent, SequentialAgent, ParallelAgent, LoopAgent, etc.).
Before Agent Callback¶
When: Called immediately before the agent's _run_async_impl (or _run_live_impl) method is executed. It runs after the agent's InvocationContext is created but before its core logic begins.
Purpose: Ideal for setting up resources or state needed only for this specific agent's run, performing validation checks on the session state (callback_context.state) before execution starts, logging the entry point of the agent's activity, or potentially modifying the invocation context before the core logic uses it.
Code
import {
LlmAgent,
CallbackContext
} from 'adk-typescript/agents';
import {
Content,
LlmRegistry
} from 'adk-typescript/models';
import { runners } from 'adk-typescript';
// Define the model
const GEMINI_2_FLASH = "gemini-2.0-flash";
// --- 1. Define the Callback Function ---
function checkIfAgentShouldRun(callbackContext: CallbackContext): Content | null {
/**
* Logs entry and checks 'skip_llm_agent' in session state.
* If true, returns Content to skip the agent's execution.
* If false or not present, returns null to allow execution.
*/
const agentName = callbackContext.agentName;
const invocationId = callbackContext.invocationId;
const currentState = callbackContext.state;
console.log(`\n[Callback] Entering agent: ${agentName} (Inv: ${invocationId})`);
console.log(`[Callback] Current State: ${JSON.stringify(currentState)}`);
// Check the condition in session state
if (currentState.skip_llm_agent === true) {
console.log(`[Callback] State condition 'skip_llm_agent=true' met: Skipping agent ${agentName}.`);
// Return Content to skip the agent's run
return {
parts: [{ text: `Agent ${agentName} skipped by before_agent_callback due to state.` }],
role: "model" // Assign model role to the overriding response
};
} else {
console.log(`[Callback] State condition not met: Proceeding with agent ${agentName}.`);
// Return null to allow the LlmAgent's normal execution
return null;
}
}
// --- 2. Setup Agent with Callback ---
// Create model instance (using LlmRegistry)
const model = LlmRegistry.newLlm(GEMINI_2_FLASH);
const llmAgentWithBeforeCallback = new LlmAgent({
name: "MyControlledAgent",
model: model,
instruction: "You are a concise assistant.",
description: "An LLM agent demonstrating stateful before_agent_callback",
beforeAgentCallback: checkIfAgentShouldRun // Assign the callback
});
// --- 3. Setup Runner and Sessions using InMemoryRunner ---
async function main(): Promise<void> {
const appName = "before_agent_demo";
const userId = "test_user";
const sessionIdRun = "session_will_run";
const sessionIdSkip = "session_will_skip";
// Use InMemoryRunner - it includes InMemorySessionService
const runner = new runners.InMemoryRunner(llmAgentWithBeforeCallback, appName);
// Get the bundled session service
const sessionService = runner.sessionService;
// Create session 1: Agent will run (default empty state)
await sessionService.createSession({
appName: appName,
userId: userId,
sessionId: sessionIdRun
// No initial state means 'skip_llm_agent' will be false in the callback check
});
// Create session 2: Agent will be skipped (state has skip_llm_agent=true)
await sessionService.createSession({
appName: appName,
userId: userId,
sessionId: sessionIdSkip,
state: { skip_llm_agent: true } // Set the state flag here
});
try {
// --- Scenario 1: Run where callback allows agent execution ---
console.log("\n" + "=".repeat(20) +
` SCENARIO 1: Running Agent on Session '${sessionIdRun}' (Should Proceed) ` +
"=".repeat(20));
const events1 = runner.run({
userId: userId,
sessionId: sessionIdRun,
newMessage: {
role: "user",
parts: [{ text: "Hello, please respond." }]
}
});
for await (const event of events1) {
// Print final output (either from LLM or callback override)
if (event.isFinalResponse() && event.content && event.content.parts) {
console.log(`Final Output: [${event.author}] ${event.content.parts[0].text?.trim()}`);
} else if (event.errorCode) {
console.log(`Error Event: [${event.errorCode}] ${event.errorMessage}`);
}
}
// --- Scenario 2: Run where callback intercepts and skips agent ---
console.log("\n" + "=".repeat(20) +
` SCENARIO 2: Running Agent on Session '${sessionIdSkip}' (Should Skip) ` +
"=".repeat(20));
const events2 = runner.run({
userId: userId,
sessionId: sessionIdSkip,
newMessage: {
role: "user",
parts: [{ text: "This message won't reach the LLM." }]
}
});
for await (const event of events2) {
// Print final output (either from LLM or callback override)
if (event.isFinalResponse() && event.content && event.content.parts) {
console.log(`Final Output: [${event.author}] ${event.content.parts[0].text?.trim()}`);
} else if (event.errorCode) {
console.log(`Error Event: [${event.errorCode}] ${event.errorMessage}`);
}
}
} catch (error) {
console.error(`Error in main function: ${error}`);
}
}
// --- 4. Execute ---
// In TypeScript/JS we can call async functions at the top level
main().catch(error => {
console.error(`Unhandled error in main: ${error}`);
});
// Export the agent and main function for external use
export const agent = llmAgentWithBeforeCallback;
export { main as runBeforeAgentCallbackDemo };
Note on the before_agent_callback Example:
- What it Shows: This example demonstrates the before_agent_callback. This callback runs right before the agent's main processing logic starts for a given request.
- How it Works: The callback function (checkIfAgentShouldRun) looks at a flag (skip_llm_agent) in the session's state.
  - If the flag is true, the callback returns a Content object. This tells the ADK framework to skip the agent's main execution entirely and use the callback's returned content as the final response.
  - If the flag is false (or not set), the callback returns null. This tells the ADK framework to proceed with the agent's normal execution (calling the LLM in this case).
- Expected Outcome: You'll see two scenarios:
  - In the session with the skip_llm_agent: true state, the agent's LLM call is bypassed, and the output comes directly from the callback ("Agent... skipped...").
  - In the session without that state flag, the callback allows the agent to run, and you see the actual response from the LLM (e.g., "Hello!").
- Understanding Callbacks: This highlights how before_ callbacks act as gatekeepers, allowing you to intercept execution before a major step and potentially prevent it based on checks like state, input validation, or permissions, as sketched below.
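The permissions check mentioned above follows the same pattern as the demo. Below is a minimal sketch of such a gatekeeper, assuming a hypothetical user_role key in session state; the CallbackContext and Content types are the same ones used in the example.

```typescript
import { CallbackContext } from 'adk-typescript/agents';
import { Content } from 'adk-typescript/models';

// Minimal sketch: block the agent unless the session records an allowed role.
// 'user_role' is a hypothetical state key chosen for this illustration.
function requireAdminRole(callbackContext: CallbackContext): Content | null {
  if (callbackContext.state.user_role !== 'admin') {
    // Returning Content skips the agent run, exactly as in the demo above.
    return {
      role: "model",
      parts: [{ text: `Access denied: agent ${callbackContext.agentName} requires the 'admin' role.` }]
    };
  }
  return null; // Allow normal execution.
}
```

Attach it the same way as in the demo, via the beforeAgentCallback option when constructing the agent.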
After Agent Callback¶
When: Called immediately after the agent's _run_async_impl (or _run_live_impl) method successfully completes. It does not run if the agent was skipped due to before_agent_callback returning content, or if end_invocation was set during the agent's run.
Purpose: Useful for cleanup tasks, post-execution validation, logging the completion of an agent's activity, modifying final state, or augmenting/replacing the agent's final output.
Code
import {
LlmAgent,
CallbackContext,
} from 'adk-typescript/agents';
import {
Content,
LlmRegistry,
} from 'adk-typescript/models';
import { runners } from 'adk-typescript';
// Define the model
const GEMINI_2_FLASH = "gemini-2.0-flash";
// --- 1. Define the Callback Function ---
function modifyOutputAfterAgent(callbackContext: CallbackContext): Content | null {
/**
* Logs exit from an agent and checks 'add_concluding_note' in session state.
* If true, returns new Content to *replace* the agent's original output.
* If false or not present, returns null, allowing the agent's original output to be used.
*/
const agentName = callbackContext.agentName;
const invocationId = callbackContext.invocationId;
const currentState = callbackContext.state;
console.log(`\n[Callback] Exiting agent: ${agentName} (Inv: ${invocationId})`);
console.log(`[Callback] Current State: ${JSON.stringify(currentState)}`);
// Example: Check state to decide whether to modify the final output
if (currentState.add_concluding_note === true) {
console.log(`[Callback] State condition 'add_concluding_note=true' met: Replacing agent ${agentName}'s output.`);
// Return Content to *replace* the agent's own output
return {
parts: [{ text: `Concluding note added by after_agent_callback, replacing original output.` }],
role: "model" // Assign model role to the overriding response
};
} else {
console.log(`[Callback] State condition not met: Using agent ${agentName}'s original output.`);
// Return null - the agent's output produced just before this callback will be used.
return null;
}
}
// --- 2. Setup Agent with Callback ---
// Create model instance (using LlmRegistry)
const model = LlmRegistry.newLlm(GEMINI_2_FLASH);
const llmAgentWithAfterCallback = new LlmAgent({
name: "MySimpleAgentWithAfter",
model: model,
instruction: "You are a simple agent. Just say 'Processing complete!'",
description: "An LLM agent demonstrating after_agent_callback for output modification",
afterAgentCallback: modifyOutputAfterAgent // Assign the callback here
});
// --- 3. Setup Runner and Sessions using InMemoryRunner ---
async function main(): Promise<void> {
const appName = "after_agent_demo";
const userId = "test_user_after";
const sessionIdNormal = "session_run_normally";
const sessionIdModify = "session_modify_output";
// Use InMemoryRunner - it includes InMemorySessionService
const runner = new runners.InMemoryRunner(llmAgentWithAfterCallback, appName);
// Get the bundled session service
const sessionService = runner.sessionService;
// Create session 1: Agent output will be used as is (default empty state)
await sessionService.createSession({
appName: appName,
userId: userId,
sessionId: sessionIdNormal
// No initial state means 'add_concluding_note' will be false in the callback check
});
// Create session 2: Agent output will be replaced by the callback
await sessionService.createSession({
appName: appName,
userId: userId,
sessionId: sessionIdModify,
state: { add_concluding_note: true } // Set the state flag here
});
try {
// --- Scenario 1: Run where callback allows agent's original output ---
console.log("\n" + "=".repeat(20) +
` SCENARIO 1: Running Agent on Session '${sessionIdNormal}' (Should Use Original Output) ` +
"=".repeat(20));
const events1 = runner.run({
userId: userId,
sessionId: sessionIdNormal,
newMessage: {
role: "user",
parts: [{ text: "Process this please." }]
}
});
for await (const event of events1) {
// Print final output (either from LLM or callback override)
if (event.isFinalResponse() && event.content && event.content.parts) {
console.log(`Final Output: [${event.author}] ${event.content.parts[0].text?.trim()}`);
} else if (event.errorCode) {
console.log(`Error Event: [${event.errorCode}] ${event.errorMessage}`);
}
}
// --- Scenario 2: Run where callback replaces the agent's output ---
console.log("\n" + "=".repeat(20) +
` SCENARIO 2: Running Agent on Session '${sessionIdModify}' (Should Replace Output) ` +
"=".repeat(20));
const events2 = runner.run({
userId: userId,
sessionId: sessionIdModify,
newMessage: {
role: "user",
parts: [{ text: "Process this and add note." }]
}
});
for await (const event of events2) {
// Print final output (either from LLM or callback override)
if (event.isFinalResponse() && event.content && event.content.parts) {
console.log(`Final Output: [${event.author}] ${event.content.parts[0].text?.trim()}`);
} else if (event.errorCode) {
console.log(`Error Event: [${event.errorCode}] ${event.errorMessage}`);
}
}
} catch (error) {
console.error(`Error in main function: ${error}`);
}
}
// --- 4. Execute ---
// In TypeScript/JS we can call async functions at the top level
main().catch(error => {
console.error(`Unhandled error in main: ${error}`);
});
// Export the agent and main function for external use
export const agent = llmAgentWithAfterCallback;
export { main as runAfterAgentCallbackDemo };
Note on the afterAgentCallback Example:
- What it Shows: This example demonstrates the afterAgentCallback. This callback runs right after the agent's main processing logic has finished and produced its result, but before that result is finalized and returned.
- How it Works: The callback function (modifyOutputAfterAgent) checks a flag (add_concluding_note) in the session's state.
  - If the flag is true, the callback returns a new Content object. This tells the ADK framework to replace the agent's original output with the content returned by the callback.
  - If the flag is false (or not set), the callback returns null. This tells the ADK framework to use the original output generated by the agent.
- Expected Outcome: You'll see two scenarios:
  - In the session without the add_concluding_note: true state, the callback allows the agent's original output ("Processing complete!") to be used.
  - In the session with that state flag, the callback intercepts the agent's original output and replaces it with its own message ("Concluding note added...").
- Understanding Callbacks: This highlights how after_ callbacks allow post-processing or modification. You can inspect the result of a step (the agent's run) and decide whether to let it pass through, change it, or completely replace it based on your logic, as in the pass-through sketch below.
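An after_agent_callback does not have to replace anything: returning null turns it into a pure post-run hook, which fits the cleanup and logging purposes listed earlier. Below is a minimal sketch, assuming writes through callbackContext.state are persisted as session state; the last_agent_completed and completion_count keys are hypothetical names for this illustration.

```typescript
import { CallbackContext } from 'adk-typescript/agents';
import { Content } from 'adk-typescript/models';

// Minimal sketch: record completion bookkeeping in state while leaving the
// agent's original output untouched by returning null.
function recordCompletion(callbackContext: CallbackContext): Content | null {
  const state = callbackContext.state;
  state.last_agent_completed = callbackContext.agentName;     // hypothetical key
  state.completion_count = (state.completion_count || 0) + 1; // hypothetical key
  console.log(`[Callback] ${callbackContext.agentName} finished (run #${state.completion_count}).`);
  return null; // null keeps the agent's original output.
}
```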
LLM Interaction Callbacks¶
These callbacks are specific to LlmAgent and provide hooks around the interaction with the Large Language Model.
Before Model Callback¶
When: Called just before the generate_content_async (or equivalent) request is sent to the LLM within an LlmAgent's flow.
Purpose: Allows inspection and modification of the request going to the LLM. Use cases include adding dynamic instructions, injecting few-shot examples based on state, modifying model config, implementing guardrails (like profanity filters), or implementing request-level caching.
Return Value Effect:
If the callback returns undefined, the LLM continues its normal workflow. If the callback returns an LlmResponse object, the call to the LLM is skipped and the returned LlmResponse is used directly as if it came from the model. This is powerful for implementing guardrails or caching (a caching sketch follows the example below).
Code
import {
LlmAgent,
CallbackContext,
} from 'adk-typescript/agents';
import {
LlmRequest,
LlmResponse,
Content,
LlmRegistry,
} from 'adk-typescript/models';
import { runners } from 'adk-typescript';
import { InMemorySessionService } from 'adk-typescript/sessions';
// Define the model
const GEMINI_2_FLASH = "gemini-2.0-flash";
// --- Define the Callback Function ---
function simpleBeforeModelModifier(
callbackContext: CallbackContext,
llmRequest: LlmRequest
): LlmResponse | undefined {
/**
* Inspects/modifies the LLM request or skips the call.
*/
const agentName = callbackContext.agentName;
console.log(`[Callback] Before model call for agent: ${agentName}`);
// Inspect the last user message in the request contents
let lastUserMessage = "";
if (llmRequest.contents && llmRequest.contents.length > 0) {
const lastContent = llmRequest.contents[llmRequest.contents.length - 1];
if (lastContent.role === 'user' && lastContent.parts && lastContent.parts.length > 0) {
lastUserMessage = lastContent.parts[0].text || "";
}
}
console.log(`[Callback] Inspecting last user message: '${lastUserMessage}'`);
// --- Modification Example ---
// Add a prefix to the system instruction
if (llmRequest.config.systemInstruction) {
const originalInstruction = llmRequest.config.systemInstruction as string;
const prefix = "[Modified by Callback] ";
const modifiedText = prefix + originalInstruction;
llmRequest.config.systemInstruction = modifiedText;
console.log(`[Callback] Modified system instruction to: '${modifiedText}'`);
}
// --- Skip Example ---
// Check if the last user message contains "BLOCK"
if (lastUserMessage.toUpperCase().includes("BLOCK")) {
console.log("[Callback] 'BLOCK' keyword found. Skipping LLM call.");
// Return an LlmResponse to skip the actual LLM call
return new LlmResponse({
content: {
role: "model",
parts: [{ text: "LLM call was blocked by before_model_callback." }]
}
});
} else {
console.log("[Callback] Proceeding with LLM call.");
// Return undefined to allow the (modified) request to go to the LLM
return undefined;
}
}
// Create model instance (using LlmRegistry)
const model = LlmRegistry.newLlm(GEMINI_2_FLASH);
// Create LlmAgent and Assign Callback
const myLlmAgent = new LlmAgent({
name: "ModelCallbackAgent",
model: model,
instruction: "You are a helpful assistant.", // Base instruction
description: "An LLM agent demonstrating before_model_callback",
beforeModelCallback: simpleBeforeModelModifier // Assign the function here
});
// Setup constants for the session
const APP_NAME = "guardrail_app";
const USER_ID = "user_1";
const SESSION_ID = "session_001";
// Create Session and Runner
const sessionService = new InMemorySessionService();
sessionService.createSession({
appName: APP_NAME,
userId: USER_ID,
sessionId: SESSION_ID
});
const runner = new runners.Runner({
agent: myLlmAgent,
appName: APP_NAME,
sessionService: sessionService
});
// Agent Interaction function
async function callAgent(query: string): Promise<void> {
// Create content for the request
const content: Content = {
role: 'user',
parts: [{ text: query }]
};
// Run the agent and collect results
try {
const events = runner.run({
userId: USER_ID,
sessionId: SESSION_ID,
newMessage: content
});
for await (const event of events) {
if (event.isFinalResponse() && event.content && event.content.parts && event.content.parts[0].text) {
const finalResponse = event.content.parts[0].text;
console.log("Agent Response: ", finalResponse);
} else if (event.errorCode) {
console.log(`Error Event: [${event.errorCode}] ${event.errorMessage}`);
}
}
} catch (error) {
console.error("Error running agent:", error);
}
}
// Execute with a sample query
callAgent("Tell me about the weather today.");
// Try with a blocking keyword
setTimeout(() => {
console.log("\nTrying with blocking keyword:");
callAgent("BLOCK this request please");
}, 2000);
// Export for external use
export const agent = myLlmAgent;
export async function runBeforeModelCallbackDemo(query: string): Promise<void> {
await callAgent(query);
}
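The request-level caching mentioned under Purpose can be built from this hook paired with an after-model callback that populates the cache. Below is a minimal sketch, assuming writes to callbackContext.state persist across turns; the llm_cache and llm_cache_pending state keys, and the exact-match keying on the last user message, are illustrative choices rather than framework features.

```typescript
import { CallbackContext } from 'adk-typescript/agents';
import { LlmRequest, LlmResponse } from 'adk-typescript/models';

// Derive a cache key from the last user message. Illustrative only: a real
// key should cover the full request (instructions, history, config).
function cacheKey(llmRequest: LlmRequest): string {
  const contents = llmRequest.contents || [];
  const last = contents[contents.length - 1];
  return (last && last.parts && last.parts[0]?.text) || "";
}

// Before the model call: on a cache hit, return an LlmResponse so the real
// LLM call is skipped; on a miss, remember the key for the after-callback.
function cacheLookupBeforeModel(
  callbackContext: CallbackContext,
  llmRequest: LlmRequest
): LlmResponse | undefined {
  const cache = (callbackContext.state.llm_cache || {}) as Record<string, string>;
  const key = cacheKey(llmRequest);
  if (key && cache[key]) {
    console.log(`[Callback] Cache hit for '${key}'. Skipping LLM call.`);
    return new LlmResponse({
      content: { role: "model", parts: [{ text: cache[key] }] }
    });
  }
  callbackContext.state.llm_cache_pending = key; // hypothetical state key
  return undefined; // Cache miss: proceed with the real LLM call.
}

// After the model call: store the response text under the pending key.
function cacheStoreAfterModel(
  callbackContext: CallbackContext,
  llmResponse: LlmResponse
): LlmResponse | undefined {
  const key = callbackContext.state.llm_cache_pending as string | undefined;
  const text = llmResponse.content?.parts?.[0]?.text;
  if (key && text) {
    const cache = (callbackContext.state.llm_cache || {}) as Record<string, string>;
    cache[key] = text;
    callbackContext.state.llm_cache = cache; // Write back to record the change
  }
  return undefined; // Never modify the response itself here.
}
```

Assign the pair via the beforeModelCallback and afterModelCallback options on the LlmAgent, just as the example above assigns a single callback.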
After Model Callback¶
When: Called just after a response (LlmResponse) is received from the LLM, before it's processed further by the invoking agent.
Purpose: Allows inspection or modification of the raw LLM response. Use cases include:
- logging model outputs,
- reformatting responses,
- censoring sensitive information generated by the model (a redaction sketch follows the example below),
- parsing structured data from the LLM response and storing it in callback_context.state,
- or handling specific error codes.
Code
import {
LlmAgent,
CallbackContext,
} from 'adk-typescript/agents';
import {
LlmResponse,
Content,
LlmRegistry,
} from 'adk-typescript/models';
import { runners } from 'adk-typescript';
import { InMemorySessionService } from 'adk-typescript/sessions';
// Define the model
const GEMINI_2_FLASH = "gemini-2.0-flash";
// --- Define the Callback Function ---
function simpleAfterModelModifier(
callbackContext: CallbackContext,
llmResponse: LlmResponse
): LlmResponse | undefined {
/**
* Inspects/modifies the LLM response after it's received.
*/
const agentName = callbackContext.agentName;
console.log(`[Callback] After model call for agent: ${agentName}`);
// --- Inspection ---
let originalText = "";
if (llmResponse.content && llmResponse.content.parts && llmResponse.content.parts.length > 0) {
// Check if it's a text response
const firstPart = llmResponse.content.parts[0];
if (firstPart.text) {
originalText = firstPart.text;
console.log(`[Callback] Inspected original response text: '${originalText.substring(0, 100)}...'`); // Log snippet
} else if (firstPart.functionCall) {
console.log(`[Callback] Inspected response: Contains function call '${firstPart.functionCall.name}'. No text modification.`);
return undefined; // Don't modify tool calls in this example
} else {
console.log("[Callback] Inspected response: No text content found.");
return undefined;
}
} else if (llmResponse.errorMessage) {
console.log(`[Callback] Inspected response: Contains error '${llmResponse.errorMessage}'. No modification.`);
return undefined;
} else {
console.log("[Callback] Inspected response: Empty LlmResponse.");
return undefined; // Nothing to modify
}
// --- Modification Example ---
// Replace "joke" with "funny story" (lowercase and capitalized forms)
const searchTerm = "joke";
const replaceTerm = "funny story";
if (originalText.toLowerCase().includes(searchTerm)) {
console.log(`[Callback] Found '${searchTerm}'. Modifying response.`);
// Perform the replacements with case sensitivity in mind
let modifiedText = originalText.replace(
new RegExp(searchTerm, 'g'),
replaceTerm
);
modifiedText = modifiedText.replace(
new RegExp(searchTerm.charAt(0).toUpperCase() + searchTerm.slice(1), 'g'),
replaceTerm.charAt(0).toUpperCase() + replaceTerm.slice(1)
);
// Create a new LlmResponse with the modified content
// Clone the structure to avoid modifying original if other callbacks exist
const newResponse = new LlmResponse({
content: {
role: "model",
parts: [{ text: modifiedText }]
},
// Copy other relevant fields if necessary
groundingMetadata: llmResponse.groundingMetadata
});
console.log(`[Callback] Returning modified response.`);
return newResponse; // Return the modified response
} else {
console.log(`[Callback] '${searchTerm}' not found. Passing original response through.`);
// Return undefined to use the original llm_response
return undefined;
}
}
// Create model instance (using LlmRegistry)
const model = LlmRegistry.newLlm(GEMINI_2_FLASH);
// Create LlmAgent and Assign Callback
const myLlmAgent = new LlmAgent({
name: "AfterModelCallbackAgent",
model: model,
instruction: "You are a helpful assistant.",
description: "An LLM agent demonstrating after_model_callback",
afterModelCallback: simpleAfterModelModifier // Assign the function here
});
// Setup constants for the session
const APP_NAME = "guardrail_app";
const USER_ID = "user_1";
const SESSION_ID = "session_001";
// Create Session and Runner
const sessionService = new InMemorySessionService();
sessionService.createSession({
appName: APP_NAME,
userId: USER_ID,
sessionId: SESSION_ID
});
const runner = new runners.Runner({
agent: myLlmAgent,
appName: APP_NAME,
sessionService: sessionService
});
// Agent Interaction function
async function callAgent(query: string): Promise<void> {
// Create content for the request
const content: Content = {
role: 'user',
parts: [{ text: query }]
};
// Run the agent and collect results
try {
const events = runner.run({
userId: USER_ID,
sessionId: SESSION_ID,
newMessage: content
});
for await (const event of events) {
if (event.isFinalResponse() && event.content && event.content.parts && event.content.parts[0].text) {
const finalResponse = event.content.parts[0].text;
console.log("Agent Response: ", finalResponse);
} else if (event.errorCode) {
console.log(`Error Event: [${event.errorCode}] ${event.errorMessage}`);
}
}
} catch (error) {
console.error("Error running agent:", error);
}
}
// Regular query (should not trigger the replacement)
callAgent("Tell me about the weather today.");
// Query specifically asking for a joke to trigger the replacement
setTimeout(() => {
console.log("\nTrying with a query that should trigger word replacement:");
callAgent("Tell me a joke about programming.");
}, 2000);
// Export for external use
export const agent = myLlmAgent;
export async function runAfterModelCallbackDemo(query: string): Promise<void> {
await callAgent(query);
}
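The censoring use case from the Purpose list works the same way: inspect the text and return a new LlmResponse only when something has to change. Below is a minimal sketch that redacts email addresses; the regex is deliberately simplistic and for illustration only.

```typescript
import { CallbackContext } from 'adk-typescript/agents';
import { LlmResponse } from 'adk-typescript/models';

// Deliberately simple pattern; real PII redaction needs a dedicated library.
const EMAIL_PATTERN = /[\w.+-]+@[\w-]+\.[\w.-]+/g;

// Minimal sketch: replace email addresses in the model's text output,
// returning a new LlmResponse only when something actually changed.
function redactEmailsAfterModel(
  callbackContext: CallbackContext,
  llmResponse: LlmResponse
): LlmResponse | undefined {
  const text = llmResponse.content?.parts?.[0]?.text;
  if (!text) {
    return undefined; // No text (e.g., a function call): pass through.
  }
  const redacted = text.replace(EMAIL_PATTERN, "[redacted email]");
  if (redacted === text) {
    return undefined; // Nothing matched: keep the original response.
  }
  return new LlmResponse({
    content: { role: "model", parts: [{ text: redacted }] }
  });
}
```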
Tool Execution Callbacks¶
These callbacks are also specific to LlmAgent and trigger around the execution of tools (including FunctionTool, AgentTool, etc.) that the LLM might request.
Before Tool Callback¶
When: Called just before a specific tool's run_async method is invoked, after the LLM has generated a function call for it.
Purpose: Allows inspection and modification of tool arguments, performing authorization checks before execution, logging tool usage attempts, or implementing tool-level caching.
Return Value Effect:
- If the callback returns undefined, the tool's run_async method is executed with the (potentially modified) args.
- If an object is returned, the tool's run_async method is skipped and the returned object is used directly as the result of the tool call. This is useful for caching or overriding tool behavior (a caching sketch follows the example below).
Code
import {
LlmAgent,
} from 'adk-typescript/agents';
import {
Content,
LlmRegistry,
} from 'adk-typescript/models';
import {
FunctionTool,
ToolContext,
BaseTool,
} from 'adk-typescript/tools';
import { runners } from 'adk-typescript';
import { InMemorySessionService } from 'adk-typescript/sessions';
// Define the model
const GEMINI_2_FLASH = "gemini-2.0-flash";
// Create a function that will be converted to a tool
function getCapitalCity(params: Record<string, any>): string {
/**
* Retrieves the capital city of a given country.
*/
const country = params.country as string;
console.log(`--- Tool 'get_capital_city' executing with country: ${country} ---`);
const countryCapitals: Record<string, string> = {
"united states": "Washington, D.C.",
"canada": "Ottawa",
"france": "Paris",
"germany": "Berlin",
};
return countryCapitals[country.toLowerCase()] || `Capital not found for ${country}`;
}
// Create the function tool
const capitalTool = new FunctionTool(getCapitalCity);
// Define the before tool callback
function simpleBeforeToolModifier(
tool: BaseTool,
args: Record<string, any>,
toolContext: ToolContext
): Record<string, any> | undefined {
/**
* Inspects/modifies tool args or skips the tool call.
*/
const agentName = toolContext.agentName;
const toolName = tool.name;
console.log(`[Callback] Before tool call for tool '${toolName}' in agent '${agentName}'`);
console.log(`[Callback] Original args: ${JSON.stringify(args)}`);
if (toolName === 'get_capital_city' && args.country?.toLowerCase() === 'canada') {
console.log("[Callback] Detected 'Canada'. Modifying args to 'France'.");
args.country = 'France';
console.log(`[Callback] Modified args: ${JSON.stringify(args)}`);
return undefined;
}
// If the tool is 'get_capital_city' and country is 'BLOCK'
if (toolName === 'get_capital_city' && args.country?.toUpperCase() === 'BLOCK') {
console.log("[Callback] Detected 'BLOCK'. Skipping tool execution.");
return { result: "Tool execution was blocked by before_tool_callback." };
}
console.log("[Callback] Proceeding with original or previously modified args.");
return undefined;
}
// Create model instance (using LlmRegistry)
const model = LlmRegistry.newLlm(GEMINI_2_FLASH);
// Create the LLM agent with tool and callback
const myLlmAgent = new LlmAgent({
name: "ToolCallbackAgent",
model: model,
instruction: "You are an agent that can find capital cities. Use the get_capital_city tool.",
description: "An LLM agent demonstrating before_tool_callback",
tools: [capitalTool],
beforeToolCallback: simpleBeforeToolModifier
});
// Setup constants for the session
const APP_NAME = "guardrail_app";
const USER_ID = "user_1";
const SESSION_ID = "session_001";
// Create Session and Runner
const sessionService = new InMemorySessionService();
sessionService.createSession({
appName: APP_NAME,
userId: USER_ID,
sessionId: SESSION_ID
});
const runner = new runners.Runner({
agent: myLlmAgent,
appName: APP_NAME,
sessionService: sessionService
});
// Agent Interaction function
async function callAgent(query: string): Promise<void> {
// Create content for the request
const content: Content = {
role: 'user',
parts: [{ text: query }]
};
// Run the agent and collect results
try {
const events = runner.run({
userId: USER_ID,
sessionId: SESSION_ID,
newMessage: content
});
for await (const event of events) {
if (event.isFinalResponse() && event.content && event.content.parts && event.content.parts[0].text) {
const finalResponse = event.content.parts[0].text;
console.log("Agent Response: ", finalResponse);
} else if (event.errorCode) {
console.log(`Error Event: [${event.errorCode}] ${event.errorMessage}`);
}
}
} catch (error) {
console.error("Error running agent:", error);
}
}
// Execute with a sample query
callAgent("What is the capital of Canada?");
// Export for external use
export const agent = myLlmAgent;
export async function runBeforeToolCallbackDemo(query: string): Promise<void> {
await callAgent(query);
}
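The tool-level caching mentioned above follows the same shape as the BLOCK example: return an object to stand in for the tool's result. Below is a minimal sketch, assuming toolContext.state exposes the session state the way the agent-level callbacks do; the tool_cache key is a hypothetical name, and the matching cache write would live in an after_tool_callback like the one in the next section.

```typescript
import { BaseTool, ToolContext } from 'adk-typescript/tools';

// Minimal sketch: serve repeated calls to the same tool with the same args
// from session state instead of re-running the tool. 'tool_cache' is a
// hypothetical state key chosen for this illustration.
function cachingBeforeTool(
  tool: BaseTool,
  args: Record<string, any>,
  toolContext: ToolContext
): Record<string, any> | undefined {
  const cache = (toolContext.state.tool_cache || {}) as Record<string, any>;
  const key = `${tool.name}:${JSON.stringify(args)}`;
  if (cache[key] !== undefined) {
    console.log(`[Callback] Tool cache hit for ${key}. Skipping execution.`);
    return cache[key]; // Returned object is used directly as the tool result.
  }
  return undefined; // Miss: let the tool run normally.
}
```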
After Tool Callback¶
When: Called just after the tool's run_async method completes successfully.
Purpose: Allows inspection and modification of the tool's result before it's sent back to the LLM (potentially after summarization). Useful for logging tool results, post-processing or formatting results, or saving specific parts of the result to the session state.
Return Value Effect:
- If the callback returns undefined, the original tool_response is used.
- If a new object is returned, it replaces the original tool_response. This allows modifying or filtering the result seen by the LLM (a sketch that saves part of the result to state follows the example below).
Code
import {
LlmAgent,
} from 'adk-typescript/agents';
import {
Content,
LlmRegistry,
} from 'adk-typescript/models';
import {
FunctionTool,
ToolContext,
BaseTool,
} from 'adk-typescript/tools';
import { runners } from 'adk-typescript';
import { InMemorySessionService } from 'adk-typescript/sessions';
// Define the model
const GEMINI_2_FLASH = "gemini-2.0-flash";
// --- Define a Simple Tool Function ---
function getCapitalCity(params: Record<string, any>): Record<string, string> {
/**
* Retrieves the capital city of a given country.
*/
const country = params.country as string;
console.log(`--- Tool 'get_capital_city' executing with country: ${country} ---`);
const countryCapitals: Record<string, string> = {
"united states": "Washington, D.C.",
"canada": "Ottawa",
"france": "Paris",
"germany": "Berlin",
};
return { result: countryCapitals[country.toLowerCase()] || `Capital not found for ${country}` };
}
// --- Wrap the function into a Tool ---
const capitalTool = new FunctionTool(getCapitalCity);
// --- Define the Callback Function ---
function simpleAfterToolModifier(
tool: BaseTool,
args: Record<string, any>,
toolContext: ToolContext,
toolResponse: Record<string, any>
): Record<string, any> | undefined {
/**
* Inspects/modifies the tool result after execution.
*/
const agentName = toolContext.agentName;
const toolName = tool.name;
console.log(`[Callback] After tool call for tool '${toolName}' in agent '${agentName}'`);
console.log(`[Callback] Args used: ${JSON.stringify(args)}`);
console.log(`[Callback] Original tool_response: ${JSON.stringify(toolResponse)}`);
// Default structure for function tool results is {"result": <return_value>}
const originalResultValue = toolResponse.result || "";
// --- Modification Example ---
// If the tool was 'get_capital_city' and result is 'Washington, D.C.'
if (toolName === 'get_capital_city' && originalResultValue === "Washington, D.C.") {
console.log("[Callback] Detected 'Washington, D.C.'. Modifying tool response.");
// IMPORTANT: Create a new object or clone the existing one
const modifiedResponse = { ...toolResponse };
modifiedResponse.result = `${originalResultValue} (Note: This is the capital of the USA).`;
modifiedResponse.note_added_by_callback = true; // Add extra info if needed
console.log(`[Callback] Modified tool_response: ${JSON.stringify(modifiedResponse)}`);
return modifiedResponse; // Return the modified dictionary
}
console.log("[Callback] Passing original tool response through.");
// Return undefined to use the original tool_response
return undefined;
}
// Create model instance (using LlmRegistry)
const model = LlmRegistry.newLlm(GEMINI_2_FLASH);
// Create LlmAgent and Assign Callback
const myLlmAgent = new LlmAgent({
name: "AfterToolCallbackAgent",
model: model,
instruction: "You are an agent that finds capital cities using the get_capital_city tool. Report the result clearly.",
description: "An LLM agent demonstrating after_tool_callback",
tools: [capitalTool], // Add the tool
afterToolCallback: simpleAfterToolModifier // Assign the callback
});
// Setup constants for the session
const APP_NAME = "guardrail_app";
const USER_ID = "user_1";
const SESSION_ID = "session_001";
// Create Session and Runner
const sessionService = new InMemorySessionService();
sessionService.createSession({
appName: APP_NAME,
userId: USER_ID,
sessionId: SESSION_ID
});
const runner = new runners.Runner({
agent: myLlmAgent,
appName: APP_NAME,
sessionService: sessionService
});
// Agent Interaction function
async function callAgent(query: string): Promise<void> {
// Create content for the request
const content: Content = {
role: 'user',
parts: [{ text: query }]
};
// Run the agent and collect results
try {
const events = runner.run({
userId: USER_ID,
sessionId: SESSION_ID,
newMessage: content
});
for await (const event of events) {
if (event.isFinalResponse() && event.content && event.content.parts && event.content.parts[0].text) {
const finalResponse = event.content.parts[0].text;
console.log("Agent Response: ", finalResponse);
} else if (event.errorCode) {
console.log(`Error Event: [${event.errorCode}] ${event.errorMessage}`);
}
}
} catch (error) {
console.error("Error running agent:", error);
}
}
// Execute with a sample query
callAgent("What is the capital of the United States?");
setTimeout(() => {
console.log("\nTrying with a different country:");
callAgent("What is the capital of France?");
}, 2000);
// Export for external use
export const agent = myLlmAgent;
export async function runAfterToolCallbackDemo(query: string): Promise<void> {
await callAgent(query);
}
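Finally, the "saving specific parts of the result to the session state" purpose needs no replacement at all: write to state and return undefined. Below is a minimal sketch against the same get_capital_city tool, assuming writes to toolContext.state are persisted; the last_capital_lookup key is a hypothetical name chosen for this illustration.

```typescript
import { BaseTool, ToolContext } from 'adk-typescript/tools';

// Minimal sketch: persist the last capital-city lookup into session state so
// later turns (or other callbacks) can read it, while passing the original
// tool response through unchanged.
function saveResultToState(
  tool: BaseTool,
  args: Record<string, any>,
  toolContext: ToolContext,
  toolResponse: Record<string, any>
): Record<string, any> | undefined {
  if (tool.name === 'get_capital_city' && toolResponse.result) {
    toolContext.state.last_capital_lookup = { // hypothetical state key
      country: args.country,
      capital: toolResponse.result
    };
  }
  return undefined; // undefined keeps the original tool response.
}
```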