How to stream events from within a tool (without LangChain LLMs / tools)¶
Prerequisites
This guide assumes familiarity with streaming in LangGraph, the astream_events API, and dispatching custom callback events.
In this guide, we will demonstrate how to stream tokens from tools used by a custom ReAct agent, without relying on LangChain's chat models or tool-calling functionality.
We will use the OpenAI client library directly for the chat model interaction, and implement tool execution from scratch.
This showcases how LangGraph can be used independently of built-in LangChain components such as chat models and tools.
Setup¶
First, let's install the required packages and set our API keys:
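For example (the exact package list here is an assumption inferred from the imports used in this guide):

# package names inferred from the imports below (langgraph pulls in langchain-core)
%pip install -U langgraph openai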
import getpass
import os


def _set_env(var: str):
    if not os.environ.get(var):
        os.environ[var] = getpass.getpass(f"{var}: ")


_set_env("OPENAI_API_KEY")
Set up LangSmith for LangGraph development
Sign up for LangSmith to quickly spot issues and improve the performance of your LangGraph projects. LangSmith lets you use trace data to debug, test, and monitor your LLM apps built with LangGraph — read more about how to get started here.
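If you want tracing enabled for the runs below, a minimal sketch (assuming the standard LangSmith environment variables and the _set_env helper defined above):

_set_env("LANGCHAIN_API_KEY")
# standard LangSmith tracing toggle; set only if you want traces recorded
os.environ["LANGCHAIN_TRACING_V2"] = "true"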
Define the graph¶
Define a node that will call the OpenAI API¶
from openai import AsyncOpenAI
from langchain_core.language_models.chat_models import ChatGenerationChunk
from langchain_core.messages import AIMessageChunk
from langchain_core.runnables.config import (
    ensure_config,
    get_callback_manager_for_config,
)

openai_client = AsyncOpenAI()

# define tool schema for openai tool calling
tool = {
    "type": "function",
    "function": {
        "name": "get_items",
        "description": "Use this tool to look up which items are in the given place.",
        "parameters": {
            "type": "object",
            "properties": {"place": {"type": "string"}},
            "required": ["place"],
        },
    },
}
async def call_model(state, config=None):
    config = ensure_config(config | {"tags": ["agent_llm"]})
    callback_manager = get_callback_manager_for_config(config)
    messages = state["messages"]

    llm_run_manager = callback_manager.on_chat_model_start({}, [messages])[0]
    response = await openai_client.chat.completions.create(
        messages=messages, model="gpt-3.5-turbo", tools=[tool], stream=True
    )

    response_content = ""
    role = None

    tool_call_id = None
    tool_call_function_name = None
    tool_call_function_arguments = ""

    async for chunk in response:
        delta = chunk.choices[0].delta
        if delta.role is not None:
            role = delta.role

        if delta.content:
            response_content += delta.content
            llm_run_manager.on_llm_new_token(delta.content)

        if delta.tool_calls:
            # note: for simplicity we're only handling a single tool call here
            if delta.tool_calls[0].function.name is not None:
                tool_call_function_name = delta.tool_calls[0].function.name
                tool_call_id = delta.tool_calls[0].id

            # note: we're wrapping the tool calls in ChatGenerationChunk so that
            # the events from .astream_events in the graph can render tool calls correctly
            tool_call_chunk = ChatGenerationChunk(
                message=AIMessageChunk(
                    content="",
                    additional_kwargs={"tool_calls": [delta.tool_calls[0].dict()]},
                )
            )
            llm_run_manager.on_llm_new_token("", chunk=tool_call_chunk)
            tool_call_function_arguments += delta.tool_calls[0].function.arguments

    if tool_call_function_name is not None:
        tool_calls = [
            {
                "id": tool_call_id,
                "function": {
                    "name": tool_call_function_name,
                    "arguments": tool_call_function_arguments,
                },
                "type": "function",
            }
        ]
    else:
        tool_calls = None

    response_message = {
        "role": role,
        "content": response_content,
        "tool_calls": tool_calls,
    }
    return {"messages": [response_message]}
Define our tools and a tool-calling node¶
import json

from langchain_core.callbacks import adispatch_custom_event


async def get_items(place: str) -> str:
    """Use this tool to look up which items are in the given place."""

    # this can be replaced with any actual streaming logic that you might have
    def stream(place: str):
        if "bed" in place:  # For under the bed
            yield from ["socks", "shoes", "dust bunnies"]
        elif "shelf" in place:  # For 'shelf'
            yield from ["books", "pencils", "pictures"]
        else:  # if the agent decides to ask about a different place
            yield "cat snacks"

    tokens = []
    for token in stream(place):
        await adispatch_custom_event(
            # this will allow you to filter events by name
            "tool_call_token_stream",
            {
                "function_name": "get_items",
                "arguments": {"place": place},
                "tool_output_token": token,
            },
            # this will allow you to filter events by tags
            config={"tags": ["tool_call"]},
        )
        tokens.append(token)

    return ", ".join(tokens)


# define mapping to look up functions when running tools
function_name_to_function = {"get_items": get_items}


async def call_tools(state):
    messages = state["messages"]

    tool_call = messages[-1]["tool_calls"][0]
    function_name = tool_call["function"]["name"]
    function_arguments = tool_call["function"]["arguments"]
    arguments = json.loads(function_arguments)

    function_response = await function_name_to_function[function_name](**arguments)
    tool_message = {
        "tool_call_id": tool_call["id"],
        "role": "tool",
        "name": function_name,
        "content": function_response,
    }
    return {"messages": [tool_message]}
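For reference, the assistant message that call_tools expects to find at the end of state["messages"] looks like this (illustrative values only; the id is made up):

# illustrative only: the OpenAI-style assistant message shape consumed by call_tools
example_assistant_message = {
    "role": "assistant",
    "content": "",
    "tool_calls": [
        {
            "id": "call_abc123",  # hypothetical id for illustration
            "function": {"name": "get_items", "arguments": '{"place": "shelf"}'},
            "type": "function",
        }
    ],
}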
Define our graph¶
import operator
from typing import Annotated, Literal

from typing_extensions import TypedDict

from langgraph.graph import StateGraph, END, START


class State(TypedDict):
    messages: Annotated[list, operator.add]


def should_continue(state) -> Literal["tools", END]:
    messages = state["messages"]
    last_message = messages[-1]
    if last_message["tool_calls"]:
        return "tools"
    return END
workflow = StateGraph(State)
workflow.add_node("model", call_model)  # i.e. our "agent"
workflow.add_node("tools", call_tools)
workflow.add_edge(START, "model")
workflow.add_conditional_edges("model", should_continue)
workflow.add_edge("tools", "model")
graph = workflow.compile()
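Optionally, you can render the compiled graph to double-check the wiring (a sketch that assumes you're in a Jupyter/IPython environment):

from IPython.display import Image, display

# render the graph structure as a Mermaid diagram
display(Image(graph.get_graph().draw_mermaid_png()))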
Stream tokens from within the tool¶
Here, we'll use the astream_events API to stream back individual events. Please see the astream_events documentation for more details.
async for event in graph.astream_events(
    {"messages": [{"role": "user", "content": "what's in the bedroom"}]}, version="v2"
):
    tags = event.get("tags", [])
    if event["event"] == "on_custom_event" and "tool_call" in tags:
        print("Tool token", event["data"]["tool_output_token"])