import os
import getpass
from typing import TypedDict, Annotated
from langgraph.graph.message import add_messages
from langchain_core.messages import AnyMessage, HumanMessage, AIMessage
from langgraph.prebuilt import ToolNode
from langgraph.graph import START, StateGraph
from langgraph.prebuilt import tools_condition
from langchain.chat_models import init_chat_model
from langgraph.checkpoint.memory import MemorySaver
from tools import *
# Ensure a Gemini API key is available before the model is initialized;
# prompt interactively only when the environment doesn't already provide one
# (an empty string also triggers the prompt).
if not os.environ.get("GOOGLE_API_KEY"):
    os.environ["GOOGLE_API_KEY"] = getpass.getpass("Enter API key for Google Gemini: ")

# Generate the chat interface, including the tools
chat = init_chat_model("gemini-2.5-flash", model_provider="google_genai")
# Tools imported from the local `tools` module (guest info, weather, hub stats, search).
tools = [guest_info_tool, weather_info_tool, hub_stats_tool, search_tool]
# Model handle that can emit tool calls for the tools above.
chat_with_tools = chat.bind_tools(tools)
# Generate the AgentState and Agent graph
class AgentState(TypedDict):
    """Graph state: the running conversation.

    `add_messages` is a reducer, so messages returned by a node are
    appended to the history rather than replacing it.
    """
    messages: Annotated[list[AnyMessage], add_messages]
def assistant(state: AgentState):
    """Run the tool-bound chat model on the conversation so far.

    Returns a partial state update; the reply is appended to
    ``messages`` by the ``add_messages`` reducer.
    """
    reply = chat_with_tools.invoke(state["messages"])
    return {"messages": [reply]}
## The graph
# Checkpointer: keeps per-thread conversation state in memory so follow-up
# questions on the same thread_id can reference earlier turns.
memory = MemorySaver()
builder = StateGraph(AgentState)

# Nodes — the LLM step and the tool executor.
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))

# Edges — always start at the assistant; `tools_condition` routes to the
# "tools" node when the latest AI message contains tool calls, otherwise
# the graph ends. Tool results loop back into the assistant.
builder.add_edge(START, "assistant")
builder.add_conditional_edges("assistant", tools_condition)
builder.add_edge("tools", "assistant")

alfred = builder.compile(checkpointer=memory)
# Shared thread id: both invocations use the same thread so the MemorySaver
# checkpointer lets the second question refer back to the first answer.
config = {"configurable": {"thread_id": "1"}}

# First interaction
response = alfred.invoke(
    {"messages": [HumanMessage(content="Tell me about 'Lady Ada Lovelace'. What's her background and how is she related to me?")]},
    config=config,
)
# Fixed: the banner emoji was mojibake ("π©") — restored to the intended 🎩.
print("🎩 Alfred's Response:")
print(response['messages'][-1].content)
print()

# Second interaction (referencing the first via the checkpointed thread)
response = alfred.invoke(
    {"messages": [HumanMessage(content="What projects is she currently working on?")]},
    config=config,
)
print("🎩 Alfred's Response:")
print(response['messages'][-1].content)