Skip to content

Commit 546960d

Browse files
authored
Create langgraph_ai_agent.py
1 parent 8dda195 commit 546960d

File tree

1 file changed

+87
-0
lines changed

1 file changed

+87
-0
lines changed
Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
from typing import Annotated, Literal, TypedDict
2+
3+
from langchain_core.messages import HumanMessage
4+
from langchain_anthropic import ChatAnthropic
5+
from langchain_core.tools import tool
6+
from langgraph.checkpoint.memory import MemorySaver
7+
from langgraph.graph import END, START, StateGraph, MessagesState
8+
from langgraph.prebuilt import ToolNode
9+
10+
11+
# --- Tools the agent may call ------------------------------------------------
@tool
def search(query: str):
    """Call to surf the web."""
    # This is a placeholder, but don't tell the LLM that...
    q = query.lower()
    # Any San Francisco query gets the foggy answer; everything else is sunny.
    if any(term in q for term in ("sf", "san francisco")):
        return "It's 60 degrees and foggy."
    return "It's 90 degrees and sunny."
19+
20+
21+
# Register the available tools and build the node that executes them.
tools = [search]
tool_node = ToolNode(tools)

# Claude 3.5 Sonnet, deterministic (temperature=0), with tool-calling enabled
# so the model can emit structured calls to `search`.
model = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0).bind_tools(tools)
28+
29+
30+
# Routing function: decide whether to run tools or finish the turn.
def should_continue(state: MessagesState) -> Literal["tools", END]:
    """Route to the "tools" node when the last LLM message requests a tool call.

    Returns "tools" if the most recent message carries tool calls, otherwise
    END to stop and reply to the user.
    """
    last = state["messages"][-1]
    # A non-empty tool_calls list means the LLM wants a tool executed.
    return "tools" if last.tool_calls else END
39+
40+
41+
# Node function: one LLM turn over the accumulated conversation.
def call_model(state: MessagesState):
    """Invoke the model on the message history and return its reply.

    The reply is wrapped in a one-element list so MessagesState appends it
    to the existing conversation rather than replacing it.
    """
    reply = model.invoke(state["messages"])
    return {"messages": [reply]}
47+
48+
49+
# --- Build and run the agent graph -------------------------------------------
workflow = StateGraph(MessagesState)

# The two nodes we cycle between: the LLM ("agent") and tool execution ("tools").
workflow.add_node("agent", call_model)
workflow.add_node("tools", tool_node)

# Entry point: the agent node is called first.
workflow.add_edge(START, "agent")

# After each agent turn, `should_continue` routes either to "tools"
# (the LLM asked for a tool) or to END (reply to the user).
workflow.add_conditional_edges(
    "agent",
    should_continue,
)

# After tools execute, control always returns to the agent for the next turn.
workflow.add_edge("tools", "agent")

# In-memory checkpointer so conversation state persists between graph runs
# sharing the same thread_id.
checkpointer = MemorySaver()

# Compile into a LangChain Runnable; passing the checkpointer is optional.
app = workflow.compile(checkpointer=checkpointer)

# Run the agent once; thread_id identifies this conversation for the checkpointer.
final_state = app.invoke(
    {"messages": [HumanMessage(content="what is the weather in sf")]},
    config={"configurable": {"thread_id": 42}},
)
# BUG FIX: the original ended with the bare expression
# `final_state["messages"][-1].content`, a no-op when run as a script
# (it only displayed output in a notebook). Print the final answer instead.
print(final_state["messages"][-1].content)

0 commit comments

Comments
 (0)