"""LangGraph calculator agent: an LLM with arithmetic tools wired into a graph."""
import os
import operator
from typing import Literal

from dotenv import load_dotenv
from langchain.tools import tool
from langchain.chat_models import init_chat_model
from langchain.messages import AnyMessage, SystemMessage, ToolMessage, HumanMessage
from typing_extensions import TypedDict, Annotated
from langgraph.graph import StateGraph, START, END

# Load the OpenAI-compatible endpoint configuration from the local .env file.
load_dotenv()

BASE_URL = os.getenv("OPENAI_BASE_URL")
API_KEY = os.getenv("OPENAI_API_KEY")
MODEL_NAME = os.getenv("OPENAI_MODEL")

# Fail fast with an explicit message when any required setting is absent.
if not BASE_URL:
    raise ValueError("OPENAI_BASE_URL 未在 .env 文件中配置")
if not API_KEY:
    raise ValueError("OPENAI_API_KEY 未在 .env 文件中配置")
if not MODEL_NAME:
    raise ValueError("OPENAI_MODEL 未在 .env 文件中配置")

# Chat model bound to the configured OpenAI-compatible endpoint.
model = init_chat_model(
    model=MODEL_NAME,
    model_provider="openai",
    temperature=0.7,
    base_url=BASE_URL,
    api_key=API_KEY,
)
@tool
def multiply(a: int, b: int) -> int:
    """Multiply `a` and `b`.

    Args:
        a: First int
        b: Second int
    """
    product = a * b
    return product
@tool
def add(a: int, b: int) -> int:
    """Adds `a` and `b`.

    Args:
        a: First int
        b: Second int
    """
    total = a + b
    return total
@tool
def divide(a: int, b: int) -> float:
    """Divide `a` and `b`.

    Args:
        a: First int
        b: Second int
    """
    quotient = a / b
    return quotient
# Registry of the agent's arithmetic tools, indexed by name so that
# tool_node can dispatch each requested call.
tools = [add, multiply, divide]
# NOTE: the loop variable is `t`, not `tool`, so the imported @tool
# decorator is not shadowed at module level.
tools_by_name = {t.name: t for t in tools}
# Model instance that can emit structured tool calls for the tools above.
model_with_tools = model.bind_tools(tools)
class MessagesState(TypedDict):
    """Graph state: the running message transcript plus an LLM-call counter."""

    # Annotated with operator.add so LangGraph appends each node's new
    # messages to the existing list instead of replacing it.
    messages: Annotated[list[AnyMessage], operator.add]
    llm_calls: int
def llm_call(state: dict):
    """LLM decides whether to call a tool or not.

    Prepends the fixed system prompt to the accumulated messages, invokes
    the tool-bound model once, and returns the response together with an
    incremented `llm_calls` counter.
    """
    system = SystemMessage(
        content="You are a helpful assistant tasked with performing arithmetic on a set of inputs."
    )
    response = model_with_tools.invoke([system] + state["messages"])
    return {
        "messages": [response],
        "llm_calls": state.get("llm_calls", 0) + 1,
    }
def tool_node(state: dict):
    """Execute every tool call requested by the last message.

    Returns a state update whose `messages` list carries one ToolMessage
    per requested call, holding the stringified tool result and the
    originating tool_call id.
    """
    results = []
    for tool_call in state["messages"][-1].tool_calls:
        # Look the tool up by name; the local is `selected` (not `tool`)
        # to avoid shadowing the imported @tool decorator.
        selected = tools_by_name[tool_call["name"]]
        observation = selected.invoke(tool_call["args"])
        results.append(
            ToolMessage(content=str(observation), tool_call_id=tool_call["id"])
        )
    return {"messages": results}
def should_continue(state: MessagesState) -> Literal["tool_node", END]:
    """Decide if we should continue the loop or stop based upon whether the LLM made a tool call."""
    last_message = state["messages"][-1]
    # Keep looping through the tool node while the LLM requests tools;
    # otherwise terminate the graph.
    return "tool_node" if last_message.tool_calls else END
# Wire the agent graph: the LLM node loops through the tool node until it
# produces a final answer containing no tool calls.
agent_builder = StateGraph(MessagesState)

agent_builder.add_node("llm_call", llm_call)
agent_builder.add_node("tool_node", tool_node)

agent_builder.add_edge(START, "llm_call")
agent_builder.add_conditional_edges("llm_call", should_continue, ["tool_node", END])
agent_builder.add_edge("tool_node", "llm_call")

# Compiled, invokable agent.
agent = agent_builder.compile()
def _run_demo(title: str, prompt: str) -> None:
    """Run one prompt through the agent and print the resulting transcript."""
    print(title)
    result = agent.invoke({"messages": [HumanMessage(content=prompt)]})
    print(f"LLM calls: {result.get('llm_calls', 0)}")
    for m in result["messages"]:
        print(f"- {m.__class__.__name__}: {m.content}")


if __name__ == "__main__":
    print("=== LangGraph Calculator Agent Demo ===\n")
    # The four demo prompts were copy-pasted blocks; drive them from a
    # single list instead so the output logic lives in one place.
    demos = [
        ("Test 1: Add 3 and 4", "Add 3 and 4."),
        ("Test 2: Multiply 5 and 6", "Multiply 5 and 6."),
        ("Test 3: Divide 20 by 4", "Divide 20 by 4."),
        (
            "Test 4: Add 10 and 5, then multiply the result by 3",
            "Add 10 and 5, then multiply the result by 3.",
        ),
    ]
    for index, (title, prompt) in enumerate(demos):
        _run_demo(title, prompt)
        # The original printed a blank separator after every test but the last.
        if index < len(demos) - 1:
            print("\n")