import { AIMessage, ToolMessage } from '@langchain/core/messages';
import { ToolCall } from '@langchain/core/messages/tool';
import { tool } from '@langchain/core/tools';
import {
  Command,
  END,
  interrupt,
  MemorySaver,
  MessagesAnnotation,
  START,
  StateGraph,
} from '@langchain/langgraph';
import { ChatOpenAI } from '@langchain/openai';
import { z } from 'zod';

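// A stub tool for the demo: it logs the requested city and returns a canned
// answer, so the human-in-the-loop flow can be exercised without a real API.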
const weatherSearch = tool(
  (input: { city: string }) => {
    console.log('----');
    console.log(`Searching for: ${input.city}`);
    console.log('----');
    return 'Sunny!';
  },
  {
    name: 'weather_search',
    description: 'Search for the weather',
    schema: z.object({
      city: z.string(),
    }),
  },
);

const tools = [weatherSearch];

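// Chat model served from a local OpenAI-compatible endpoint (e.g. LM Studio,
// whose default port is 1234); local servers ignore the API key, so any
// placeholder value works.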
const llm = new ChatOpenAI({
  model: 'qwen/qwen3-4b-2507',
  // model: 'google/gemma-3-12b',
  configuration: {
    baseURL: 'http://localhost:1234/v1',
    apiKey: 'not-needed',
  },
  temperature: 0,
});

const model = llm.bindTools(tools);

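// Node: send the accumulated message history to the model; the returned
// AIMessage (which may carry tool calls) is appended to state by the reducer.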
const callLLM = async (state: typeof MessagesAnnotation.State) => {
  const response = await model.invoke(state.messages);
  return { messages: [response] };
};

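// Node: pause the graph with interrupt() and wait for a human verdict on the
// pending tool call. The resume payload decides what happens next:
//   continue -> run the tool call as-is
//   update   -> run the tool call with human-edited arguments
//   feedback -> skip the tool and send the feedback back to the LLM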
const humanReviewNode = async (
  state: typeof MessagesAnnotation.State,
): Promise<Command> => {
  const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
  const toolCall = lastMessage.tool_calls![lastMessage.tool_calls!.length - 1];

  type ReviewDisplayUI = {
    question: string;
    toolCall: ToolCall;
  };
  type ReviewResult = {
    action: string;
    data?: any;
  };
  // 👷 waiting for feedback
  const humanReview = interrupt<ReviewDisplayUI, ReviewResult>({
    question: 'Is this correct?',
    toolCall: toolCall,
  });

  const reviewAction = humanReview.action;
  const reviewData = humanReview.data;

  if (reviewAction === 'continue') {
    // 🧩 resume: approve the tool call unchanged
    return new Command({ goto: 'run_tool' });
  } else if (reviewAction === 'update') {
    // Overwrite the AI message (same id) so the tool call carries the
    // human-edited arguments.
    const updatedMessage = {
      role: 'ai',
      content: lastMessage.content,
      tool_calls: [
        {
          id: toolCall.id,
          name: toolCall.name,
          args: reviewData,
        },
      ],
      id: lastMessage.id,
    };
    // 🧩 resume
    return new Command({
      goto: 'run_tool',
      update: { messages: [updatedMessage] },
    });
  } else if (reviewAction === 'feedback') {
    // Answer the tool call with the human's feedback instead of running it,
    // then hand control back to the LLM.
    const toolMessage = new ToolMessage({
      name: toolCall.name,
      content: reviewData,
      tool_call_id: toolCall.id!,
    });
    // 🧩 resume
    return new Command({
      goto: 'call_llm',
      update: { messages: [toolMessage] },
    });
  }
  throw new Error('Invalid review action');
};

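// Node: execute the approved tool calls by hand (rather than via the prebuilt
// ToolNode) and wrap each result in a ToolMessage tied to its tool_call_id.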
const runTool = async (state: typeof MessagesAnnotation.State) => {
  const newMessages: ToolMessage[] = [];
  const toolsByName = { weather_search: weatherSearch };
  const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
  const toolCalls = lastMessage.tool_calls!;

  for (const toolCall of toolCalls) {
    const selectedTool = toolsByName[toolCall.name as keyof typeof toolsByName];
    const result = await selectedTool.invoke(toolCall.args as { city: string });
    newMessages.push(
      new ToolMessage({
        name: toolCall.name,
        content: result as string,
        tool_call_id: toolCall.id!,
      }),
    );
  }
  return { messages: newMessages };
};

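// Conditional edge: if the last AI message requested a tool, detour through
// human review; otherwise the conversation is finished.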
const routeAfterLLM = (
  state: typeof MessagesAnnotation.State,
): typeof END | 'human_review_node' => {
  const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
  if (!lastMessage.tool_calls?.length) {
    return END;
  }
  return 'human_review_node';
};

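// Wire the graph. human_review_node routes dynamically via Command, so its
// possible destinations are declared with `ends` instead of static edges.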
const workflow = new StateGraph(MessagesAnnotation)
  .addNode('call_llm', callLLM)
  .addNode('run_tool', runTool)
  .addNode('human_review_node', humanReviewNode, {
    ends: ['run_tool', 'call_llm'],
  })
  .addEdge(START, 'call_llm')
  .addConditionalEdges('call_llm', routeAfterLLM, ['human_review_node', END])
  .addEdge('run_tool', 'call_llm');

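// interrupt()/resume requires a checkpointer, since the paused state must
// survive between invocations; MemorySaver keeps it in process memory.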
const memory = new MemorySaver();

const graph = workflow.compile({ checkpointer: memory });

// -------

// 🌰 an example where no review is required (because no tools are called)

let inputs = { messages: [{ role: 'user', content: 'hi!' }] };
let config = {
  configurable: { thread_id: '1' },
  streamMode: 'values' as const,
};

let stream: any;
// let stream = await graph.stream(inputs, config);

// for await (const event of stream) {
//   const recentMsg = event.messages[event.messages.length - 1];
//   console.log(`================================ ${recentMsg.getType()} Message =================================`);
//   console.log(recentMsg.content);
// }

// 🌰 an example of approving a tool call

inputs = {
  messages: [{ role: 'user', content: "what's the weather in guangzhou?" }],
};
config = { configurable: { thread_id: '2' }, streamMode: 'values' as const };

stream = await graph.stream(inputs, config);

for await (const event of stream) {
  if (event.messages) {
    const recentMsg = event.messages[event.messages.length - 1];
    console.log(
      `================================ ${recentMsg.getType()} Message =================================`,
    );
    console.log(recentMsg.content);
  }
}

for await (const event of await graph.stream(
  // ✅ provide a resume value of { action: 'continue' } to route to the run_tool node
  new Command({ resume: { action: 'continue' } }),
  config,
)) {
  if (event.messages) {
    const recentMsg = event.messages[event.messages.length - 1];
    console.log(
      `================================ ${recentMsg.getType()} Message =================================`,
    );
    console.log(recentMsg.content);
  }
}
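
// 🌰 sketches of the other two review actions (not run here; each needs a
// fresh interrupt first, e.g. replay the weather question on a new thread_id).
// The payload shapes follow what humanReviewNode expects:
//
// edit the tool call's arguments before running it:
// await graph.stream(
//   new Command({ resume: { action: 'update', data: { city: 'shenzhen' } } }),
//   config,
// );
//
// reject the tool call and send natural-language feedback back to the LLM:
// await graph.stream(
//   new Command({ resume: { action: 'feedback', data: 'please check shenzhen instead' } }),
//   config,
// );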