Commit fe4bdd8

feat: langgraphjs eg - human in the loop
1 parent: 57629ef

5 files changed: +481 -74 lines

README.md

Lines changed: 3 additions & 0 deletions
@@ -30,9 +30,12 @@ npx tsx ./server/chain-groq1-starter.ts
 ```
 
 # roadmap
+
 - [ ] `graph.stream` does not work with a local llm
 # notes
 - examples in python: https://github.com/uptonking/langchain-langgraph-play
 
+- why does rag return only one relevant doc?
+  - changing embedding model may help (see the retriever sketch below)
 # license
 MIT
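The rag note in the README diff above (only one relevant doc coming back) is usually a retriever setting rather than a graph problem. Below is a minimal sketch, not code from this commit, of the two usual knobs in LangChain JS: raising the retriever's `k` and swapping the embedding model. The embedding model name and the `localhost:1234` endpoint are assumptions borrowed from the local OpenAI-compatible setup used elsewhere in this commit.

```ts
// Sketch only: not part of this commit. The embedding model name is a placeholder
// for whatever embedding model the local OpenAI-compatible server exposes.
import { OpenAIEmbeddings } from '@langchain/openai';
import { MemoryVectorStore } from 'langchain/vectorstores/memory';

const embeddings = new OpenAIEmbeddings({
  model: 'text-embedding-nomic-embed-text-v1.5', // assumed local embedding model
  apiKey: 'not-needed',
  configuration: { baseURL: 'http://localhost:1234/v1' },
});

const vectorStore = await MemoryVectorStore.fromTexts(
  ['langgraph supports human-in-the-loop', 'interrupt() pauses a node', 'Command resumes a run'],
  [{ id: 1 }, { id: 2 }, { id: 3 }],
  embeddings,
);

// asRetriever defaults to a small k; ask for more documents explicitly.
const retriever = vectorStore.asRetriever({ k: 3 });
const docs = await retriever.invoke('how do I pause a graph?');
console.log(docs.map((d) => d.pageContent)); // up to 3 docs instead of 1
```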
Lines changed: 165 additions & 0 deletions
@@ -0,0 +1,165 @@
import { AIMessage, ToolMessage } from '@langchain/core/messages';
import { tool } from '@langchain/core/tools';
import {
  Command,
  END,
  interrupt,
  MemorySaver,
  MessagesAnnotation,
  START,
  StateGraph,
} from '@langchain/langgraph';
import { ToolNode } from '@langchain/langgraph/prebuilt';
import { ChatOpenAI } from '@langchain/openai';
import { z } from 'zod';

const search = tool(
  () => {
    return "It's sunny, but you better look out.";
  },
  {
    name: 'search',
    description: 'Call to surf the web.',
    schema: z.string(),
  },
);
const tools = [search];

const model = new ChatOpenAI({
  model: 'qwen/qwen3-4b-2507',
  // model: 'google/gemma-3-12b',
  configuration: {
    baseURL: 'http://localhost:1234/v1',
    apiKey: 'not-needed',
  },
  temperature: 0,
});

const askHumanTool = tool(
  () => {
    return 'The human said XYZ';
  },
  {
    name: 'askHuman',
    description: 'Ask the human for input.',
    schema: z.string(),
  },
);
const modelWithTools = model.bindTools([...tools, askHumanTool]);

// Define the function that determines whether to continue or not
function shouldContinue(
  state: typeof MessagesAnnotation.State,
): 'action' | 'askHuman' | typeof END {
  const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
  // If there is no function call, then we finish
  if (lastMessage && !lastMessage.tool_calls?.length) {
    return END;
  }
  // If the tool call is askHuman, we return that node
  // You could also add logic here to let some system know that there's something that requires human input
  // For example, send a slack message, etc
  if (lastMessage.tool_calls?.[0]?.name === 'askHuman') {
    console.log('--- ASKING HUMAN ---');
    return 'askHuman';
  }
  // Otherwise, we continue with the action node
  return 'action';
}

// 👾 Define the function that calls the model
async function callModel(
  state: typeof MessagesAnnotation.State,
): Promise<Partial<typeof MessagesAnnotation.State>> {
  const messages = state.messages;
  const response = await modelWithTools.invoke(messages);
  // We return an object with a messages property, because this will get added to the existing list
  return { messages: [response] };
}

// We define a fake node to ask the human
function askHuman(
  state: typeof MessagesAnnotation.State,
): Partial<typeof MessagesAnnotation.State> {
  const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
  const toolCallId = lastMessage.tool_calls?.[0].id;
  // 👷 Call interrupt() inside the human_feedback node
  const location: string = interrupt('Please provide your location:');
  const newToolMessage = new ToolMessage({
    tool_call_id: toolCallId!,
    content: location,
  });
  return { messages: [newToolMessage] };
}

const toolNode = new ToolNode<typeof MessagesAnnotation.State>(tools);

const workflow = new StateGraph(MessagesAnnotation)
  // Define the two nodes we will cycle between
  .addNode('agent', callModel)
  .addNode('action', toolNode)
  .addNode('askHuman', askHuman)
  // We now add a conditional edge
  .addConditionalEdges(
    // First, we define the start node. We use `agent`.
    // This means these are the edges taken after the `agent` node is called.
    'agent',
    // Next, we pass in the function that will determine which node is called next.
    shouldContinue,
  )
  // We now add a normal edge from `action` to `agent`.
  // This means that after `action` is called, the `agent` node is called next.
  .addEdge('action', 'agent')
  // After we get back the human response, we go back to the agent
  .addEdge('askHuman', 'agent')
  // Set the entrypoint as `agent`
  // This means that this node is the first one called
  .addEdge(START, 'agent');

const messagesMemory = new MemorySaver();
// const graph = workflow.compile();
const messagesApp = workflow.compile({
  checkpointer: messagesMemory,
});

// -------

// ask the user where they are, then tell them the weather.

const input = {
  role: 'user',
  content:
    'Use the search tool to ask the user where they are, then look up the weather there',
};

const config2 = {
  configurable: { thread_id: '3' },
  streamMode: 'values' as const,
};

for await (const event of await messagesApp.stream(
  {
    messages: [input],
  },
  config2,
)) {
  console.log('\n---===---');
  console.log(event);
  if (event.messages?.length) {
    const recentMsg = event.messages[event.messages.length - 1];
    console.log(
      `================================ ${recentMsg.getType()} Message =================================`,
    );
    console.log(recentMsg.content);
  }
}

// Continue the graph execution
for await (const event of await messagesApp.stream(
  // 🧩 provide the requested value to the human_feedback node and resume execution
  new Command({ resume: 'guangzhou' }),
  config2,
)) {
  console.log(event);
  console.log('\n====\n');
}
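Between the first stream loop and the `Command({ resume })` loop in the file above, the run is parked at the `askHuman` node waiting for a value. A minimal sketch of how that pause could be inspected via the checkpointer, assuming the same `messagesApp` and `config2` from this file; the `tasks`/`interrupts` fields depend on the installed `@langchain/langgraph` version:

```ts
// Sketch only: inspect the interrupted run before resuming.
// Assumes messagesApp and config2 from the file above.
const snapshot = await messagesApp.getState(config2);

// `next` lists the node(s) the graph will run when resumed, e.g. ['askHuman'].
console.log('paused before:', snapshot.next);

// Newer @langchain/langgraph releases also surface the interrupt payload on tasks
// (version-dependent; treat this as an assumption).
for (const task of snapshot.tasks ?? []) {
  console.log(task.name, task.interrupts);
}

// Resuming is what the second loop above does:
// messagesApp.stream(new Command({ resume: 'guangzhou' }), config2)
```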
Lines changed: 205 additions & 0 deletions
@@ -0,0 +1,205 @@
import { AIMessage, ToolMessage } from '@langchain/core/messages';
import { ToolCall } from '@langchain/core/messages/tool';
import { tool } from '@langchain/core/tools';
import {
  Command,
  END,
  interrupt,
  MemorySaver,
  MessagesAnnotation,
  START,
  StateGraph,
} from '@langchain/langgraph';
import { ToolNode } from '@langchain/langgraph/prebuilt';
import { ChatOpenAI } from '@langchain/openai';
import { z } from 'zod';

const weatherSearch = tool(
  (input: { city: string }) => {
    console.log('----');
    console.log(`Searching for: ${input.city}`);
    console.log('----');
    return 'Sunny!';
  },
  {
    name: 'weather_search',
    description: 'Search for the weather',
    schema: z.object({
      city: z.string(),
    }),
  },
);

const tools = [weatherSearch];

const llm = new ChatOpenAI({
  model: 'qwen/qwen3-4b-2507',
  // model: 'google/gemma-3-12b',
  configuration: {
    baseURL: 'http://localhost:1234/v1',
    apiKey: 'not-needed',
  },
  temperature: 0,
});

const model = llm.bindTools([...tools]);

const callLLM = async (state: typeof MessagesAnnotation.State) => {
  const response = await model.invoke(state.messages);
  return { messages: [response] };
};

const humanReviewNode = async (
  state: typeof MessagesAnnotation.State,
): Promise<Command> => {
  const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
  const toolCall = lastMessage.tool_calls![lastMessage.tool_calls!.length - 1];

  type ReviewDisplayUI = {
    question: string;
    toolCall: ToolCall;
  };
  type ReviewResult = {
    action: string;
    data: any;
  };
  // 👷 waiting for feedback
  const humanReview = interrupt<ReviewDisplayUI, ReviewResult>({
    question: 'Is this correct?',
    toolCall: toolCall,
  });

  const reviewAction = humanReview.action;
  const reviewData = humanReview.data;

  if (reviewAction === 'continue') {
    // 🧩 resume
    return new Command({ goto: 'run_tool' });
  } else if (reviewAction === 'update') {
    const updatedMessage = {
      role: 'ai',
      content: lastMessage.content,
      tool_calls: [
        {
          id: toolCall.id,
          name: toolCall.name,
          args: reviewData,
        },
      ],
      id: lastMessage.id,
    };
    // 🧩 resume
    return new Command({
      goto: 'run_tool',
      update: { messages: [updatedMessage] },
    });
  } else if (reviewAction === 'feedback') {
    const toolMessage = new ToolMessage({
      name: toolCall.name,
      content: reviewData,
      // @ts-expect-error fix-types
      tool_call_id: toolCall.id,
    });
    // 🧩 resume
    return new Command({
      goto: 'call_llm',
      update: { messages: [toolMessage] },
    });
  }
  throw new Error('Invalid review action');
};

const runTool = async (state: typeof MessagesAnnotation.State) => {
  const newMessages: ToolMessage[] = [];
  const tools = { weather_search: weatherSearch };
  const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
  const toolCalls = lastMessage.tool_calls!;

  for (const toolCall of toolCalls) {
    const tool = tools[toolCall.name as keyof typeof tools];
    // @ts-expect-error fix-types
    const result = await tool.invoke(toolCall.args);
    newMessages.push(
      new ToolMessage({
        name: toolCall.name,
        // @ts-expect-error fix-types
        content: result,
        // @ts-expect-error fix-types
        tool_call_id: toolCall.id,
      }),
    );
  }
  return { messages: newMessages };
};

const routeAfterLLM = (
  state: typeof MessagesAnnotation.State,
): typeof END | 'human_review_node' => {
  const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
  if (!lastMessage.tool_calls?.length) {
    return END;
  }
  return 'human_review_node';
};

const workflow = new StateGraph(MessagesAnnotation)
  .addNode('call_llm', callLLM)
  .addNode('run_tool', runTool)
  .addNode('human_review_node', humanReviewNode, {
    ends: ['run_tool', 'call_llm'],
  })
  .addEdge(START, 'call_llm')
  .addConditionalEdges('call_llm', routeAfterLLM, ['human_review_node', END])
  .addEdge('run_tool', 'call_llm');

const memory = new MemorySaver();

const graph = workflow.compile({ checkpointer: memory });

// -------

// 🌰 an example when no review is required (because no tools are called)

let inputs = { messages: [{ role: 'user', content: 'hi!' }] };
let config = {
  configurable: { thread_id: '1' },
  streamMode: 'values' as const,
};

let stream: any;
// let stream = await graph.stream(inputs, config);

// for await (const event of stream) {
//   const recentMsg = event.messages[event.messages.length - 1];
//   console.log(`================================ ${recentMsg._getType()} Message (1) =================================`);
//   console.log(recentMsg.content);
// }

// 🌰 an example to approve a tool call

inputs = {
  messages: [{ role: 'user', content: "what's the weather in guangzhou?" }],
};
config = { configurable: { thread_id: '2' }, streamMode: 'values' as const };

stream = await graph.stream(inputs, config);

for await (const event of stream) {
  if (event.messages) {
    const recentMsg = event.messages[event.messages.length - 1];
    console.log(
      `================================ ${recentMsg._getType()} Message (1) =================================`,
    );
    console.log(recentMsg.content);
  }
}

for await (const event of await graph.stream(
  // ✅ provide resume value of { action: "continue" } to navigate to the run_tool node
  new Command({ resume: { action: 'continue' } }),
  config,
)) {
  const recentMsg = event.messages[event.messages.length - 1];
  console.log(`============ ${recentMsg.getType()} Message (1) ===========`);
  console.log(recentMsg.content);
}
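The run above only exercises the 'continue' branch of humanReviewNode. Its 'update' and 'feedback' branches can be driven the same way with different resume values; the sketch below is not part of this commit, reuses the `graph` and `Command` from the file above, and uses arbitrary thread ids ('4', '5') with `invoke` instead of `stream` just to keep it short.

```ts
// Sketch only: drive the 'update' and 'feedback' branches of humanReviewNode.
// Assumes `graph` and `Command` from the file above.
const question = { role: 'user', content: "what's the weather in guangzhou?" };

// 1) Edit the pending tool call: resume with action 'update' and replacement args.
const editConfig = { configurable: { thread_id: '4' } };
await graph.invoke({ messages: [question] }, editConfig); // pauses at human_review_node
const edited = await graph.invoke(
  new Command({ resume: { action: 'update', data: { city: 'shenzhen' } } }),
  editConfig,
);
console.log(edited.messages[edited.messages.length - 1].content);

// 2) Reject with natural-language feedback: a ToolMessage is injected and call_llm runs again.
// The graph may then pause at human_review_node once more with a revised tool call.
const feedbackConfig = { configurable: { thread_id: '5' } };
await graph.invoke({ messages: [question] }, feedbackConfig); // pauses again
const revised = await graph.invoke(
  new Command({ resume: { action: 'feedback', data: 'ask about beijing instead, please' } }),
  feedbackConfig,
);
console.log(revised.messages[revised.messages.length - 1].content);
```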
