Skip to content

Commit c01a5a2

Browse files
authored
Feature/integrate v2 api (#332)
* Integrate new v2 api
* Handle TalkToUser tool call
* Create a new flag for UseV2 API
* Fix user message state in v2 api
* Fix terminal condition in v2 api
* Fix request completion params
* Fix repeat user message
* Fix non stopping execution
* Track source of pending messages
1 parent d3cb662 commit c01a5a2

File tree

10 files changed

+436
-48
lines changed

10 files changed

+436
-48
lines changed

web/src/app/api/planner.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ import { dispatch, logoutState } from '../../state/dispatch';
44
import { toast } from '../toast';
55
const url = `${configs.PLANNER_BASE_URL}/getLLMResponse`
66
const dr_url = `${configs.BASE_SERVER_URL}/deepresearch/chat_planner`
7+
const dr_url_v2 = `${configs.BASE_SERVER_URL}/deepresearch/v2/chat_planner`
78
const dr_tool_url = `${configs.BASE_SERVER_URL}/deepresearch/chat`
89
const prewarm_url = `${configs.BASE_SERVER_URL}/deepresearch/chat_plan_warm`
910

web/src/chat/chat.ts

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import { dispatch } from '../state/dispatch'
2-
import { addActionPlanMessage, addUserMessage, startNewThread } from '../state/chat/reducer'
2+
import { addActionPlanMessage, addActionPlanMessageV2, addUserMessage, startNewThread } from '../state/chat/reducer'
33
import { DefaultMessageContent } from '../state/chat/types'
4-
import { LLMResponse } from '../helpers/LLM/types'
4+
import { LLMResponse, LLMResponseV2 } from '../helpers/LLM/types'
55
import { updateCredits } from '../state/billing/reducer'
66
import { getState } from '../state/store'
77
import { toast } from '../app/toast'
@@ -35,6 +35,11 @@ export default {
3535
// update credits. not sure if this is the best place to do this
3636
dispatch(updateCredits(llmResponse.credits))
3737
},
38+
addActionPlanFromLlmResponseV2(llmResponseV2: LLMResponseV2, debug: any) {
39+
dispatch(addActionPlanMessageV2({llmResponseV2, debug}))
40+
// update credits
41+
dispatch(updateCredits(llmResponseV2.credits))
42+
},
3843
async createNewThreadIfNeeded() {
3944
const state = getState()
4045
// if on an old thread, create a new one

web/src/components/devtools/Settings.tsx

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import { Checkbox, Button, Input, VStack, Text, Link, HStack, Box, Divider, AbsoluteCenter, Stack, Switch, Textarea, Radio, RadioGroup, IconButton, Icon, Tag, TagLabel, Badge } from '@chakra-ui/react';
22
import React, { useEffect, useState } from 'react';
33
import { dispatch, logoutState, resetState } from '../../state/dispatch';
4-
import { updateIsLocal, updateIsDevToolsOpen, updateUploadLogs, updateDevToolsTabName, DevToolsTabName, setConfirmChanges, setDemoMode, setDRMode, setAnalystMode, setModelsMode, setViewAllCatalogs, setEnableHighlightHelpers, setUseMemory, setEnableStyleCustomization, setEnableUserDebugTools, setEnableReviews, setUseV2States } from '../../state/settings/reducer';
4+
import { updateIsLocal, updateIsDevToolsOpen, updateUploadLogs, updateDevToolsTabName, DevToolsTabName, setConfirmChanges, setDemoMode, setDRMode, setAnalystMode, setModelsMode, setViewAllCatalogs, setEnableHighlightHelpers, setUseMemory, setEnableStyleCustomization, setEnableUserDebugTools, setEnableReviews, setUseV2States, setUseV2API } from '../../state/settings/reducer';
55
import { useSelector } from 'react-redux';
66
import { RootState } from '../../state/store';
77
import { configs } from '../../constants';
@@ -85,6 +85,7 @@ const SettingsPage = () => {
8585
const enableUserDebugTools = useSelector((state: RootState) => state.settings.enableUserDebugTools)
8686
const enableReviews = useSelector((state: RootState) => state.settings.enableReviews)
8787
const useV2States = useSelector((state: RootState) => state.settings.useV2States)
88+
const useV2API = useSelector((state: RootState) => state.settings.useV2API)
8889
const metadataProcessingCache = useSelector((state: RootState) => state.settings.metadataProcessingCache)
8990
const isSubscribedOrEnterpriseCustomer = billing.isSubscribed || billing.isEnterpriseCustomer
9091

@@ -187,7 +188,10 @@ const SettingsPage = () => {
187188
const updateUseV2States = (value: boolean) => {
188189
dispatch(setUseV2States(value))
189190
}
190-
191+
const updateUseV2API = (value: boolean) => {
192+
dispatch(setUseV2API(value))
193+
}
194+
191195
// const CURRENT_ACTION_TESTS = ACTION_TESTS[tool];
192196
return (
193197
<VStack className='settings-body'
@@ -306,6 +310,10 @@ const SettingsPage = () => {
306310
<Text color={"minusxBW.800"} fontSize="sm">Use v2 states</Text>
307311
<Switch color={"minusxBW.800"} colorScheme='minusxGreen' size='md' isChecked={useV2States} onChange={(e) => updateUseV2States(e.target.checked)} />
308312
</HStack>}
313+
{configs.IS_DEV && <HStack justifyContent={"space-between"}>
314+
<Text color={"minusxBW.800"} fontSize="sm">Use v2 Chat Planner API</Text>
315+
<Switch color={"minusxBW.800"} colorScheme='minusxGreen' size='md' isChecked={useV2API} onChange={(e) => updateUseV2API(e.target.checked)} />
316+
</HStack>}
309317
</VStack>
310318
</SettingsBlock>
311319
<SettingsBlock title="Privacy">

web/src/helpers/LLM/remote.ts

Lines changed: 127 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,14 @@
11
import { ToolCalls } from '../../state/chat/reducer'
2-
import { LLMResponse } from './types'
2+
import { LLMResponse, LLMResponseV2, CompletedToolCalls } from './types'
33
import { PlanActionsParams } from '.'
44
import { getLLMResponse } from '../../app/api'
55
import { getApp } from '../app'
66
import { getState } from '../../state/store'
77
import { set, unset } from 'lodash'
88
import { processAllMetadata } from '../metadataProcessor'
99
import { getParsedIframeInfo } from '../origin'
10+
import axios from 'axios'
11+
import { configs } from '../../constants'
1012

1113

1214
export async function planActionsRemote({
@@ -195,7 +197,7 @@ export const convertToMarkdown = async(appState, imgs): Promise<string[]> => {
195197
}
196198

197199
const systemMessage = `
198-
You are an incredible data scientist, and proficient at using jupyter notebooks.
200+
You are an incredible data scientist, and proficient at using jupyter notebooks.
199201
The user gives you a jupyter state and you must convert it into a markdown document.
200202
Just give a report as a markdown document based on the notebook
201203
Don't print any actual code
@@ -225,3 +227,126 @@ export const convertToMarkdown = async(appState, imgs): Promise<string[]> => {
225227
return content
226228
}
227229

230+
// V2 API planner
231+
export async function planActionsRemoteV2({
232+
signal,
233+
conversationID,
234+
meta,
235+
user_message
236+
}: Pick<PlanActionsParams, 'signal' | 'conversationID' | 'meta'> & { user_message: string }): Promise<LLMResponseV2> {
237+
const state = getState()
238+
const thread = state.chat.activeThread
239+
const activeThread = state.chat.threads[thread]
240+
const messageHistory = activeThread.messages
241+
242+
// Find the last user message to get tasks_id
243+
const lastUserMessageIdx = messageHistory.findLastIndex((message) => message.role === 'user')
244+
if (lastUserMessageIdx === -1) {
245+
throw new Error('No user message found in thread')
246+
}
247+
248+
const lastUserMessage = messageHistory[lastUserMessageIdx]
249+
const tasks_id = lastUserMessage.tasks_id || null
250+
251+
// Extract completed tool calls from the last 'pending' planner response only
252+
// Find the last assistant message with actions from pending source
253+
const completed_tool_calls: Array<{tool_call_id: string, content: string, role: 'tool'}> = []
254+
let lastPlanIdx = -1
255+
for (let i = messageHistory.length - 1; i >= lastUserMessageIdx; i--) {
256+
const msg = messageHistory[i]
257+
if (msg.role === 'assistant' && msg.content.type === 'ACTIONS') {
258+
lastPlanIdx = i
259+
break
260+
}
261+
}
262+
263+
// If we found a plan, only extract if it's from pending source (client executed)
264+
if (lastPlanIdx !== -1) {
265+
const planMessage = messageHistory[lastPlanIdx]
266+
if (planMessage.role === 'assistant' && planMessage.content.type === 'ACTIONS' &&
267+
planMessage.content.source === 'pending') {
268+
// Get all tool messages that belong to this plan and are finished
269+
for (const toolMessageIdx of planMessage.content.actionMessageIDs) {
270+
const toolMessage = messageHistory[toolMessageIdx]
271+
if (toolMessage?.role === 'tool' && toolMessage.action.finished) {
272+
let content = ''
273+
if (toolMessage.content.type === 'DEFAULT') {
274+
content = toolMessage.content.text
275+
} else if (toolMessage.content.type === 'BLANK') {
276+
content = toolMessage.content.content || ''
277+
}
278+
279+
completed_tool_calls.push({
280+
tool_call_id: toolMessage.action.id,
281+
content,
282+
role: 'tool'
283+
})
284+
}
285+
}
286+
}
287+
}
288+
289+
// Build request payload
290+
// Only send user_message on first call (when no completed_tool_calls)
291+
const payload: any = {
292+
conversationID,
293+
tasks_id,
294+
user_message: completed_tool_calls.length > 0 ? '' : user_message,
295+
completed_tool_calls,
296+
meta
297+
}
298+
299+
// Add metadata hashes for analyst mode (when both drMode and analystMode are enabled)
300+
if (state.settings.drMode && state.settings.analystMode) {
301+
try {
302+
const dbId = getApp().useStore().getState().toolContext?.dbId || undefined
303+
const parsedInfo = getParsedIframeInfo()
304+
const { cardsHash, dbSchemaHash, fieldsHash, selectedDbId } = await processAllMetadata(false, dbId)
305+
306+
payload.cardsHash = cardsHash
307+
payload.dbSchemaHash = dbSchemaHash
308+
payload.fieldsHash = fieldsHash
309+
payload.selectedDbId = `${selectedDbId}`
310+
payload.r = parsedInfo.r
311+
console.log('[minusx] Added metadata hashes to v2 request for analyst mode')
312+
} catch (error) {
313+
console.warn('[minusx] Failed to fetch metadata for analyst mode:', error)
314+
}
315+
}
316+
317+
// Add selected asset_slug if available and team memory is enabled
318+
const selectedAssetSlug = state.settings.selectedAssetId
319+
const useTeamMemory = state.settings.useTeamMemory
320+
if (selectedAssetSlug && useTeamMemory) {
321+
payload.asset_slug = selectedAssetSlug
322+
console.log('[minusx] Added asset_slug to v2 request for enhanced context:', selectedAssetSlug)
323+
}
324+
325+
// Make API call
326+
const response = await axios.post(
327+
`${configs.BASE_SERVER_URL}/deepresearch/v2/chat_planner`,
328+
payload,
329+
{
330+
headers: {
331+
'Content-Type': 'application/json',
332+
},
333+
signal
334+
}
335+
)
336+
337+
signal.throwIfAborted()
338+
339+
const jsonResponse = response.data
340+
if (jsonResponse.error) {
341+
throw new Error(jsonResponse.error)
342+
}
343+
344+
return {
345+
pending_tool_calls: jsonResponse.pending_tool_calls || [],
346+
completed_tool_calls: jsonResponse.completed_tool_calls || [],
347+
tasks_id: jsonResponse.tasks_id,
348+
credits: jsonResponse.credits,
349+
error: jsonResponse.error
350+
}
351+
}
352+

web/src/helpers/LLM/types.ts

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,18 @@ export type LLMResponse = {
2323
credits?: number
2424
tasks?: Tasks
2525
}
26+
27+
// V2 API types
28+
export type CompletedToolCall = [ToolCalls[0], { tool_call_id: string, content: string }]
29+
export type CompletedToolCalls = CompletedToolCall[]
30+
31+
export type LLMResponseV2 = {
32+
pending_tool_calls: ToolCalls
33+
completed_tool_calls: CompletedToolCalls[] // List of lists of tuples
34+
tasks_id?: string | null
35+
credits?: number
36+
error?: string
37+
}
2638
// Should add more stuff here as and when we try to experiment with them
2739
export type LLMSettings = {
2840
model: string,

web/src/planner/planner.ts

Lines changed: 39 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,10 +13,46 @@ import { getParsedIframeInfo } from '../helpers/origin';
1313
import { configs } from '../constants';
1414
export const plannerListener = createListenerMiddleware();
1515
function shouldContinue(getState: () => RootState) {
16-
const thread = getState().chat.activeThread
17-
const activeThread = getState().chat.threads[thread]
16+
const state = getState()
17+
const thread = state.chat.activeThread
18+
const activeThread = state.chat.threads[thread]
1819
const messageHistory = activeThread.messages
1920
const lastMessage = messageHistory[messageHistory.length - 1]
21+
22+
// For V2 API: check if should continue
23+
const useV2Api = state.settings.useV2API && state.settings.drMode
24+
if (useV2Api) {
25+
// Check if there are any unfinished tool messages (TODO status)
26+
const hasUnfinishedTools = messageHistory.some(
27+
(msg) => msg.role === 'tool' && !msg.action.finished
28+
)
29+
30+
// If there are unfinished tools, continue to execute them
31+
if (hasUnfinishedTools) {
32+
return true
33+
}
34+
35+
// Find the last assistant message to check its source
36+
const lastAssistantMsg = messageHistory.findLast(
37+
(msg) => msg.role === 'assistant' && msg.content.type === 'ACTIONS'
38+
)
39+
40+
if (lastAssistantMsg && lastAssistantMsg.role === 'assistant' && lastAssistantMsg.content.type === 'ACTIONS') {
41+
// If tools came from pending (we executed them), send results back to server
42+
if (lastAssistantMsg.content.source === 'pending') {
43+
return true
44+
}
45+
// If tools came from completed (server sent them), stop (no more work)
46+
if (lastAssistantMsg.content.source === 'completed') {
47+
return false
48+
}
49+
}
50+
51+
// Default: stop
52+
return false
53+
}
54+
55+
// For V1 API: existing logic
2056
// check if there are 0 tool calls in the last assistant message. if so, we don't continue
2157
if (lastMessage.role == 'assistant' && lastMessage.content.toolCalls.length == 0) {
2258
return false
@@ -25,7 +61,7 @@ function shouldContinue(getState: () => RootState) {
2561
if (lastMessage.role == 'tool' && (lastMessage.action.function.name == 'markTaskDone' || lastMessage.action.function.name == 'UpdateTaskStatus')) {
2662
return false;
2763
} else {
28-
// if last tool was not respondToUser, we continue anyway. not sure if we should keep it this way?
64+
// if last tool was not respondToUser, we continue anyway. not sure if we should keep it this way?
2965
return true
3066
}
3167
}

0 commit comments

Comments (0)