@@ -38,18 +38,14 @@ import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage"

import { vscode } from "../../utils/vscode"
import VSCodeButtonLink from "../common/VSCodeButtonLink"
- import { OpenRouterModelPicker } from "./OpenRouterModelPicker"
- import OpenAiModelPicker from "./OpenAiModelPicker"
- import { GlamaModelPicker } from "./GlamaModelPicker"
- import { UnboundModelPicker } from "./UnboundModelPicker"
import { ModelInfoView } from "./ModelInfoView"
import { DROPDOWN_Z_INDEX } from "./styles"
- import { RequestyModelPicker } from "./RequestyModelPicker"
+ import { ModelPicker } from "./ModelPicker"
import { TemperatureControl } from "./TemperatureControl"

interface ApiOptionsProps {
	uriScheme: string | undefined
- 	apiConfiguration: ApiConfiguration | undefined
+ 	apiConfiguration: ApiConfiguration
	setApiConfigurationField: <K extends keyof ApiConfiguration>(field: K, value: ApiConfiguration[K]) => void
	apiErrorMessage?: string
	modelIdErrorMessage?: string
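
This hunk consolidates the provider-specific pickers (OpenRouterModelPicker, OpenAiModelPicker, GlamaModelPicker, UnboundModelPicker, RequestyModelPicker) into a single generic ModelPicker. Its definition is not part of this diff, so the sketch below is only inferred from the call sites further down; the prop names, types, and optionality are assumptions, not the actual contents of ModelPicker.tsx.

// Sketch of the props the <ModelPicker /> usages in this diff appear to require (inferred, not from ModelPicker.tsx).
// ApiConfiguration and ModelInfo are the extension's shared API types, imported elsewhere in this file.
interface ModelPickerProps {
	apiConfiguration: ApiConfiguration
	setApiConfigurationField: <K extends keyof ApiConfiguration>(field: K, value: ApiConfiguration[K]) => void
	defaultModelId?: string // the OpenAI call site omits it, so it is presumably optional
	defaultModelInfo: ModelInfo
	models: Record<string, ModelInfo> | null // openAiModels is initialized to null
	modelIdKey: keyof ApiConfiguration // e.g. "openRouterModelId"
	modelInfoKey: keyof ApiConfiguration // e.g. "openRouterModelInfo"
	serviceName: string // display name, e.g. "OpenRouter"
	serviceUrl: string // link to the provider's model catalog
	recommendedModel: string // model id suggested in the picker UI
}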
@@ -67,6 +63,20 @@ const ApiOptions = ({
	const [ollamaModels, setOllamaModels] = useState<string[]>([])
	const [lmStudioModels, setLmStudioModels] = useState<string[]>([])
	const [vsCodeLmModels, setVsCodeLmModels] = useState<vscodemodels.LanguageModelChatSelector[]>([])
+ 	const [openRouterModels, setOpenRouterModels] = useState<Record<string, ModelInfo>>({
+ 		[openRouterDefaultModelId]: openRouterDefaultModelInfo,
+ 	})
+ 	const [glamaModels, setGlamaModels] = useState<Record<string, ModelInfo>>({
+ 		[glamaDefaultModelId]: glamaDefaultModelInfo,
+ 	})
+ 	const [unboundModels, setUnboundModels] = useState<Record<string, ModelInfo>>({
+ 		[unboundDefaultModelId]: unboundDefaultModelInfo,
+ 	})
+ 	const [requestyModels, setRequestyModels] = useState<Record<string, ModelInfo>>({
+ 		[requestyDefaultModelId]: requestyDefaultModelInfo,
+ 	})
+ 	const [openAiModels, setOpenAiModels] = useState<Record<string, ModelInfo> | null>(null)
+
	const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl)
	const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion)
	const [openRouterBaseUrlSelected, setOpenRouterBaseUrlSelected] = useState(!!apiConfiguration?.openRouterBaseUrl)
@@ -104,24 +114,93 @@ const ApiOptions = ({
				vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl })
			} else if (selectedProvider === "vscode-lm") {
				vscode.postMessage({ type: "requestVsCodeLmModels" })
+ 			} else if (selectedProvider === "openai") {
+ 				vscode.postMessage({
+ 					type: "refreshOpenAiModels",
+ 					values: {
+ 						baseUrl: apiConfiguration?.openAiBaseUrl,
+ 						apiKey: apiConfiguration?.openAiApiKey,
+ 					},
+ 				})
+ 			} else if (selectedProvider === "openrouter") {
+ 				vscode.postMessage({ type: "refreshOpenRouterModels", values: {} })
+ 			} else if (selectedProvider === "glama") {
+ 				vscode.postMessage({ type: "refreshGlamaModels", values: {} })
+ 			} else if (selectedProvider === "requesty") {
+ 				vscode.postMessage({
+ 					type: "refreshRequestyModels",
+ 					values: {
+ 						apiKey: apiConfiguration?.requestyApiKey,
+ 					},
+ 				})
			}
		},
		250,
- 		[selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl],
+ 		[
+ 			selectedProvider,
+ 			apiConfiguration?.ollamaBaseUrl,
+ 			apiConfiguration?.lmStudioBaseUrl,
+ 			apiConfiguration?.openAiBaseUrl,
+ 			apiConfiguration?.openAiApiKey,
+ 			apiConfiguration?.requestyApiKey,
+ 		],
	)

	const handleMessage = useCallback((event: MessageEvent) => {
		const message: ExtensionMessage = event.data
-
- 		if (message.type === "ollamaModels" && Array.isArray(message.ollamaModels)) {
- 			const newModels = message.ollamaModels
- 			setOllamaModels(newModels)
- 		} else if (message.type === "lmStudioModels" && Array.isArray(message.lmStudioModels)) {
- 			const newModels = message.lmStudioModels
- 			setLmStudioModels(newModels)
- 		} else if (message.type === "vsCodeLmModels" && Array.isArray(message.vsCodeLmModels)) {
- 			const newModels = message.vsCodeLmModels
- 			setVsCodeLmModels(newModels)
+ 		switch (message.type) {
+ 			case "ollamaModels":
+ 				{
+ 					const newModels = message.ollamaModels ?? []
+ 					setOllamaModels(newModels)
+ 				}
+ 				break
+ 			case "lmStudioModels":
+ 				{
+ 					const newModels = message.lmStudioModels ?? []
+ 					setLmStudioModels(newModels)
+ 				}
+ 				break
+ 			case "vsCodeLmModels":
+ 				{
+ 					const newModels = message.vsCodeLmModels ?? []
+ 					setVsCodeLmModels(newModels)
+ 				}
+ 				break
+ 			case "glamaModels": {
+ 				const updatedModels = message.glamaModels ?? {}
+ 				setGlamaModels({
+ 					[glamaDefaultModelId]: glamaDefaultModelInfo, // in case the extension sent a model list without the default model
+ 					...updatedModels,
+ 				})
+ 				break
+ 			}
+ 			case "openRouterModels": {
+ 				const updatedModels = message.openRouterModels ?? {}
+ 				setOpenRouterModels({
+ 					[openRouterDefaultModelId]: openRouterDefaultModelInfo, // in case the extension sent a model list without the default model
+ 					...updatedModels,
+ 				})
+ 				break
+ 			}
+ 			case "openAiModels": {
+ 				const updatedModels = message.openAiModels ?? []
+ 				setOpenAiModels(Object.fromEntries(updatedModels.map((item) => [item, openAiModelInfoSaneDefaults])))
+ 				break
+ 			}
+ 			case "unboundModels": {
+ 				const updatedModels = message.unboundModels ?? {}
+ 				setUnboundModels(updatedModels)
+ 				break
+ 			}
+ 			case "requestyModels": {
+ 				const updatedModels = message.requestyModels ?? {}
+ 				setRequestyModels({
+ 					[requestyDefaultModelId]: requestyDefaultModelInfo, // in case the extension sent a model list without the default model
+ 					...updatedModels,
+ 				})
+ 				break
+ 			}
		}
	}, [])
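
The two changes above form a request/reply loop: the debounced effect posts refresh* messages to the extension host, and handleMessage consumes the matching *Models replies, merging the default model back in so the picker never renders an empty list. A minimal, stripped-down sketch of that pattern for a single provider, assuming the vscode wrapper exported from "../../utils/vscode" (a thin layer over acquireVsCodeApi()); the real component keeps this state inline rather than in a hook:

import { useCallback, useEffect } from "react"
import { vscode } from "../../utils/vscode"

// Illustrative hook: request OpenRouter models and hand the reply to the caller.
export function useOpenRouterModels(onModels: (models: Record<string, unknown>) => void) {
	// Ask the extension host to refresh the OpenRouter catalog (message type taken from the diff above).
	const refresh = useCallback(() => {
		vscode.postMessage({ type: "refreshOpenRouterModels", values: {} })
	}, [])

	// The extension host replies on the webview's window message channel.
	useEffect(() => {
		const handler = (event: MessageEvent) => {
			const message = event.data
			if (message?.type === "openRouterModels") {
				onModels(message.openRouterModels ?? {})
			}
		}
		window.addEventListener("message", handler)
		return () => window.removeEventListener("message", handler)
	}, [onModels])

	return refresh
}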
@@ -616,7 +695,17 @@ const ApiOptions = ({
					placeholder="Enter API Key...">
					<span style={{ fontWeight: 500 }}>API Key</span>
				</VSCodeTextField>
- 				<OpenAiModelPicker />
+ 				<ModelPicker
+ 					apiConfiguration={apiConfiguration}
+ 					modelIdKey="openAiModelId"
+ 					modelInfoKey="openAiCustomModelInfo"
+ 					serviceName="OpenAI"
+ 					serviceUrl="https://platform.openai.com"
+ 					recommendedModel="gpt-4-turbo-preview"
+ 					models={openAiModels}
+ 					setApiConfigurationField={setApiConfigurationField}
+ 					defaultModelInfo={openAiModelInfoSaneDefaults}
+ 				/>
				<div style={{ display: "flex", alignItems: "center" }}>
					<Checkbox
						checked={apiConfiguration?.openAiStreamingEnabled ?? true}
@@ -704,7 +793,7 @@ const ApiOptions = ({
						})(),
					}}
					title="Maximum number of tokens the model can generate in a single response"
- 					onChange={handleInputChange("openAiCustomModelInfo", (e) => {
+ 					onInput={handleInputChange("openAiCustomModelInfo", (e) => {
						const value = parseInt((e.target as HTMLInputElement).value)
						return {
							...(apiConfiguration?.openAiCustomModelInfo ||
@@ -751,7 +840,7 @@ const ApiOptions = ({
						})(),
					}}
					title="Total number of tokens (input + output) the model can process in a single request"
- 					onChange={handleInputChange("openAiCustomModelInfo", (e) => {
+ 					onInput={handleInputChange("openAiCustomModelInfo", (e) => {
						const value = (e.target as HTMLInputElement).value
						const parsed = parseInt(value)
						return {
@@ -897,7 +986,7 @@ const ApiOptions = ({
								: "var(--vscode-errorForeground)"
						})(),
					}}
- 					onChange={handleInputChange("openAiCustomModelInfo", (e) => {
+ 					onInput={handleInputChange("openAiCustomModelInfo", (e) => {
						const value = (e.target as HTMLInputElement).value
						const parsed = parseInt(value)
						return {
@@ -942,7 +1031,7 @@ const ApiOptions = ({
								: "var(--vscode-errorForeground)"
						})(),
					}}
- 					onChange={handleInputChange("openAiCustomModelInfo", (e) => {
+ 					onInput={handleInputChange("openAiCustomModelInfo", (e) => {
						const value = (e.target as HTMLInputElement).value
						const parsed = parseInt(value)
						return {
@@ -1011,6 +1100,7 @@ const ApiOptions = ({
					placeholder={"e.g. meta-llama-3.1-8b-instruct"}>
					<span style={{ fontWeight: 500 }}>Model ID</span>
				</VSCodeTextField>
+
				{lmStudioModels.length > 0 && (
					<VSCodeRadioGroup
						value={
@@ -1220,7 +1310,18 @@ const ApiOptions = ({
					}}>
					This key is stored locally and only used to make API requests from this extension.
				</p>
- 				<UnboundModelPicker />
+ 				<ModelPicker
+ 					apiConfiguration={apiConfiguration}
+ 					defaultModelId={unboundDefaultModelId}
+ 					defaultModelInfo={unboundDefaultModelInfo}
+ 					models={unboundModels}
+ 					modelInfoKey="unboundModelInfo"
+ 					modelIdKey="unboundModelId"
+ 					serviceName="Unbound"
+ 					serviceUrl="https://api.getunbound.ai/models"
+ 					recommendedModel={unboundDefaultModelId}
+ 					setApiConfigurationField={setApiConfigurationField}
+ 				/>
			</div>
		)}

@@ -1236,9 +1337,49 @@ const ApiOptions = ({
				</p>
			)}

- 			{selectedProvider === "glama" && <GlamaModelPicker />}
- 			{selectedProvider === "openrouter" && <OpenRouterModelPicker />}
- 			{selectedProvider === "requesty" && <RequestyModelPicker />}
+ 			{selectedProvider === "glama" && (
+ 				<ModelPicker
+ 					apiConfiguration={apiConfiguration ?? {}}
+ 					defaultModelId={glamaDefaultModelId}
+ 					defaultModelInfo={glamaDefaultModelInfo}
+ 					models={glamaModels}
+ 					modelInfoKey="glamaModelInfo"
+ 					modelIdKey="glamaModelId"
+ 					serviceName="Glama"
+ 					serviceUrl="https://glama.ai/models"
+ 					recommendedModel="anthropic/claude-3-7-sonnet"
+ 					setApiConfigurationField={setApiConfigurationField}
+ 				/>
+ 			)}
+
+ 			{selectedProvider === "openrouter" && (
+ 				<ModelPicker
+ 					apiConfiguration={apiConfiguration}
+ 					setApiConfigurationField={setApiConfigurationField}
+ 					defaultModelId={openRouterDefaultModelId}
+ 					defaultModelInfo={openRouterDefaultModelInfo}
+ 					models={openRouterModels}
+ 					modelIdKey="openRouterModelId"
+ 					modelInfoKey="openRouterModelInfo"
+ 					serviceName="OpenRouter"
+ 					serviceUrl="https://openrouter.ai/models"
+ 					recommendedModel="anthropic/claude-3.7-sonnet"
+ 				/>
+ 			)}
+ 			{selectedProvider === "requesty" && (
+ 				<ModelPicker
+ 					apiConfiguration={apiConfiguration}
+ 					setApiConfigurationField={setApiConfigurationField}
+ 					defaultModelId={requestyDefaultModelId}
+ 					defaultModelInfo={requestyDefaultModelInfo}
+ 					models={requestyModels}
+ 					modelIdKey="requestyModelId"
+ 					modelInfoKey="requestyModelInfo"
+ 					serviceName="Requesty"
+ 					serviceUrl="https://requesty.ai"
+ 					recommendedModel="anthropic/claude-3-7-sonnet-latest"
+ 				/>
+ 			)}

			{selectedProvider !== "glama" &&
				selectedProvider !== "openrouter" &&
@@ -1260,7 +1401,6 @@ const ApiOptions = ({
				{selectedProvider === "deepseek" && createDropdown(deepSeekModels)}
				{selectedProvider === "mistral" && createDropdown(mistralModels)}
			</div>
-
			<ModelInfoView
				selectedModelId={selectedModelId}
				modelInfo={selectedModelInfo}
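
Since every picker now shares setApiConfigurationField, a selection presumably writes two fields: the chosen id under modelIdKey and its metadata under modelInfoKey. A hypothetical handler using the OpenRouter keys from this diff (the actual handler lives in ModelPicker.tsx, outside these hunks):

// Hypothetical selection handler — illustrates the modelIdKey/modelInfoKey contract, not copied from ModelPicker.tsx.
const onSelectModel = (modelId: string) => {
	setApiConfigurationField("openRouterModelId", modelId)
	setApiConfigurationField("openRouterModelInfo", openRouterModels[modelId] ?? openRouterDefaultModelInfo)
}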