@@ -2,6 +2,7 @@
 
 using Newtonsoft.Json;
 using OpenAI.Extensions;
+using System;
 using System.Collections.Generic;
 using System.Linq;
 using UnityEngine.Scripting;
@@ -11,6 +12,36 @@ namespace OpenAI.Assistants
     [Preserve]
     public sealed class CreateAssistantRequest
     {
+        [Obsolete("use new .ctr")]
+        public CreateAssistantRequest(
+            AssistantResponse assistant,
+            string model,
+            string name,
+            string description,
+            string instructions,
+            IEnumerable<Tool> tools,
+            ToolResources toolResources,
+            IReadOnlyDictionary<string, string> metadata,
+            double? temperature,
+            double? topP,
+            JsonSchema jsonSchema,
+            ChatResponseFormat? responseFormat = null)
+            : this(
+                string.IsNullOrWhiteSpace(model) ? assistant.Model : model,
+                string.IsNullOrWhiteSpace(name) ? assistant.Name : name,
+                string.IsNullOrWhiteSpace(description) ? assistant.Description : description,
+                string.IsNullOrWhiteSpace(instructions) ? assistant.Instructions : instructions,
+                tools ?? assistant.Tools,
+                toolResources ?? assistant.ToolResources,
+                metadata ?? assistant.Metadata,
+                temperature ?? (assistant.ReasoningEffort > 0 ? null : assistant.Temperature),
+                topP ?? assistant.TopP,
+                0,
+                jsonSchema ?? assistant.ResponseFormatObject?.JsonSchema,
+                responseFormat ?? assistant.ResponseFormat)
+        {
+        }
+
         /// <summary>
         /// Constructor
         /// </summary>
@@ -57,6 +88,11 @@ public sealed class CreateAssistantRequest
         /// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
         /// We generally recommend altering this or temperature but not both.
         /// </param>
+        /// <param name="reasoningEffort">
+        /// Constrains effort on reasoning for reasoning models.
+        /// Currently supported values are low, medium, and high.
+        /// Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+        /// </param>
         /// <param name="jsonSchema">
         /// The <see cref="JsonSchema"/> to use for structured JSON outputs.<br/>
         /// <see href="https://platform.openai.com/docs/guides/structured-outputs"/><br/>
@@ -83,6 +119,7 @@ public CreateAssistantRequest(
             IReadOnlyDictionary<string, string> metadata = null,
             double? temperature = null,
             double? topP = null,
+            ReasoningEffort reasoningEffort = 0,
             JsonSchema jsonSchema = null,
             ChatResponseFormat? responseFormat = null)
             : this(
@@ -93,8 +130,9 @@ public CreateAssistantRequest(
                tools ?? assistant.Tools,
                toolResources ?? assistant.ToolResources,
                metadata ?? assistant.Metadata,
-               temperature ?? assistant.Temperature,
-               topP ?? assistant.TopP,
+               temperature ?? (assistant.ReasoningEffort > 0 ? null : assistant.Temperature),
+               topP ?? (assistant.ReasoningEffort > 0 ? null : assistant.TopP),
+               reasoningEffort,
                jsonSchema ?? assistant.ResponseFormatObject?.JsonSchema,
                responseFormat ?? assistant.ResponseFormat)
         {
@@ -145,6 +183,11 @@ public CreateAssistantRequest(
         /// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
         /// We generally recommend altering this or temperature but not both.
         /// </param>
+        /// <param name="reasoningEffort">
+        /// Constrains effort on reasoning for reasoning models.
+        /// Currently supported values are low, medium, and high.
+        /// Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+        /// </param>
         /// <param name="jsonSchema">
         /// The <see cref="JsonSchema"/> to use for structured JSON outputs.<br/>
         /// <see href="https://platform.openai.com/docs/guides/structured-outputs"/><br/>
@@ -170,6 +213,7 @@ public CreateAssistantRequest(
             IReadOnlyDictionary<string, string> metadata = null,
             double? temperature = null,
             double? topP = null,
+            ReasoningEffort reasoningEffort = 0,
             JsonSchema jsonSchema = null,
             ChatResponseFormat responseFormat = ChatResponseFormat.Text)
         {
@@ -180,8 +224,9 @@ public CreateAssistantRequest(
             Tools = tools?.ToList();
             ToolResources = toolResources;
             Metadata = metadata;
-            Temperature = temperature;
-            TopP = topP;
+            Temperature = reasoningEffort > 0 ? null : temperature;
+            TopP = reasoningEffort > 0 ? null : topP;
+            ReasoningEffort = reasoningEffort;
 
             if (jsonSchema != null)
             {
@@ -232,7 +277,7 @@ public CreateAssistantRequest(
         /// Tools can be of types 'code_interpreter', 'retrieval', or 'function'.
         /// </summary>
         [Preserve]
-        [JsonProperty("tools")]
+        [JsonProperty("tools", DefaultValueHandling = DefaultValueHandling.Ignore)]
         public IReadOnlyList<Tool> Tools { get; }
 
         /// <summary>
@@ -241,7 +286,7 @@ public CreateAssistantRequest(
         /// While the <see cref="Tool.FileSearch"/> requires a list vector store ids.
         /// </summary>
         [Preserve]
-        [JsonProperty("tool_resources")]
+        [JsonProperty("tool_resources", DefaultValueHandling = DefaultValueHandling.Ignore)]
         public ToolResources ToolResources { get; }
 
         /// <summary>
@@ -250,7 +295,7 @@ public CreateAssistantRequest(
         /// while lower values like 0.2 will make it more focused and deterministic.
         /// </summary>
         [Preserve]
-        [JsonProperty("temperature")]
+        [JsonProperty("temperature", DefaultValueHandling = DefaultValueHandling.Ignore)]
         public double? Temperature { get; }
 
         /// <summary>
@@ -259,9 +304,18 @@ public CreateAssistantRequest(
         /// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
         /// </summary>
         [Preserve]
-        [JsonProperty("top_p")]
+        [JsonProperty("top_p", DefaultValueHandling = DefaultValueHandling.Ignore)]
         public double? TopP { get; }
 
+        /// <summary>
+        /// Constrains effort on reasoning for reasoning models.
+        /// Currently supported values are low, medium, and high.
+        /// Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+        /// </summary>
+        [Preserve]
+        [JsonProperty("reasoning_effort", DefaultValueHandling = DefaultValueHandling.Ignore)]
+        public ReasoningEffort ReasoningEffort { get; }
+
         /// <summary>
         /// Specifies the format that the model must output.
         /// Setting to <see cref="ChatResponseFormat.Json"/> or <see cref="ChatResponseFormat.JsonSchema"/> enables JSON mode,
@@ -289,7 +343,7 @@ public CreateAssistantRequest(
         /// Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
         /// </summary>
         [Preserve]
-        [JsonProperty("metadata")]
+        [JsonProperty("metadata", DefaultValueHandling = DefaultValueHandling.Ignore)]
         public IReadOnlyDictionary<string, string> Metadata { get; }
     }
 }
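
For reference, a minimal usage sketch of the new reasoningEffort parameter. This is not part of the diff: the model id, the ReasoningEffort.Low member, and the OpenAIClient/AssistantsEndpoint calls are assumptions based on the package's existing Assistants API.

using System.Threading.Tasks;
using OpenAI;
using OpenAI.Assistants;

public static class ReasoningEffortExample
{
    // Sketch: create an assistant with reduced reasoning effort.
    // Per the constructor changes above, a non-default reasoningEffort
    // causes Temperature and TopP to be nulled out on the request.
    public static async Task<AssistantResponse> CreateAsync(OpenAIClient api)
    {
        var request = new CreateAssistantRequest(
            model: "o3-mini",                      // assumed reasoning-capable model id
            name: "Reasoning Assistant",
            instructions: "You are a helpful assistant.",
            reasoningEffort: ReasoningEffort.Low); // assumed enum member per the docs above
        return await api.AssistantsEndpoint.CreateAssistantAsync(request);
    }
}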
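A small sketch of what the added DefaultValueHandling.Ignore settings imply for the serialized payload. Hedged: this uses plain Newtonsoft JsonConvert; the package may route requests through its own serializer settings.

using Newtonsoft.Json;
using OpenAI.Assistants;

public static class SerializationSketch
{
    public static void Main()
    {
        // Only the model is set; tools, tool_resources, temperature, top_p,
        // reasoning_effort, and metadata stay at their defaults, so the
        // Ignore handling added above should drop them from the JSON.
        var request = new CreateAssistantRequest(model: "gpt-4o");
        var json = JsonConvert.SerializeObject(request);
        System.Console.WriteLine(json);
    }
}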