@@ -7,6 +7,7 @@ import com.aallam.openai.api.core.Status
import com.aallam.openai.api.model.ModelId
import com.aallam.openai.api.thread.ThreadId
import com.aallam.openai.api.core.LastError
+ import com.aallam.openai.api.core.Usage
import kotlinx.serialization.SerialName
import kotlinx.serialization.Serializable
@@ -102,4 +103,30 @@ public data class Run(
* Keys can be a maximum of 64 characters long, and values can be a maximum of 512 characters long.
*/
@SerialName("metadata") val metadata: Map<String, String>? = null,
+
+ /**
+ * Usage statistics related to the run.
+ * This value will be null if the run is not in a terminal state (i.e. in_progress, queued, etc.).
+ */
+ @SerialName("usage") public val usage: Usage? = null,
+
+ /**
+ * The sampling temperature used for this run. If not set, defaults to 1.
+ */
+ @SerialName("temperature") val temperature: Double? = null,
+
+ /**
+ * The nucleus sampling value used for this run. If not set, defaults to 1.
+ */
+ @SerialName("top_p") val topP: Double? = null,
+
+ /**
+ * The maximum number of prompt tokens specified to have been used over the course of the run.
+ */
+ @SerialName("max_prompt_tokens") val maxPromptTokens: Int? = null,
+
+ /**
+ * The maximum number of completion tokens specified to have been used over the course of the run.
+ */
+ @SerialName("max_completion_tokens") val maxCompletionTokens: Int? = null,
)
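
For context, a minimal sketch of how calling code might read the new fields once it has a `Run` in hand. The `Usage` property names (`promptTokens`, `completionTokens`, `totalTokens`), the `com.aallam.openai.api.run.Run` package, and the `BetaOpenAI` opt-in are assumptions based on the rest of the library and may need adjusting:

```kotlin
import com.aallam.openai.api.BetaOpenAI
import com.aallam.openai.api.run.Run

// Sketch only: summarizes the fields added in this change, e.g. for logging.
// Assumes Usage exposes promptTokens, completionTokens and totalTokens, and that
// the assistants API types still require opting in to BetaOpenAI.
@OptIn(BetaOpenAI::class)
fun Run.samplingAndUsageSummary(): String = buildString {
    // temperature and top_p default to 1 on the API side when not set
    appendLine("temperature=${temperature ?: 1.0}, topP=${topP ?: 1.0}")
    appendLine("maxPromptTokens=$maxPromptTokens, maxCompletionTokens=$maxCompletionTokens")
    val runUsage = usage
    if (runUsage != null) {
        appendLine(
            "tokens: prompt=${runUsage.promptTokens}, " +
                "completion=${runUsage.completionTokens}, total=${runUsage.totalTokens}"
        )
    } else {
        // usage stays null until the run reaches a terminal state
        appendLine("usage: not reported yet")
    }
}
```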