diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c10c3496..60bb1c95 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -69,6 +69,8 @@ jobs: test: "*.TestThreads" - name: Vector Stores test: "*.TestVectorStores" + - name: Responses + test: "*.TestResponses" - name: Misc. test: "*.misc.*" steps: diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/OpenAI.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/OpenAI.kt index ff7b7560..05a92f59 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/OpenAI.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/OpenAI.kt @@ -11,7 +11,7 @@ import kotlin.time.Duration.Companion.seconds * OpenAI API. */ public interface OpenAI : Completions, Files, Edits, Embeddings, Models, Moderations, FineTunes, Images, Chat, Audio, - FineTuning, Assistants, Threads, Runs, Messages, VectorStores, Batch, AutoCloseable + FineTuning, Assistants, Threads, Runs, Messages, VectorStores, Batch, Responses, AutoCloseable /** * Creates an instance of [OpenAI]. diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Responses.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Responses.kt new file mode 100644 index 00000000..13a1f560 --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/Responses.kt @@ -0,0 +1,66 @@ +package com.aallam.openai.client + +import com.aallam.openai.api.core.RequestOptions +import com.aallam.openai.api.responses.Response +import com.aallam.openai.api.responses.ResponseIncludable +import com.aallam.openai.api.responses.ResponseItem +import com.aallam.openai.api.responses.ResponseRequest + +/** Interface for OpenAI's Responses API */ +public interface Responses { + /** + * Create a new response. 
+ * + * @param request The request for creating a response + * @param requestOptions Optional request configuration + * @return The created response + */ + public suspend fun createResponse( + request: ResponseRequest, + requestOptions: RequestOptions? = null + ): Response + + /** + * Retrieves a model response with the given ID. + * + * @param responseId The ID of the response to retrieve + * @param include Additional fields to include in the response. + * @param requestOptions Optional request configuration + */ + public suspend fun getResponse( + responseId: String, + include: List? = null, + requestOptions: RequestOptions? = null): Response + + /** + * Deletes a model response with the given ID. + * + * @param responseId The ID of the response to delete + * @param requestOptions Optional request configuration + */ + public suspend fun deleteResponse( + responseId: String, + requestOptions: RequestOptions? = null): Boolean + + /** + * Returns a list of input items for a given response. + * + * @param responseId The ID of the response + * @param after An item ID to list items after, used in pagination. + * @param before An item ID to list items before, used in pagination. + * @param include Additional fields to include in the response. + * @param limit A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + * @param order The order to return the input items in. Can be either "asc" or "desc". Default is "desc". + * @param requestOptions Optional request configuration + */ + public suspend fun listResponseItems( + responseId: String, + after: String? = null, + before: String? = null, + include: List? = null, + limit: Int? = null, + order: String? = null, + requestOptions: RequestOptions? 
= null): List + + //TODO Streaming +} diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/OpenAIApi.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/OpenAIApi.kt index 4612c433..e86dba01 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/OpenAIApi.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/OpenAIApi.kt @@ -29,4 +29,5 @@ internal class OpenAIApi( Messages by MessagesApi(requester), VectorStores by VectorStoresApi(requester), Batch by BatchApi(requester), + Responses by ResponsesApi(requester), AutoCloseable by requester diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ApiPath.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ApiPath.kt index 72fd0e62..46334f40 100644 --- a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ApiPath.kt +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ApiPath.kt @@ -23,4 +23,5 @@ internal object ApiPath { const val Threads = "threads" const val VectorStores = "vector_stores" const val Batches = "batches" + const val Responses = "responses" } diff --git a/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ResponsesApi.kt b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ResponsesApi.kt new file mode 100644 index 00000000..72204d1c --- /dev/null +++ b/openai-client/src/commonMain/kotlin/com.aallam.openai.client/internal/api/ResponsesApi.kt @@ -0,0 +1,85 @@ +package com.aallam.openai.client.internal.api + +import com.aallam.openai.api.core.DeleteResponse +import com.aallam.openai.api.core.ListResponse +import com.aallam.openai.api.core.RequestOptions +import com.aallam.openai.api.responses.Response +import com.aallam.openai.api.responses.ResponseIncludable +import com.aallam.openai.api.responses.ResponseItem +import 
com.aallam.openai.api.responses.ResponseRequest +import com.aallam.openai.client.Responses +import com.aallam.openai.client.internal.extension.requestOptions +import com.aallam.openai.client.internal.http.HttpRequester +import com.aallam.openai.client.internal.http.perform + +import io.ktor.client.* +import io.ktor.client.call.* +import io.ktor.client.request.* +import io.ktor.client.statement.* +import io.ktor.http.* + +internal class ResponsesApi(private val requester: HttpRequester) : Responses { + override suspend fun createResponse(request: ResponseRequest, requestOptions: RequestOptions?): Response { + return requester.perform { client: HttpClient -> + client.post { + url(path = ApiPath.Responses) + setBody(request.copy(stream = false)) + contentType(ContentType.Application.Json) + requestOptions(requestOptions) + }.body() + } + } + + override suspend fun getResponse( + responseId: String, + include: List?, + requestOptions: RequestOptions? + ): Response { + return requester.perform { client: HttpClient -> + client.get { + url(path = "${ApiPath.Responses}/$responseId") + parameter("include", include) + requestOptions(requestOptions) + }.body() + } + } + + override suspend fun deleteResponse(responseId: String, requestOptions: RequestOptions?): Boolean { + val response = requester.perform { + it.delete { + url(path = "${ApiPath.Responses}/$responseId") + requestOptions(requestOptions) + } + } + + return when (response.status) { + HttpStatusCode.NotFound -> false + else -> response.body().deleted + } + } + + override suspend fun listResponseItems( + responseId: String, + after: String?, + before: String?, + include: List?, + limit: Int?, + order: String?, + requestOptions: RequestOptions? 
+ ): List { + return requester.perform> { + it.get { + url(path = "${ApiPath.Responses}/$responseId/items") + parameter("after", after) + parameter("before", before) + parameter("include", include) + parameter("limit", limit) + parameter("order", order) + requestOptions(requestOptions) + } + }.data + } + + //TODO Add streaming + +} diff --git a/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestResponses.kt b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestResponses.kt new file mode 100644 index 00000000..90113691 --- /dev/null +++ b/openai-client/src/commonTest/kotlin/com/aallam/openai/client/TestResponses.kt @@ -0,0 +1,81 @@ +package com.aallam.openai.client + +import com.aallam.openai.api.core.Parameters.Companion.buildJsonObject +import com.aallam.openai.api.model.ModelId +import com.aallam.openai.api.responses.* +import kotlinx.serialization.json.add +import kotlinx.serialization.json.put +import kotlinx.serialization.json.putJsonArray +import kotlinx.serialization.json.putJsonObject +import kotlin.test.Test +import kotlin.test.assertNotNull + +class TestResponses : TestOpenAI() { + + @Test + fun basicResponse() = test { + val response = openAI.createResponse( + request = responseRequest { + model = ModelId("gpt-4o") + input = ResponseInput.from("What is the capital of France?") + } + ) + + assertNotNull(response) + assertNotNull(response.output) + } + + @Test + fun responseWithTools() = test { + val response = openAI.createResponse( + request = responseRequest { + model = ModelId("gpt-4o") + input = ResponseInput.from("What's the weather like in Paris?") + tools { + add( + ResponseTool.ResponseFunctionTool( + name = "get_weather", + description = "Get the current weather", + parameters = buildJsonObject { + put("type", "object") + putJsonObject("properties") { + putJsonObject("location") { + put("type", "string") + put("description", "The city and state, e.g. 
San Francisco, CA") + } + putJsonObject("unit") { + put("type", "string") + putJsonArray("enum") { + add("celsius") + add("fahrenheit") + } + } + } + putJsonArray("required") { + add("location") + } + }) + ) + } + }) + + + assertNotNull(response) + assertNotNull(response.output) + } + + @Test + fun responseWithInstructions() = test { + val response = openAI.createResponse( + request = responseRequest { + model = ModelId("gpt-4o") + input = ResponseInput.from("Tell me about artificial intelligence") + instructions = "Provide a concise answer focusing on recent developments" + maxOutputTokens = 200 + } + ) + + assertNotNull(response) + assertNotNull(response.output) + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ReasoningConfig.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ReasoningConfig.kt new file mode 100644 index 00000000..1004927a --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ReasoningConfig.kt @@ -0,0 +1,52 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Configuration options for reasoning models + */ +@Serializable +public data class ReasoningConfig( + /** + * Constrains effort on reasoning for reasoning models. + * Currently supported values are `low`, `medium`, and `high`. + * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + */ + @SerialName("effort") + val effort: ReasoningEffort? = null, + + /** + * A summary of the reasoning performed by the model. + * This can be useful for debugging and understanding the model's reasoning process. + * One of `concise` or `detailed`. + */ + @SerialName("generate_summary") + val generateSummary: String? 
= null +) + + +/** + * Reasoning effort levels for models with reasoning capabilities + */ +@JvmInline +@Serializable +public value class ReasoningEffort(public val value: String) { + public companion object { + /** + * Low reasoning effort + */ + public val Low: ReasoningEffort = ReasoningEffort("low") + + /** + * Medium reasoning effort (default) + */ + public val Medium: ReasoningEffort = ReasoningEffort("medium") + + /** + * High reasoning effort + */ + public val High: ReasoningEffort = ReasoningEffort("high") + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Response.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Response.kt new file mode 100644 index 00000000..53863fd2 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Response.kt @@ -0,0 +1,175 @@ +package com.aallam.openai.api.responses + +import com.aallam.openai.api.core.Status +import com.aallam.openai.api.model.ModelId +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +/** + * Response from the OpenAI Responses API + */ +@Serializable +public data class Response( + + /** + * The Unix timestamp (in seconds) of when the response was created + */ + @SerialName("created_at") + val createdAt: Long, + + /** + * An error object returned when the model fails to generate a Response. + * + */ + @SerialName("error") + val error: ResponseError?, + + /** + * Unique identifier for this Response. + */ + @SerialName("id") + val id: String, + + /** + * Details about why the response is incomplete. + * + */ + @SerialName("incomplete_details") + val incompleteDetails: IncompleteDetails?, + + /** + * Inserts a system (or developer) message as the first item in the model's context. + * + * When using along with previous_response_id, the instructions from a previous response will not be carried over to the next response. 
This makes it simple to swap out system (or developer) messages in new responses. + */ + @SerialName("instructions") + val instructions: String?, + + /** + * An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens. + */ + @SerialName("max_output_tokens") + val maxOutputTokens: Long? = null, + + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. + */ + @SerialName("metadata") + val metadata: Map = emptyMap(), + + /** + * Model ID used to generate the response, like gpt-4o or o1. OpenAI offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the model guide to browse and compare available models. + */ + @SerialName("model") + val model: ModelId, + + /** + * The object type, always "response" + */ + @SerialName("object") + val objectType: String = "response", + + + /** + * An array of content items generated by the model. + * + * The length and order of items in the output array is dependent on the model's response. + */ + @SerialName("output") + val output: List = emptyList(), + + /** + * Whether parallel tool calls were enabled + */ + @SerialName("parallel_tool_calls") + val parallelToolCalls: Boolean, + + /** + * The unique ID of the previous response to the model. Use this to create multi-turn conversations. + */ + @SerialName("previous_response_id") + val previousResponseId: String?, + + /** + * Configuration options for reasoning models. + * + */ + @SerialName("reasoning") + val reasoning: ReasoningConfig?, + + /** + * The status of the response generation. One of `completed`, `failed`, `in_progress`, or `incomplete`. 
+ */ + @SerialName("status") + val status: Status, + + /** + * The temperature used for sampling + */ + @SerialName("temperature") + val temperature: Double, + /** + * Configuration options for a text response from the model. Can be plain text or structured JSON data. + */ + @SerialName("text") + val text: ResponseTextConfig? = null, + + /** + * How the model should select which tool (or tools) to use when generating a response. See the tools parameter to see how to specify which tools the model can call. + */ + @SerialName("tool_choice") + val toolChoice: ResponseToolChoiceConfig, + + /** + * An array of tools the model may call while generating a response. You can specify which tool to use by setting the tool_choice parameter. + * + * The two categories of tools you can provide the model are: + * + * Built-in tools: Tools that are provided by OpenAI that extend the model's capabilities, like web search or file search. Learn more about built-in tools. + * Function calls (custom tools): Functions that are defined by you, enabling the model to call your own code. Learn more about function calling. + */ + @SerialName("tools") + val tools: List, + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + @SerialName("top_p") + val topP: Double, + + /** + * The truncation strategy used for the model response. + */ + @SerialName("truncation") + val truncation: Truncation? = null, + + /** + * Represents token usage details including input tokens, output tokens, a breakdown of output tokens, and the total tokens used. + */ + @SerialName("usage") + val usage: ResponseUsage? = null, + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
+ */ + @SerialName("user") + val user: String? = null + +) + +/** + * Details about why the response is incomplete + */ +@Serializable +public data class IncompleteDetails( + /** + * The reason why the response is incomplete + */ + @SerialName("reason") + val reason: String +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseError.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseError.kt new file mode 100644 index 00000000..6558cdb0 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseError.kt @@ -0,0 +1,22 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +/** + * Information about an error during response generation + */ +@Serializable +public data class ResponseError( + /** + * The error code for the response. + */ + @SerialName("code") + val code: String? = null, + + /** + * A human-readable description of the error. + */ + @SerialName("message") + val message: String? = null, +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseIncludable.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseIncludable.kt new file mode 100644 index 00000000..849a594b --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseIncludable.kt @@ -0,0 +1,30 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Additional data to include in the response + * + * Specify additional output data to include in the model response. 
+ */ +@JvmInline +@Serializable +public value class ResponseIncludable(public val value: String) { + public companion object { + /** + * Include the search results of the file search tool call + */ + public val FileSearchCallResults: ResponseIncludable = ResponseIncludable("file_search_call.results") + + /** + * Include image urls from the input message + */ + public val MessageInputImageUrl: ResponseIncludable = ResponseIncludable("message.input_image.image_url") + + /** + * Include image urls from the computer call output + */ + public val ComputerCallOutputImageUrl: ResponseIncludable = ResponseIncludable("computer_call_output.output.image_url") + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseInput.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseInput.kt new file mode 100644 index 00000000..7171bc80 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseInput.kt @@ -0,0 +1,60 @@ +package com.aallam.openai.api.responses + +import com.aallam.openai.api.responses.ResponseInput.ListInput +import com.aallam.openai.api.responses.ResponseInput.TextInput +import kotlinx.serialization.DeserializationStrategy +import kotlinx.serialization.Serializable +import kotlinx.serialization.SerializationException +import kotlinx.serialization.json.JsonArray +import kotlinx.serialization.json.JsonContentPolymorphicSerializer +import kotlinx.serialization.json.JsonElement +import kotlinx.serialization.json.JsonPrimitive +import kotlin.jvm.JvmInline + +/** + * Text, image, or file inputs to the model, used to generate a response. + * + * Can be either a simple text string or a list of messages. + */ +@Serializable(with = InputSerializer::class) +public sealed interface ResponseInput { + /** + * A text input to the model, equivalent to a text input with the `user` role. 
+ */ + @Serializable + @JvmInline + public value class TextInput(public val value: String) : ResponseInput + + /** + * A list of chat messages as input to the model. + */ + @Serializable + @JvmInline + public value class ListInput(public val values: List) : ResponseInput + + public companion object { + /** + * Create a text input from a string. + */ + public fun from(text: String): ResponseInput = TextInput(text) + + /** + * Create an input list from a list of items. + */ + public fun from(items: List): ResponseInput = ListInput(items) + } +} + +/** + * Custom serializer for Input that handles direct string or array serialization. + */ +internal class InputSerializer : JsonContentPolymorphicSerializer(ResponseInput::class) { + + override fun selectDeserializer(element: JsonElement): DeserializationStrategy { + return when (element) { + is JsonPrimitive -> TextInput.serializer() + is JsonArray -> ListInput.serializer() + else -> throw SerializationException("Unsupported JSON element: $element") + } + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseItem.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseItem.kt new file mode 100644 index 00000000..69b449c4 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseItem.kt @@ -0,0 +1,6 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Serializable + +@Serializable +public sealed interface ResponseItem diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseOutput.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseOutput.kt new file mode 100644 index 00000000..11acc957 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseOutput.kt @@ -0,0 +1,299 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import 
kotlin.jvm.JvmInline + +/** + * A single output item in the response + */ +@Serializable +public sealed interface ResponseOutput : ResponseItem { + + /** + * The ID of the output item. + * Will always be populated when coming from the API. It is optional here, so you can construct your own ResponseMessages. + */ + @SerialName("id") + public val id: String? + + /** + * The status of the item, one of "in_progress", "completed", or "incomplete". + * Will always be populated when coming from the AP. It is optional here, so you can construct your own OutputMessages + */ + @SerialName("status") + public val status: ResponseStatus? +} + + +/** + * A message input to the model with a role indicating instruction following hierarchy. Instructions given with the developer or system role take precedence over instructions given with the user role. Messages with the assistant role are presumed to have been generated by the model in previous interactions. + * + */ +@Serializable +@SerialName("message") +public data class ResponseMessage( + + + /** + * The role of the author of this message. + */ + @SerialName("role") public val role: ResponseRole, + + /** + * A list of one or many input items to the model, containing different content types. + * + * Important: + * If the role is "Assistant", only ResponseOutputText and Refusal are allowed in the content. + * If the role is "System", only ResponseInputText is allowed in the content. + * If the role is "User", only ResponseInputText, ResponseInputImage, + * and ResponseInputFile are allowed in the content. + * + * Note: If we were to implement this with proper polymorphism, + * serialization breaks because of the common "message" type. + */ + @SerialName("content") public val content: List = emptyList(), + + + /** + * The unique ID of the input message. + */ + @SerialName("id") public override val id: String? = null, + + /** + * The status of item. One of in_progress, completed, or incomplete. 
Populated when items are returned via API. + */ + @SerialName("status") public override val status: ResponseStatus? = null, +) : ResponseOutput + +/** + * Represents a chat message part. + */ +@Serializable +public sealed interface ResponseContent + +/** + * Text output from the model + */ +@Serializable +@SerialName("output_text") +public data class ResponseOutputText( + + /** + * The text output from the model. + */ + @SerialName("text") + val text: String, + + /** + * The annotations of the text output. + */ + @SerialName("annotations") + val annotations: List = emptyList() +) : ResponseContent + +/** + * Refusal message from the model + */ +@Serializable +@SerialName("refusal") +public data class Refusal( + + /** + * The refusal explanation from the model. + */ + @SerialName("refusal") + val refusal: String +) : ResponseContent + +/** + * A text input to the model. + * + * @param text the text content. + */ +@Serializable +@SerialName("input_text") +public data class ResponseInputText(@SerialName("text") val text: String) : ResponseContent + +/** + * An image input to the model. + * + * @param imageUrl the image url. + */ +@Serializable +@SerialName("input_image") +public data class ResponseInputImage( + /** + * The detail level of the image to be sent to the model. One of high, low, or auto. Defaults to auto. + * */ + @SerialName("detail") val detail: ImageDetail? = null, + /** + * The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image in a data URL. + * */ + @SerialName("image_url") val imageUrl: String? = null, + /** + * The ID of the file to be sent to the model. + */ + @SerialName("file_id") val fileId: String? = null, +) : ResponseContent + + +/** + * The detail level of the image to be sent to the model. 
+ */ +@JvmInline +@Serializable +public value class ImageDetail(public val value: String) { + public companion object { + public val High: ImageDetail = ImageDetail("high") + public val Low: ImageDetail = ImageDetail("low") + public val Auto: ImageDetail = ImageDetail("auto") + } +} + +/** + * A file input to the model. + */ +@Serializable +@SerialName("input_file") +public data class ResponseInputFile( + + /** + * The content of the file to be sent to the model. + * + */ + @SerialName("file_data") val fileData: String? = null, + + /** + * The ID of the file to be sent to the model. + */ + @SerialName("file_id") val fileId: String? = null, + + /** + * The name of the file to be sent to the model. + */ + @SerialName("filename") val fileName: String? = null, +) : ResponseContent + + +//TODO add input_audio (when available) + + +/** + * An annotation in text output + */ +@Serializable +public sealed interface Annotation + +/** + * A citation to a file. + */ +@Serializable +@SerialName("file_citation") +public data class FileCitation( + /** + * The ID of the file. + * + */ + @SerialName("file_id") + val fileId: String, + + /** + * The index of the file in the list of files. + */ + @SerialName("index") + val index: Int + +) : Annotation + +/** + * A citation for a web resource used to generate a model response. + */ +@Serializable +@SerialName("url_citation") +public data class UrlCitation( + + /** + * The title of the web resource. + */ + @SerialName("title") + val title: String, + + /** + * The URL of the web resource. + */ + @SerialName("url") + val url: String, + + /** + * The index of the first character of the URL citation in the message. + */ + @SerialName("start_index") + val startIndex: Int, + + /** + * The index of the last character of the URL citation in the message. + */ + @SerialName("end_index") + val endIndex: Int +) : Annotation + +/** + * A path to a file. 
+ */ +@Serializable +@SerialName("file_path") +public data class FilePath( + + /** + * The ID of the file. + */ + @SerialName("file_id") + val fileId: String, + + /** + * The index of the file in the list of files. + */ + @SerialName("index") + val index: Int +) : Annotation + + +/** + * Reasoning item for model reasoning + */ +@Serializable +@SerialName("reasoning") +public data class Reasoning( + /** + * The unique ID of the reasoning item. + */ + @SerialName("id") + override val id: String, + + /** + * The status of the reasoning item. + */ + @SerialName("status") + override val status: ResponseStatus, + + /** + * Reasoning text contents. + */ + @SerialName("summary") + val summary: List +) : ResponseOutput + +/** + * A summary text item in the reasoning output + */ +@Serializable +@SerialName("summary_text") +public data class SummaryText( + + /** + * A short summary of the reasoning used by the model. + */ + @SerialName("text") + val text: String +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseRequest.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseRequest.kt new file mode 100644 index 00000000..d438b503 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseRequest.kt @@ -0,0 +1,225 @@ +package com.aallam.openai.api.responses + +import com.aallam.openai.api.OpenAIDsl +import com.aallam.openai.api.model.ModelId +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +/** Request object for the OpenAI Responses API */ +@Serializable +public data class ResponseRequest( + + /** + * Text, image, or file inputs to the model, used to generate a response. + */ + @SerialName("input") val input: ResponseInput, + + /** + * Model ID used to generate the response, like gpt-4o or o1. OpenAI offers a wide range of models with different capabilities, performance characteristics, and price points. 
Refer to the model guide to browse and compare available models. + */ + @SerialName("model") val model: ModelId, + + /** Specify additional output data to include in the model response. */ + @SerialName("include") val include: List? = null, + + /** + * Inserts a system (or developer) message as the first item in the model's context. + * + * When using along with previous_response_id, the instructions from a previous response will not be carried over to the next response. This makes it simple to swap out system (or developer) messages in new responses. + */ + @SerialName("instructions") val instructions: String? = null, + + /** An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens. */ + @SerialName("max_output_tokens") val maxOutputTokens: Int? = null, + + /** + * Set of key-value pairs that can be attached to an object. This can be + * useful for storing additional information about the object in a structured + * format, and querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings + * with a maximum length of 512 characters. + * */ + @SerialName("metadata") val metadata: Map? = null, + + /** Whether to allow the model to run tool calls in parallel. */ + @SerialName("parallel_tool_calls") val parallelToolCalls: Boolean? = null, + + /** The unique ID of the previous response to the model. Use this to create multi-turn conversations. */ + @SerialName("previous_response_id") val previousResponseId: String? = null, + + /** Configuration for reasoning models. */ + @SerialName("reasoning") val reasoning: ReasoningConfig? = null, + + /** Whether to store the generated model response for later retrieval via API.*/ + @SerialName("store") val store: Boolean? = null, + + /** + * If set to true, the model response data will be streamed to the client as it is generated using server-sent events. 
See the Streaming section below for more information. + */ + @SerialName("stream") val stream: Boolean? = null, + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. + */ + @SerialName("temperature") val temperature: Double? = null, + + /** Configuration options for a text response from the model. Can be plain text or structured JSON data. */ + @SerialName("text") val text: ResponseTextConfig? = null, + + + /** How the model should select which tool (or tools) to use when generating a response. See the tools parameter to see how to specify which tools the model can call. */ + @SerialName("tool_choice") val toolChoice: ResponseToolChoiceConfig? = null, + + + /** + * An array of tools the model may call while generating a response. You can specify which tool to use by setting the tool_choice parameter. + + The two categories of tools you can provide the model are: + + Built-in tools: Tools that are provided by OpenAI that extend the model's capabilities, like web search or file search. Learn more about built-in tools. + Function calls (custom tools): Functions that are defined by you, enabling the model to call your own code. Learn more about function calling. + */ + @SerialName("tools") val tools: List? = null, + + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + @SerialName("top_p") val topP: Double? = null, + + /** + * The truncation strategy to use for the model response. 
+ * - `auto`: If the context exceeds the model's context window size, the model will truncate + * the response by dropping input items in the middle of the conversation. + * - `disabled` (default): If a model response will exceed the context window size, + * the request will fail with a 400 error. + */ + @SerialName("truncation") val truncation: Truncation? = null, + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + */ + @SerialName("user") val user: String? = null +) + +/** Builder for ResponseRequest objects */ +@OpenAIDsl +public class ResponseRequestBuilder { + /** ID of the model to use */ + public var model: ModelId? = null + + /** The input to the model */ + public var input: ResponseInput? = null + + /** Specify additional output data to include in the model response */ + public var include: List? = null + + /** Instructions for the model */ + public var instructions: String? = null + + /** Maximum number of tokens to generate */ + public var maxOutputTokens: Int? = null + + /** Custom metadata */ + public var metadata: Map? = null + + /** Whether to allow parallel tool calls */ + public var parallelToolCalls: Boolean? = null + + /** ID of a previous response to continue from */ + public var previousResponseId: String? = null + + /** Reasoning configuration */ + public var reasoning: ReasoningConfig? = null + + /** Whether to store the response */ + public var store: Boolean? = null + + /** Whether to stream the response */ + public var stream: Boolean? = null + + /** Sampling temperature */ + public var temperature: Double? = null + + /** Text response configuration */ + public var text: ResponseTextConfig? = null + + /** Tool choice configuration */ + public var toolChoice: ResponseToolChoiceConfig? = null + + /** Tools that the model may use */ + public var tools: MutableList? = null + + /** Top-p sampling parameter */ + public var topP: Double? 
= null + + /** + * Truncation configuration + * - `auto`: If the context exceeds the model's context window size, the model will truncate + * the response by dropping input items in the middle of the conversation. + * - `disabled` (default): If a model response will exceed the context window size, + * the request will fail with a 400 error. + */ + public var truncation: Truncation? = null + + /** End-user identifier */ + public var user: String? = null + + /** Add a tool to the request */ + public fun tool(tool: ResponseTool) { + if (tools == null) { + tools = mutableListOf() + } + tools?.add(tool) + } + + /** Add multiple tools to the request */ + public fun tools(init: MutableList.() -> Unit) { + if (tools == null) { + tools = mutableListOf() + } + tools?.init() + } + + /** Add an includable option */ + public fun include(includable: ResponseIncludable) { + include = include.orEmpty() + includable + } + + /** Build the ResponseRequest object */ + public fun build(): ResponseRequest { + requireNotNull(model) { "Model must be set" } + requireNotNull(input) { "Input must be set" } + + return ResponseRequest( + input = input!!, + model = model!!, + include = include, + instructions = instructions, + maxOutputTokens = maxOutputTokens, + metadata = metadata, + parallelToolCalls = parallelToolCalls, + previousResponseId = previousResponseId, + reasoning = reasoning, + store = store, + stream = stream, + temperature = temperature, + text = text, + toolChoice = toolChoice, + tools = tools, + topP = topP, + truncation = truncation, + user = user + ) + } +} + +/** Creates a new ResponseRequest using a builder DSL */ +public fun responseRequest(init: ResponseRequestBuilder.() -> Unit): ResponseRequest { + val builder = ResponseRequestBuilder() + builder.init() + return builder.build() +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseRole.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseRole.kt new file 
mode 100644 index 00000000..dc1bd07f --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseRole.kt @@ -0,0 +1,8 @@ +package com.aallam.openai.api.responses + +import com.aallam.openai.api.core.Role + +/** + * The role of the author of a message. + */ +public typealias ResponseRole = Role diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseStatus.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseStatus.kt new file mode 100644 index 00000000..e26d8c1a --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseStatus.kt @@ -0,0 +1,17 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Status of an output item + */ +@JvmInline +@Serializable +public value class ResponseStatus(public val value: String) { + public companion object { + public val InProgress: ResponseStatus = ResponseStatus("in_progress") + public val Completed: ResponseStatus = ResponseStatus("completed") + public val Incomplete: ResponseStatus = ResponseStatus("incomplete") + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseTextConfig.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseTextConfig.kt new file mode 100644 index 00000000..488bd0a2 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseTextConfig.kt @@ -0,0 +1,76 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlinx.serialization.json.JsonObject + +/** Configuration for text responses */ +@Serializable +public data class ResponseTextConfig( + /** The format to use for text responses */ + @SerialName("format") val format: TextResponseFormatConfiguration +) + +/** + * Configuration for text response format + */ +@Serializable +public 
sealed interface TextResponseFormatConfiguration + +/** + * Plain text format - default response format. + * Used to generate text responses. + */ +@Serializable +@SerialName("text") +public data object TextFormat : TextResponseFormatConfiguration + +/** + * JSON object response format. An older method of generating JSON responses. + * Using `json_schema` is recommended for models that support it. + * Note that the model will not generate JSON without a system or user message + * instructing it to do so. + */ +@Serializable +@SerialName("json_object") +public data object JsonObjectFormat : TextResponseFormatConfiguration + +/** + * JSON Schema response format. Used to generate structured JSON responses. + */ +@Serializable +@SerialName("json_schema") +public data class JsonSchemaFormat( + /** Structured Outputs configuration options, including a JSON Schema */ + @SerialName("json_schema") val jsonSchema: ResponseJsonSchema +) : TextResponseFormatConfiguration + +/** + * Structured Outputs configuration options, including a JSON Schema + */ +@Serializable +public data class ResponseJsonSchema( + /** + * A description of what the response format is for, used by the model to + * determine how to respond in the format. + */ + @SerialName("description") val description: String? = null, + + /** + * The name of the response format. Must be a-z, A-Z, 0-9, or contain + * underscores and dashes, with a maximum length of 64. + */ + @SerialName("name") val name: String? = null, + + /** + * The schema for the response format, described as a JSON Schema object. + */ + @SerialName("schema") val schema: JsonObject, + + /** + * Whether to enable strict schema adherence when generating the output. + * If set to true, the model will always follow the exact schema defined + * in the `schema` field. + */ + @SerialName("strict") val strict: Boolean? 
= null +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseTool.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseTool.kt new file mode 100644 index 00000000..9db6555e --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseTool.kt @@ -0,0 +1,780 @@ +package com.aallam.openai.api.responses + +import com.aallam.openai.api.core.Parameters +import com.aallam.openai.api.responses.ResponseTool.* +import com.aallam.openai.api.vectorstore.VectorStoreId +import kotlinx.serialization.KSerializer +import kotlinx.serialization.Required +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlinx.serialization.descriptors.buildClassSerialDescriptor +import kotlinx.serialization.encoding.Decoder +import kotlinx.serialization.encoding.Encoder +import kotlinx.serialization.json.* +import kotlin.jvm.JvmInline + +/** + * An array of tools the model may call while generating a response. + */ +@Serializable +public sealed interface ResponseTool { + + /** + * A tool that searches for relevant content from uploaded files. + */ + @Serializable + @SerialName("file_search") + public data class FileSearch( + /** + * The IDs of the vector stores to search. + */ + @SerialName("vector_store_ids") + val vectorStoreIds: List = emptyList(), + /** + * A filter to apply based on file attributes. + */ + @SerialName("filters") + val filters: FileSearchFilter? = null, + + /** + * Ranking options for search. + */ + @SerialName("ranking_options") + val rankingOptions: FileSearchRankingOptions? = null, + + /** + * The maximum number of results to return. This number should be between 1 and 50 inclusive. + */ + @SerialName("max_num_results") + val maxNumResults: Int? 
= null,
+    ) : ResponseTool
+
+
+    @Serializable(
+        with = FileSearchFilterSerializer::class
+    )
+    public sealed interface FileSearchFilter
+
+    /**
+     * A filter used to compare a specified attribute key to a given value using a defined comparison operation.
+     */
+    @Serializable
+    public data class ComparisonFilter(
+
+        /**
+         * Specifies the comparison operator: eq, ne, gt, gte, lt, lte.
+         */
+        @SerialName("type")
+        public val type: String,
+
+        /**
+         * The key to compare against the value.
+         */
+        @SerialName("key")
+        public val key: String,
+
+        /**
+         * The value to compare the attribute key to.
+         */
+        @SerialName("value")
+        public val value: String,
+
+    ) : FileSearchFilter
+
+    /**
+     * Combine multiple filters using 'and' or 'or'.
+     */
+    @Serializable
+    public data class CompoundFilter(
+        /**
+         * The logical operator to use: 'and' or 'or'.
+         */
+        @SerialName("type")
+        public val type: String,
+
+        /**
+         * Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.
+         */
+        @SerialName("filters")
+        public val filters: List<FileSearchFilter>,
+
+    ) : FileSearchFilter
+
+    /**
+     * Ranking options for search.
+     */
+    @Serializable
+    public data class FileSearchRankingOptions(
+        /**
+         * The ranker to use for the file search.
+         * Defaults to "auto"
+         */
+        @SerialName("ranker")
+        val ranker: String? = null,
+
+        /**
+         * The score threshold for the file search, a number between 0 and 1.
+         * Numbers closer to 1 will attempt to return only the most relevant results, but may return fewer results.
+         * Defaults to 0
+         */
+        @SerialName("score_threshold")
+        val scoreThreshold: Double? = null,
+    )
+
+
+    /**
+     * Web search tool (preview)
+     */
+    @Serializable
+    @SerialName("web_search_preview")
+    public data class WebSearchPreview(
+        /**
+         * Approximate location parameters for the search.
+         */
+        @SerialName("user_location")
+        val userLocation: WebSearchLocation? = null,
+
+        /**
+         * High level guidance for the amount of context window space to use for the search.
+ * One of 'low', 'medium', or 'high'. + * 'medium' is the default. + */ + @SerialName("search_context_size") + val searchContextSize: WebSearchContextSize? = null, + ) : ResponseTool + + /** + * Web search tool (preview 2025-03-11) + */ + @Serializable + @SerialName("web_search_preview_2025_03_11") + public data class WebSearchPreview2025( + + /** + * Approximate location parameters for the search. + */ + @SerialName("user_location") + val userLocation: WebSearchLocation? = null, + + /** + * High level guidance for the amount of context window space to use for the search. + * One of 'low', 'medium', or 'high'. + * 'medium' is the default. + */ + @SerialName("search_context_size") + val searchContextSize: WebSearchContextSize? = null, + ) : ResponseTool + + + /** + * Web search context size + */ + @JvmInline + @Serializable + public value class WebSearchContextSize(public val value: String) { + public companion object { + /** + * Low context size + */ + public val Low: WebSearchContextSize = WebSearchContextSize("low") + + /** + * Medium context size + */ + public val Medium: WebSearchContextSize = WebSearchContextSize("medium") + + /** + * High context size + */ + public val High: WebSearchContextSize = WebSearchContextSize("high") + } + } + + /** + * Web search location + */ + @Serializable + public data class WebSearchLocation( + /** + * Free text input for the city of the user, e.g., San Francisco. + */ + @SerialName("city") + val city: String? = null, + + /** + * The two-letter ISO-country code of the user, e.g., US. + */ + @SerialName("country") + val country: String? = null, + + /** + * Free text input for the region of the user, e.g., California. + */ + @SerialName("region") + val region: String? = null, + + /** + * The IANA time zone of the user, e.g., America/Los_Angeles. + */ + @SerialName("timezone") + val timezone: String? = null, + + ) { + /** + * The type of location approximation. Always approximate. 
+ */ + @SerialName("type") + @Required + val type: String = "approximate" + } + + /** + * Computer tool for computational tasks (preview) + */ + @Serializable + @SerialName("computer_use_preview") + public data class ComputerUsePreview( + + /** + * The width of the computer display + */ + @SerialName("display_width") + val displayWidth: Int, + + /** + * The height of the computer display + */ + @SerialName("display_height") + val displayHeight: Int, + + /** + * The type of computer environment to control + */ + @SerialName("environment") + val environment: String + ) : ResponseTool + + /** + * Function tool for function calling + */ + @Serializable + @SerialName("function") + public data class ResponseFunctionTool( + /** + * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum + * length of 64. + */ + @SerialName("name") val name: String, + + /** + * The parameters the functions accept, described as a JSON Schema object. + * See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, + * and the [JSON Schema reference](https://json-schema.org/understanding-json-schema) for documentation about + * the format. + * + * Omitting `parameters` defines a function with an empty parameter list. + */ + @SerialName("parameters") val parameters: Parameters? = null, + + /** + * A description of what the function does, used by the model to choose when and how to call the function. + */ + @SerialName("description") val description: String? = null, + + /** + * Whether to enforce strict parameter validation. Default true. + */ + @SerialName("strict") val strict: Boolean? = null, + ) : ResponseTool +} + +internal class FileSearchFilterSerializer : KSerializer { + + override val descriptor = buildClassSerialDescriptor("FileSearchFilter") + + override fun serialize(encoder: Encoder, value: FileSearchFilter) { + val jsonEncoder = encoder as? 
JsonEncoder
+            ?: throw IllegalArgumentException("This serializer can only be used with JSON")
+
+        when (value) {
+            is ComparisonFilter -> ComparisonFilter.serializer().serialize(jsonEncoder, value)
+            is CompoundFilter -> CompoundFilter.serializer().serialize(jsonEncoder, value)
+        }
+    }
+
+    override fun deserialize(decoder: Decoder): FileSearchFilter {
+        val jsonDecoder = decoder as? JsonDecoder
+            ?: throw IllegalArgumentException("This serializer can only be used with JSON")
+
+        val element = jsonDecoder.decodeJsonElement()
+        return when (val type = element.jsonObject["type"]?.jsonPrimitive?.content) {
+            "and", "or" -> jsonDecoder.json.decodeFromJsonElement(CompoundFilter.serializer(), element)
+            "eq" -> jsonDecoder.json.decodeFromJsonElement(ComparisonFilter.serializer(), element)
+            "ne" -> jsonDecoder.json.decodeFromJsonElement(ComparisonFilter.serializer(), element)
+            "gt" -> jsonDecoder.json.decodeFromJsonElement(ComparisonFilter.serializer(), element)
+            "gte" -> jsonDecoder.json.decodeFromJsonElement(ComparisonFilter.serializer(), element)
+            "lt" -> jsonDecoder.json.decodeFromJsonElement(ComparisonFilter.serializer(), element)
+            "lte" -> jsonDecoder.json.decodeFromJsonElement(ComparisonFilter.serializer(), element)
+            else -> throw IllegalArgumentException("Unknown filter type: $type")
+        }
+    }
+
+}
+
+/**
+ * Computer action for the computer use tool
+ */
+@Serializable
+public sealed interface ComputerAction
+
+/**
+ * A click action
+ */
+@Serializable
+@SerialName("click")
+public data class Click(
+
+    /**
+     * The mouse button used for the click.
+ * One of "left", "right", "wheel", "back", or "forward" + */ + @SerialName("button") + val button: String, + + /** + * The x-coordinate where the click occurred + */ + @SerialName("x") + val x: Int, + + /** + * The y-coordinate where the click occurred + */ + @SerialName("y") + val y: Int +) : ComputerAction + +/** + * A double click action + */ +@Serializable +@SerialName("double_click") +public data class DoubleClick( + + /** + * The x-coordinate where the double click occurred + */ + @SerialName("x") + val x: Int, + + /** + * The y-coordinate where the double click occurred + */ + @SerialName("y") + val y: Int +) : ComputerAction + +/** + * A drag action + */ +@Serializable +@SerialName("drag") +public data class Drag( + + /** + * An array of coordinates representing the path of the drag action + */ + @SerialName("path") + val path: List +) : ComputerAction + +/** + * A keypress action + */ +@Serializable +@SerialName("keypress") +public data class KeyPress( + + /** + * The combination of keys to press + */ + @SerialName("keys") + val keys: List +) : ComputerAction + +/** + * A move action + */ +@Serializable +@SerialName("move") +public data class Move( + + /** + * The x-coordinate to move to + */ + @SerialName("x") + val x: Int, + + /** + * The y-coordinate to move to + */ + @SerialName("y") + val y: Int +) : ComputerAction + +/** + * A screenshot action + */ +@Serializable +@SerialName("screenshot") +public data object Screenshot : ComputerAction + +/** + * A scroll action + */ +@Serializable +@SerialName("scroll") +public data class Scroll( + + /** + * The x-coordinate where the scroll occurred + */ + @SerialName("x") + val x: Int, + + /** + * The y-coordinate where the scroll occurred + */ + @SerialName("y") + val y: Int, + + /** + * The horizontal scroll distance + */ + @SerialName("scroll_x") + val scrollX: Int, + + /** + * The vertical scroll distance + */ + @SerialName("scroll_y") + val scrollY: Int +) : ComputerAction + +/** + * A typing action + */ 
+@Serializable +@SerialName("type") +public data class Type( + + /** + * The text to type + */ + @SerialName("text") + val text: String +) : ComputerAction + +/** + * A wait action + */ +@Serializable +@SerialName("wait") +public data object Wait : ComputerAction + +/** + * A coordinate pair (x, y) + */ +@Serializable +public data class Coordinate( + /** + * The x-coordinate + */ + @SerialName("x") + val x: Int, + + /** + * The y-coordinate + */ + @SerialName("y") + val y: Int +) + + +/** + * File search tool call in a response + */ +@Serializable +@SerialName("file_search_call") +public data class FileSearchToolCall( + /** + * The unique ID of the file search tool call. + */ + @SerialName("id") + override val id: String, + + /** + * The status of the file search tool call. + */ + @SerialName("status") + override val status: ResponseStatus, + + /** + * The queries used to search for files. + */ + @SerialName("queries") + val queries: List, + + /** + * The results of the file search tool call. + */ + @SerialName("results") + val results: List? = null +) : ResponseOutput + +/** + * Result of a file search + */ +@Serializable +public data class FileSearchResult( + /** + * The ID of the file + */ + @SerialName("file_id") + val fileId: String, + + /** + * The text content from the file + */ + @SerialName("text") + val text: String, + + /** + * The filename + */ + @SerialName("filename") + val filename: String, + + /** + * The score or relevance rating + */ + @SerialName("score") + val score: Double +) + +/** + * Function tool call in a response + */ +@Serializable +@SerialName("function_call") +public data class FunctionToolCall( + /** + * The unique ID of the function tool call. + */ + @SerialName("id") + override val id: String, + + /** + * The status of the function tool call. + */ + @SerialName("status") + override val status: ResponseStatus, + + /** + * The unique ID of the function tool call generated by the model. 
+ */ + @SerialName("call_id") + val callId: String, + + /** + * The name of the function to run. + */ + @SerialName("name") + val name: String, + + /** + * A JSON string of the arguments to pass to the function. + */ + @SerialName("arguments") + val arguments: String, +) : ResponseOutput { + + /** + * Decodes the [arguments] JSON string into a JsonObject. + * If [arguments] is null, the function will return null. + * + * @param json The Json object to be used for decoding, defaults to a default Json instance + */ + public fun argumentsAsJson(json: Json = Json): JsonObject = json.decodeFromString(arguments) + +} + +/** + * The output of a function tool call. + * + */ +@Serializable +@SerialName("function_call_output") +public data class FunctionToolCallOutput( + + /** + * The unique ID of the function tool call output. Populated when this item is returned via API. + */ + @SerialName("id") + val id: String? = null, + + /** + * The unique ID of the function tool call generated by the model. + */ + @SerialName("call_id") + val callId: String, + + /** + * A JSON string of the output of the function tool call. + */ + @SerialName("output") + val output: String, + + /** + * The status of the item. One of in_progress, completed, or incomplete. Populated when items are returned via API. + */ + @SerialName("status") + val status: ResponseStatus? = null +) : ResponseItem + +/** + * Web search tool call in a response + */ +@Serializable +@SerialName("web_search_call") +public data class WebSearchToolCall( + /** + * The unique ID of the web search tool call. + */ + @SerialName("id") + override val id: String, + + /** + * The status of the web search tool call. + */ + @SerialName("status") + override val status: ResponseStatus +) : ResponseOutput + +/** + * Computer tool call in a response + */ +@Serializable +@SerialName("computer_call") +public data class ComputerToolCall( + /** + * The unique ID of the computer tool call. 
+ */ + @SerialName("id") + override val id: String, + + /** + * The status of the computer tool call. + */ + @SerialName("status") + override val status: ResponseStatus, + + /** + * An identifier used when responding to the tool call with output. + */ + @SerialName("call_id") + val callId: String, + + /** + * The action to be performed + */ + @SerialName("action") + val action: ComputerAction, + + /** + * The pending safety checks for the computer call. + */ + @SerialName("pending_safety_checks") + val pendingSafetyChecks: List = emptyList() +) : ResponseOutput + +/** + * A safety check for a computer call + */ +@Serializable +public data class SafetyCheck( + /** + * The ID of the safety check + */ + @SerialName("id") + val id: String, + + /** + * The type code of the safety check + */ + @SerialName("code") + val code: String, + + /** + * The message about the safety check + */ + @SerialName("message") + val message: String +) + +/** + * The output of a computer tool call. + */ +@Serializable +@SerialName("computer_call_output") +public data class ComputerToolCallOutput( + /** + * The unique ID of the computer tool call output. + */ + @SerialName("id") + val id: String? = null, + + /** + * The ID of the computer tool call that produced the output. + */ + @SerialName("call_id") + val callId: String, + + /** + * A computer screenshot image used with the computer use tool. + */ + @SerialName("output") + val output: ComputerScreenshot, + + /** + * The safety checks reported by the API that have been acknowledged by the developer. + */ + @SerialName("acknowledged_safety_checks") + val acknowledgedSafetyChecks: List = emptyList(), + + /** + * The status of the item. One of in_progress, completed, or incomplete. Populated when items are returned via API. + */ + @SerialName("status") + val status: ResponseStatus? = null +) : ResponseItem + +/** + * A computer screenshot image used with the computer use tool. 
+ */ +@Serializable +@SerialName("computer_screenshot") +public data class ComputerScreenshot( + + /** + * The identifier of an uploaded file that contains the screenshot. + */ + @SerialName("file_id") + val fileId: String? = null, + + /** + * The URL of the screenshot image. + */ + @SerialName("image_url") + val imageUrl: String? = null +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseToolChoiceConfig.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseToolChoiceConfig.kt new file mode 100644 index 00000000..90a4b8db --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseToolChoiceConfig.kt @@ -0,0 +1,92 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable +import kotlinx.serialization.json.JsonContentPolymorphicSerializer +import kotlinx.serialization.json.JsonElement +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.JsonPrimitive +import kotlin.jvm.JvmInline + +/** + * Controls which (if any) tool is called by the model in the Responses API. + */ +@Serializable(with = ResponseToolChoiceSerializer::class) +public sealed interface ResponseToolChoiceConfig { + + /** + * Represents a tool choice mode. + * - `"none"` means the model will not call any tool and instead generates a message. + * - `"auto"` means the model can pick between generating a message or calling one or more tools. + * - `"required"` means the model must call one or more tools. + */ + @JvmInline + @Serializable + public value class Mode(public val value: String) : ResponseToolChoiceConfig + + /** + * Specifies a specific tool the model should use. 
+ */ + @Serializable + public data class Named( + /** + * The type of tool to use, either "function" or a built-in tool type + */ + @SerialName("type") public val type: String, + + /** + * The function details, only used when type is "function" + */ + @SerialName("function") public val function: FunctionToolChoice? = null, + ) : ResponseToolChoiceConfig + + public companion object { + /** Represents the `auto` mode. */ + public val Auto: ResponseToolChoiceConfig = Mode("auto") + + /** Represents the `none` mode. */ + public val None: ResponseToolChoiceConfig = Mode("none") + + /** Represents the `required` mode. */ + public val Required: ResponseToolChoiceConfig = Mode("required") + + /** Specifies a function for the model to call. */ + public fun function(name: String): ResponseToolChoiceConfig = + Named(type = "function", function = FunctionToolChoice(name = name)) + + /** Specifies a file search tool for the model to use. */ + public fun fileSearch(): ResponseToolChoiceConfig = Named(type = "file_search") + + /** Specifies a web search tool for the model to use. */ + public fun webSearch(): ResponseToolChoiceConfig = Named(type = "web_search_preview") + + /** Specifies a web search tool (preview 2025-03-11) for the model to use. */ + public fun webSearch2025(): ResponseToolChoiceConfig = Named(type = "web_search_preview_2025_03_11") + + /** Specifies a computer use tool for the model to use. */ + public fun computerUse(): ResponseToolChoiceConfig = Named(type = "computer_use_preview") + } +} + +/** + * Represents the function tool choice option. + */ +@Serializable +public data class FunctionToolChoice( + /** + * The name of the function to call. + */ + @SerialName("name") val name: String +) + +/** + * Serializer for [ResponseToolChoiceConfig]. 
+ */ +internal class ResponseToolChoiceSerializer : + JsonContentPolymorphicSerializer(ResponseToolChoiceConfig::class) { + override fun selectDeserializer(element: JsonElement) = when (element) { + is JsonPrimitive -> ResponseToolChoiceConfig.Mode.serializer() + is JsonObject -> ResponseToolChoiceConfig.Named.serializer() + else -> throw IllegalArgumentException("Unknown element type: $element") + } +} diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseUsage.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseUsage.kt new file mode 100644 index 00000000..018c9ad1 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/ResponseUsage.kt @@ -0,0 +1,65 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.SerialName +import kotlinx.serialization.Serializable + +/** + * Represents token usage details including input tokens, output tokens, + * a breakdown of output tokens, and the total tokens used. + */ +@Serializable +public data class ResponseUsage( + /** + * The number of input tokens. + */ + @SerialName("input_tokens") + val inputTokens: Int, + + /** + * A detailed breakdown of the input tokens. + */ + @SerialName("input_tokens_details") + val inputTokensDetails: InputTokensDetails, + + /** + * The number of output tokens. + */ + @SerialName("output_tokens") + val outputTokens: Int, + + /** + * A detailed breakdown of the output tokens. + */ + @SerialName("output_tokens_details") + val outputTokensDetails: OutputTokensDetails, + + /** + * The total number of tokens used. + */ + @SerialName("total_tokens") + val totalTokens: Int +) + +/** + * A detailed breakdown of the input tokens. + */ +@Serializable +public data class InputTokensDetails( + /** + * The number of tokens that were retrieved from the cache. + */ + @SerialName("cached_tokens") + val cachedTokens: Int +) + +/** + * A detailed breakdown of the output tokens. 
+ */ +@Serializable +public data class OutputTokensDetails( + /** + * The number of reasoning tokens. + */ + @SerialName("reasoning_tokens") + val reasoningTokens: Int +) diff --git a/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Truncation.kt b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Truncation.kt new file mode 100644 index 00000000..925d0714 --- /dev/null +++ b/openai-core/src/commonMain/kotlin/com.aallam.openai.api/responses/Truncation.kt @@ -0,0 +1,35 @@ +package com.aallam.openai.api.responses + +import kotlinx.serialization.Serializable +import kotlin.jvm.JvmInline + +/** + * Controls truncation behavior for the model + * + * - `auto`: If the context of this response and previous ones exceeds + * the model's context window size, the model will truncate the + * response to fit the context window by dropping input items in the + * middle of the conversation. + * - `disabled` (default): If a model response will exceed the context window + * size for a model, the request will fail with a 400 error. + */ +@JvmInline +@Serializable +public value class Truncation(public val value: String) { + public companion object { + /** + * If the context of this response and previous ones exceeds + * the model's context window size, the model will truncate the + * response to fit the context window by dropping input items in the + * middle of the conversation. + */ + public val Auto: Truncation = Truncation("auto") + + /** + * If a model response will exceed the context window + * size for a model, the request will fail with a 400 error. + * This is the default. + */ + public val Disabled: Truncation = Truncation("disabled") + } +}