MessageDao.kt
@@ -10,7 +10,7 @@ import dev.chungjungsoo.gptmobile.data.database.entity.Message
 @Dao
 interface MessageDao {
 
-    @Query("SELECT * FROM messages WHERE chat_id=:chatInt")
+    @Query("SELECT * FROM messages WHERE chat_id=:chatInt ORDER BY created_at ASC")
     suspend fun loadMessages(chatInt: Int): List<Message>
 
     @Insert
Message.kt
@@ -3,6 +3,7 @@ package dev.chungjungsoo.gptmobile.data.database.entity
 import androidx.room.ColumnInfo
 import androidx.room.Entity
 import androidx.room.ForeignKey
+import androidx.room.Index
 import androidx.room.PrimaryKey
 import dev.chungjungsoo.gptmobile.data.model.ApiType
 
@@ -15,7 +16,8 @@ import dev.chungjungsoo.gptmobile.data.model.ApiType
             childColumns = ["chat_id"],
             onDelete = ForeignKey.CASCADE
         )
-    ]
+    ],
+    indices = [Index(value = ["chat_id", "created_at"])]
 )
 data class Message(
     @PrimaryKey(autoGenerate = true)
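Taken together, the DAO and entity changes pair an ordered query with a composite index that can serve it. Below is a condensed, self-contained sketch of that pairing (not code from this PR; `MessageRow` and `MessageRowDao` are stand-in names, and the field set is simplified). With the filter column first and the sort column second, SQLite can typically satisfy both the `WHERE` clause and the `ORDER BY` directly from the index, which Room names along the lines of `index_messages_chat_id_created_at` by default:

```kotlin
import androidx.room.ColumnInfo
import androidx.room.Dao
import androidx.room.Entity
import androidx.room.Index
import androidx.room.PrimaryKey
import androidx.room.Query

@Entity(
    tableName = "messages",
    indices = [Index(value = ["chat_id", "created_at"])]
)
data class MessageRow(
    @PrimaryKey(autoGenerate = true) val id: Int = 0,
    @ColumnInfo(name = "chat_id") val chatId: Int = 0,
    @ColumnInfo(name = "created_at") val createdAt: Long = 0,
    val content: String = ""
)

@Dao
interface MessageRowDao {
    // Filter column leads, sort column follows: the composite index covers both,
    // so loading a chat returns messages in creation order without a separate sort step.
    @Query("SELECT * FROM messages WHERE chat_id = :chatId ORDER BY created_at ASC")
    suspend fun loadMessages(chatId: Int): List<MessageRow>
}
```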
ChatRepositoryImpl.kt
@@ -46,14 +46,49 @@ class ChatRepositoryImpl @Inject constructor(
     private val anthropic: AnthropicAPI
 ) : ChatRepository {
 
-    private lateinit var openAI: OpenAI
-    private lateinit var google: GenerativeModel
-    private lateinit var ollama: OpenAI
-    private lateinit var groq: OpenAI
+    // Configuration keys for caching clients
+    internal data class OpenAIClientConfig(val token: String, val baseUrl: String)
+    internal data class GoogleClientConfig(val token: String, val modelName: String) // Google client is tied to model name
+
+    // Caches for API clients
+    private val openAIClients = mutableMapOf<OpenAIClientConfig, OpenAI>()
+    private val googleClients = mutableMapOf<GoogleClientConfig, GenerativeModel>()
+    // Ollama and Groq use the OpenAI client, so they will share the openAIClients cache
+    // but need distinct keys if their configs differ beyond token/baseUrl (e.g. specific model quirks if any).
+    // For now, assuming they are distinguished by their baseUrl primarily.
+
+    private fun getOpenAIClient(token: String?, baseUrl: String): OpenAI {
+        val config = OpenAIClientConfig(token ?: "", baseUrl)
+        return openAIClients.getOrPut(config) {
+            OpenAI(config.token, host = OpenAIHost(baseUrl = config.baseUrl))
+        }
+    }
+
+    private fun getGoogleClient(token: String?, modelName: String?, systemPrompt: String?, temperature: Float?, topP: Float?): GenerativeModel {
+        val configKey = GoogleClientConfig(token ?: "", modelName ?: "") // Simplified key for lookup
+        // Actual config for creation uses all params
+        return googleClients.getOrPut(configKey) {
+            val genConfig = generationConfig {
+                this.temperature = temperature
+                this.topP = topP
+            }
+            GenerativeModel(
+                modelName = modelName ?: "",
+                apiKey = token ?: "",
+                systemInstruction = content { text(systemPrompt ?: ModelConstants.DEFAULT_PROMPT) },
+                generationConfig = genConfig,
+                safetySettings = listOf(
+                    SafetySetting(HarmCategory.DANGEROUS_CONTENT, BlockThreshold.ONLY_HIGH),
+                    SafetySetting(HarmCategory.SEXUALLY_EXPLICIT, BlockThreshold.NONE)
+                )
+            )
+        }
+    }
Comment on lines +67 to +86
⚠️ Potential issue

Fix Google client caching logic.

The caching key for Google clients only considers token and modelName, but the actual client creation uses additional parameters (systemPrompt, temperature, topP). This could lead to incorrect client reuse when these parameters differ.

Consider expanding the cache key to include all configuration parameters:

-internal data class GoogleClientConfig(val token: String, val modelName: String) // Google client is tied to model name
+internal data class GoogleClientConfig(
+    val token: String, 
+    val modelName: String,
+    val systemPrompt: String?,
+    val temperature: Float?,
+    val topP: Float?
+)

private fun getGoogleClient(token: String?, modelName: String?, systemPrompt: String?, temperature: Float?, topP: Float?): GenerativeModel {
-    val configKey = GoogleClientConfig(token ?: "", modelName ?: "") // Simplified key for lookup
+    val configKey = GoogleClientConfig(token ?: "", modelName ?: "", systemPrompt, temperature, topP)
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
-    private fun getGoogleClient(token: String?, modelName: String?, systemPrompt: String?, temperature: Float?, topP: Float?): GenerativeModel {
-        val configKey = GoogleClientConfig(token ?: "", modelName ?: "") // Simplified key for lookup
-        // Actual config for creation uses all params
-        return googleClients.getOrPut(configKey) {
-            val genConfig = generationConfig {
-                this.temperature = temperature
-                this.topP = topP
-            }
-            GenerativeModel(
-                modelName = modelName ?: "",
-                apiKey = token ?: "",
-                systemInstruction = content { text(systemPrompt ?: ModelConstants.DEFAULT_PROMPT) },
-                generationConfig = genConfig,
-                safetySettings = listOf(
-                    SafetySetting(HarmCategory.DANGEROUS_CONTENT, BlockThreshold.ONLY_HIGH),
-                    SafetySetting(HarmCategory.SEXUALLY_EXPLICIT, BlockThreshold.NONE)
-                )
-            )
-        }
-    }
+    // Expand the cache key to include all relevant client parameters
+    internal data class GoogleClientConfig(
+        val token: String,
+        val modelName: String,
+        val systemPrompt: String?,
+        val temperature: Float?,
+        val topP: Float?
+    )
+    private fun getGoogleClient(
+        token: String?,
+        modelName: String?,
+        systemPrompt: String?,
+        temperature: Float?,
+        topP: Float?
+    ): GenerativeModel {
+        val configKey = GoogleClientConfig(
+            token ?: "",
+            modelName ?: "",
+            systemPrompt,
+            temperature,
+            topP
+        )
+        // Actual config for creation uses all params
+        return googleClients.getOrPut(configKey) {
+            val genConfig = generationConfig {
+                this.temperature = temperature
+                this.topP = topP
+            }
+            GenerativeModel(
+                modelName = modelName ?: "",
+                apiKey = token ?: "",
+                systemInstruction = content { text(systemPrompt ?: ModelConstants.DEFAULT_PROMPT) },
+                generationConfig = genConfig,
+                safetySettings = listOf(
+                    SafetySetting(HarmCategory.DANGEROUS_CONTENT, BlockThreshold.ONLY_HIGH),
+                    SafetySetting(HarmCategory.SEXUALLY_EXPLICIT, BlockThreshold.NONE)
+                )
+            )
+        }
+    }
🤖 Prompt for AI Agents
In app/src/main/kotlin/dev/chungjungsoo/gptmobile/data/repository/ChatRepositoryImpl.kt, around lines 67 to 86, the cache key for googleClients only includes token and modelName, but client creation also depends on systemPrompt, temperature, and topP. To fix this, expand the cache key to include all of these parameters so that a client is cached and reused only when every configuration parameter matches.
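To make the pitfall concrete, here is a minimal Kotlin sketch (not code from this PR; `NarrowKey`, `FullKey`, and `Client` are stand-in names) showing how a `getOrPut` cache keyed only on token and model silently hands back a client built with older settings, while a key that carries every parameter builds a fresh one:

```kotlin
// Client stands in for GenerativeModel; only the shape of the cache key matters here.
data class NarrowKey(val token: String, val modelName: String)
data class FullKey(
    val token: String,
    val modelName: String,
    val systemPrompt: String?,
    val temperature: Float?
)
data class Client(val systemPrompt: String?, val temperature: Float?)

fun main() {
    val narrowCache = mutableMapOf<NarrowKey, Client>()
    val fullCache = mutableMapOf<FullKey, Client>()

    fun narrowGet(prompt: String?, temp: Float?): Client =
        narrowCache.getOrPut(NarrowKey("token", "model")) { Client(prompt, temp) }

    fun fullGet(prompt: String?, temp: Float?): Client =
        fullCache.getOrPut(FullKey("token", "model", prompt, temp)) { Client(prompt, temp) }

    val a = narrowGet("Be brief", 0.2f)
    val b = narrowGet("Be verbose", 0.9f) // same narrow key -> stale client, new settings ignored
    println(a === b)                      // true

    val c = fullGet("Be brief", 0.2f)
    val d = fullGet("Be verbose", 0.9f)   // different full key -> a fresh client is built
    println(c === d)                      // false
}
```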



     override suspend fun completeOpenAIChat(question: Message, history: List<Message>): Flow<ApiState> {
         val platform = checkNotNull(settingRepository.fetchPlatforms().firstOrNull { it.name == ApiType.OPENAI })
-        openAI = OpenAI(platform.token ?: "", host = OpenAIHost(baseUrl = platform.apiUrl))
+        val currentOpenAIClient = getOpenAIClient(platform.token, platform.apiUrl)
 
         val generatedMessages = messageToOpenAICompatibleMessage(ApiType.OPENAI, history + listOf(question))
         val generatedMessageWithPrompt = listOf(
@@ -66,7 +101,7 @@ class ChatRepositoryImpl @Inject constructor(
             topP = platform.topP?.toDouble()
         )
 
-        return openAI.chatCompletions(chatCompletionRequest)
+        return currentOpenAIClient.chatCompletions(chatCompletionRequest)
             .map<ChatCompletionChunk, ApiState> { chunk -> ApiState.Success(chunk.choices.getOrNull(0)?.delta?.content ?: "") }
             .catch { throwable -> emit(ApiState.Error(throwable.message ?: "Unknown error")) }
             .onStart { emit(ApiState.Loading) }
@@ -104,23 +139,18 @@ class ChatRepositoryImpl @Inject constructor(
 
     override suspend fun completeGoogleChat(question: Message, history: List<Message>): Flow<ApiState> {
         val platform = checkNotNull(settingRepository.fetchPlatforms().firstOrNull { it.name == ApiType.GOOGLE })
-        val config = generationConfig {
-            temperature = platform.temperature
+        val currentGoogleClient = getGoogleClient(
+            token = platform.token,
+            modelName = platform.model,
+            systemPrompt = platform.systemPrompt,
+            temperature = platform.temperature,
             topP = platform.topP
-        }
-        google = GenerativeModel(
-            modelName = platform.model ?: "",
-            apiKey = platform.token ?: "",
-            systemInstruction = content { text(platform.systemPrompt ?: ModelConstants.DEFAULT_PROMPT) },
-            generationConfig = config,
-            safetySettings = listOf(
-                SafetySetting(HarmCategory.DANGEROUS_CONTENT, BlockThreshold.ONLY_HIGH),
-                SafetySetting(HarmCategory.SEXUALLY_EXPLICIT, BlockThreshold.NONE)
-            )
         )
 
         val inputContent = messageToGoogleMessage(history)
-        val chat = google.startChat(history = inputContent)
+        // For Google's SDK, startChat is on the client instance and returns a Chat object.
+        // The client itself is cached, but startChat would be called per logical session.
+        val chat = currentGoogleClient.startChat(history = inputContent)
 
         return chat.sendMessageStream(question.content)
             .map<GenerateContentResponse, ApiState> { response -> ApiState.Success(response.text ?: "") }
@@ -131,7 +161,7 @@ class ChatRepositoryImpl @Inject constructor(
 
     override suspend fun completeGroqChat(question: Message, history: List<Message>): Flow<ApiState> {
         val platform = checkNotNull(settingRepository.fetchPlatforms().firstOrNull { it.name == ApiType.GROQ })
-        groq = OpenAI(platform.token ?: "", host = OpenAIHost(baseUrl = platform.apiUrl))
+        val currentGroqClient = getOpenAIClient(platform.token, platform.apiUrl)
 
         val generatedMessages = messageToOpenAICompatibleMessage(ApiType.GROQ, history + listOf(question))
         val generatedMessageWithPrompt = listOf(
@@ -144,7 +174,7 @@ class ChatRepositoryImpl @Inject constructor(
             topP = platform.topP?.toDouble()
         )
 
-        return groq.chatCompletions(chatCompletionRequest)
+        return currentGroqClient.chatCompletions(chatCompletionRequest)
             .map<ChatCompletionChunk, ApiState> { chunk -> ApiState.Success(chunk.choices.getOrNull(0)?.delta?.content ?: "") }
             .catch { throwable -> emit(ApiState.Error(throwable.message ?: "Unknown error")) }
             .onStart { emit(ApiState.Loading) }
@@ -153,7 +183,9 @@ class ChatRepositoryImpl @Inject constructor(
 
     override suspend fun completeOllamaChat(question: Message, history: List<Message>): Flow<ApiState> {
         val platform = checkNotNull(settingRepository.fetchPlatforms().firstOrNull { it.name == ApiType.OLLAMA })
-        ollama = OpenAI(platform.token ?: "", host = OpenAIHost(baseUrl = "${platform.apiUrl}v1/"))
+        // Ensure Ollama's specific path suffix is handled if needed, or make baseUrl more specific in settings
+        val baseUrl = if (platform.apiUrl.endsWith("/v1/")) platform.apiUrl else "${platform.apiUrl}v1/"
+        val currentOllamaClient = getOpenAIClient(platform.token, baseUrl)
 
         val generatedMessages = messageToOpenAICompatibleMessage(ApiType.OLLAMA, history + listOf(question))
         val generatedMessageWithPrompt = listOf(
@@ -166,7 +198,7 @@ class ChatRepositoryImpl @Inject constructor(
             topP = platform.topP?.toDouble()
        )
 
-        return ollama.chatCompletions(chatCompletionRequest)
+        return currentOllamaClient.chatCompletions(chatCompletionRequest)
             .map<ChatCompletionChunk, ApiState> { chunk -> ApiState.Success(chunk.choices.getOrNull(0)?.delta?.content ?: "") }
             .catch { throwable -> emit(ApiState.Error(throwable.message ?: "Unknown error")) }
             .onStart { emit(ApiState.Loading) }
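As the in-code comments above note, Groq and Ollama reuse the same OpenAI-compatible client and therefore the same cache; they stay separate only because their base URLs differ. A small sketch of that behavior (stand-in names, not this PR's code; the endpoint URLs are illustrative assumptions):

```kotlin
// ClientKey mirrors OpenAIClientConfig; FakeClient stands in for the OpenAI client.
data class ClientKey(val token: String, val baseUrl: String)
class FakeClient(val baseUrl: String)

private val clients = mutableMapOf<ClientKey, FakeClient>()

fun getClient(token: String?, baseUrl: String): FakeClient =
    clients.getOrPut(ClientKey(token ?: "", baseUrl)) { FakeClient(baseUrl) }

// Mirrors the suffix handling added in completeOllamaChat.
fun normalizeOllamaBaseUrl(apiUrl: String): String =
    if (apiUrl.endsWith("/v1/")) apiUrl else "${apiUrl}v1/"

fun main() {
    val groq = getClient("groq-token", "https://api.groq.com/openai/v1/")
    val ollama = getClient("", normalizeOllamaBaseUrl("http://localhost:11434/"))

    println(ollama.baseUrl)   // http://localhost:11434/v1/
    println(groq === ollama)  // false: different baseUrl -> different cache entry
    println(clients.size)     // 2
}
```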
ChatScreen.kt
@@ -198,10 +198,11 @@ fun ChatScreen(
             modifier = Modifier.padding(innerPadding),
             state = listState
         ) {
-            groupedMessages.keys.sorted().forEach { key ->
-                if (key % 2 == 0) {
+            groupedMessages.keys.sorted().forEach { groupKey ->
+                val currentGroup = groupedMessages[groupKey]!!
+                if (groupKey % 2 == 0) {
                     // User
-                    item {
+                    item(key = "user-${currentGroup[0].id}") {
                         Row(
                             modifier = Modifier
                                 .fillMaxWidth()
@@ -219,20 +220,22 @@ fun ChatScreen(
                     }
                 } else {
                     // Assistant
-                    item {
+                    // Use a stable key for the group of assistant messages.
+                    // Combining chatId and createdAt of the first message in the group for uniqueness.
+                    item(key = "assistant-group-${currentGroup[0].chatId}-${currentGroup[0].createdAt}") {
                         Row(
                             modifier = Modifier
                                 .fillMaxWidth()
-                                .horizontalScroll(chatBubbleScrollStates[(key - 1) / 2])
+                                .horizontalScroll(chatBubbleScrollStates[(groupKey - 1) / 2])
                         ) {
                             Spacer(modifier = Modifier.width(8.dp))
-                            groupedMessages[key]!!.sortedBy { it.platformType }.forEach { m ->
+                            currentGroup.sortedBy { it.platformType }.forEach { m ->
                                 m.platformType?.let { apiType ->
                                     OpponentChatBubble(
                                         modifier = Modifier
                                             .padding(horizontal = 8.dp, vertical = 12.dp)
                                             .widthIn(max = maximumChatBubbleWidth),
-                                        canRetry = canUseChat && isIdle && key >= latestMessageIndex,
+                                        canRetry = canUseChat && isIdle && groupKey >= latestMessageIndex,
                                         isLoading = false,
                                         apiType = apiType,
                                         text = m.content,
@@ -248,7 +251,7 @@ fun ChatScreen(
             }
 
             if (!isIdle) {
-                item {
+                item(key = "live-user-${userMessage.createdAt}") {
                     Row(
                         modifier = Modifier
                             .fillMaxWidth()
@@ -265,13 +268,15 @@ fun ChatScreen(
                 }
             }
 
-            item {
+            item(key = "live-assistant-group-${userMessage.createdAt}") {
                 Row(
                     modifier = Modifier
                         .fillMaxWidth()
                         .horizontalScroll(chatBubbleScrollStates[(latestMessageIndex + 1) / 2])
                 ) {
                     Spacer(modifier = Modifier.width(8.dp))
+                    // Individual live assistant bubbles are part of this single item's content.
+                    // Keys for them are not LazyColumn keys but could be useful if this Row became a LazyRow.
                     chatViewModel.enabledPlatformsInChat.sorted().forEach { apiType ->
                         val message = when (apiType) {
                             ApiType.OPENAI -> openAIMessage
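The ChatScreen change swaps positional LazyColumn items for explicitly keyed ones. A minimal Compose sketch of the same pattern (simplified names, not this PR's code): a stable key derived from the item's data keeps its state attached to the right row when entries are added above it, instead of being remapped by position:

```kotlin
import androidx.compose.foundation.lazy.LazyColumn
import androidx.compose.material3.Text
import androidx.compose.runtime.Composable

data class ChatRow(val id: Long, val text: String)

@Composable
fun ChatList(rows: List<ChatRow>) {
    LazyColumn {
        rows.forEach { row ->
            // Keying on stable data (the row id) rather than position lets Compose
            // reuse and restore the correct item when the list grows or reorders.
            item(key = "row-${row.id}") {
                Text(row.text)
            }
        }
    }
}
```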