diff --git a/ollama-client/ollama-client-core/src/commonMain/kotlin/com/tddworks/ollama/api/chat/OllamaChatRequest.kt b/ollama-client/ollama-client-core/src/commonMain/kotlin/com/tddworks/ollama/api/chat/OllamaChatRequest.kt
index 9227c76..18f43fe 100644
--- a/ollama-client/ollama-client-core/src/commonMain/kotlin/com/tddworks/ollama/api/chat/OllamaChatRequest.kt
+++ b/ollama-client/ollama-client-core/src/commonMain/kotlin/com/tddworks/ollama/api/chat/OllamaChatRequest.kt
@@ -1,17 +1,39 @@
 package com.tddworks.ollama.api.chat
 
+import com.tddworks.ollama.api.AnySerial
 import kotlinx.serialization.SerialName
 import kotlinx.serialization.Serializable
-import kotlinx.serialization.json.*
-
+/**
+ * https://github.com/ollama/ollama/blob/main/docs/api.md
+ * Generate a chat completion
+ */
 @Serializable
 data class OllamaChatRequest(
+    /**
+     * (required) the model name
+     */
     @SerialName("model") val model: String,
+    /**
+     * (required) a list of messages to send to the model
+     */
    @SerialName("messages") val messages: List<OllamaChatMessage>,
+    /**
+     * Advanced parameters (optional):
+     * the format to return a response in. Currently the only accepted value is json
+     */
     @SerialName("format") val format: String? = null,
-//    @SerialName("options") val options: Map<String, AnySerial>? = null,
+    /**
+     * additional model parameters listed in the documentation for the Modelfile, such as temperature
+     */
+    @SerialName("options") val options: Map<String, AnySerial>? = null,
+    /**
+     * keep_alive: controls how long the model will stay loaded into memory following the request (default: 5m)
+     */
     @SerialName("keep_alive") val keepAlive: String? = null,
+    /**
+     * stream: if false the response will be returned as a single response object, rather than a stream of objects
+     */
     @SerialName("stream") val stream: Boolean? = null,
 ) {
diff --git a/openai-gateway/openai-gateway-core/src/jvmTest/kotlin/com/tddworks/ollama/api/chat/OllamaChatRequestTest.kt b/openai-gateway/openai-gateway-core/src/jvmTest/kotlin/com/tddworks/ollama/api/chat/OllamaChatRequestTest.kt
index 955074d..8b0f324 100644
--- a/openai-gateway/openai-gateway-core/src/jvmTest/kotlin/com/tddworks/ollama/api/chat/OllamaChatRequestTest.kt
+++ b/openai-gateway/openai-gateway-core/src/jvmTest/kotlin/com/tddworks/ollama/api/chat/OllamaChatRequestTest.kt
@@ -1,10 +1,48 @@
 package com.tddworks.ollama.api.chat
 
+import com.tddworks.ollama.api.chat.internal.JsonLenient
 import org.junit.jupiter.api.Test
 import kotlin.test.assertEquals
 
 class OllamaChatRequestTest {
 
+    @Test
+    fun `should convert json to object`() {
+        // given
+        val json = """
+            {
+              "model": "llama3",
+              "messages": [{
+                "role": "user",
+                "content": "Why is the sky blue?"
+              }],
+              "format": "json",
+              "keep_alive": "5m",
+              "stream": false,
+              "options": {
+                "num_predict": 100,
+                "temperature": 0.8,
+                "stop": ["\n", "user:"]
+              }
+            }
+        """.trimIndent()
+
+        // when
+        val request = JsonLenient.decodeFromString(OllamaChatRequest.serializer(), json)
+
+        // then
+        assertEquals("llama3", request.model)
+        assertEquals(1, request.messages.size)
+        assertEquals("user", request.messages[0].role)
+        assertEquals("Why is the sky blue?", request.messages[0].content)
+        assertEquals("json", request.format)
+        assertEquals("5m", request.keepAlive)
+
+        assertEquals(100, request.options?.get("num_predict"))
+        assertEquals(0.8, request.options?.get("temperature"))
+        assertEquals(listOf("\n", "user:"), request.options?.get("stop"))
+    }
+
     @Test
     fun `should return dummy request`() {
         // given
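
Note for reviewers: the new `options` field is typed `Map<String, AnySerial>?` so that heterogeneous Modelfile parameters (ints, doubles, strings, string lists) can travel in one map. The repository's actual `AnySerial` and `JsonLenient` definitions are not part of this diff; the sketch below is one way such a type could be wired up with kotlinx.serialization, and every name in it is an assumption made for illustration, not the project's real implementation.

import kotlinx.serialization.KSerializer
import kotlinx.serialization.Serializable
import kotlinx.serialization.descriptors.SerialDescriptor
import kotlinx.serialization.descriptors.buildClassSerialDescriptor
import kotlinx.serialization.encoding.Decoder
import kotlinx.serialization.encoding.Encoder
import kotlinx.serialization.json.*

// Hypothetical stand-in for com.tddworks.ollama.api.AnySerial: an Any that
// round-trips through JsonElement so option values keep their JSON shape.
typealias AnySerial = @Serializable(with = AnySerializer::class) Any

object AnySerializer : KSerializer<Any> {
    override val descriptor: SerialDescriptor = buildClassSerialDescriptor("AnySerial")

    // Works only with the JSON format: down-cast to the JSON-specific
    // encoder/decoder to move whole JsonElement trees in and out.
    override fun serialize(encoder: Encoder, value: Any) =
        (encoder as JsonEncoder).encodeJsonElement(toJsonElement(value))

    override fun deserialize(decoder: Decoder): Any =
        fromJsonElement((decoder as JsonDecoder).decodeJsonElement())

    private fun toJsonElement(value: Any?): JsonElement = when (value) {
        null -> JsonNull
        is Boolean -> JsonPrimitive(value)
        is Number -> JsonPrimitive(value)
        is String -> JsonPrimitive(value)
        is List<*> -> JsonArray(value.map(::toJsonElement))
        is Map<*, *> -> JsonObject(value.entries.associate { (k, v) -> k.toString() to toJsonElement(v) })
        else -> error("Unsupported option value: $value")
    }

    private fun fromJsonElement(element: JsonElement): Any = when (element) {
        is JsonObject -> element.mapValues { fromJsonElement(it.value) }
        is JsonArray -> element.map(::fromJsonElement)
        is JsonNull -> error("null option values are not supported")
        is JsonPrimitive -> when {
            element.isString -> element.content
            element.booleanOrNull != null -> element.boolean
            element.intOrNull != null -> element.int   // keeps 100 an Int, as the test expects
            element.longOrNull != null -> element.long
            else -> element.double
        }
    }
}

// A lenient Json instance comparable to the JsonLenient used by the test.
val JsonLenient = Json {
    isLenient = true
    ignoreUnknownKeys = true
}

Under these assumptions, decoding the test's `options` block yields `num_predict` as an `Int`, `temperature` as a `Double`, and `stop` as a `List<String>`, which is exactly what the new assertions check.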