-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathollama.go
68 lines (58 loc) · 1.31 KB
/
ollama.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
package summaraizer
import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"
)
// Ollama is a provider that uses Ollama as an AI provider.
//
// All three fields must be set by the caller; the zero value is not usable.
type Ollama struct {
	Model  string // The AI model to use (passed as "model" in the generate request).
	Prompt string // The prompt template to use for the AI model; resolved against the comments by resolvePrompt.
	Url    string // The base URL where Ollama is accessible, e.g. "http://localhost:11434".
}
// Summarize decodes comments from reader, resolves the configured prompt
// against them, and asks the Ollama instance at o.Url to generate a summary.
// It posts a non-streaming request to {Url}/api/generate and returns the
// model's response text, or an error if the request fails or Ollama answers
// with a non-200 status.
func (o *Ollama) Summarize(reader io.Reader) (string, error) {
	return decodeAndSummarize(reader, func(comments Comments) (string, error) {
		prompt, err := resolvePrompt(o.Prompt, comments)
		if err != nil {
			return "", err
		}

		reqBody, err := json.Marshal(ollamaRequest{
			Model:  o.Model,
			Prompt: prompt,
			Stream: false, // request one complete response rather than a token stream
		})
		if err != nil {
			return "", err
		}

		// Use a client with an explicit timeout: http.Post uses the default
		// client, which has none, so a hung Ollama server would block forever.
		// Generation can be slow, hence a generous bound.
		client := &http.Client{Timeout: 5 * time.Minute}
		resp, err := client.Post(
			fmt.Sprintf("%s/api/generate", o.Url),
			"application/json",
			bytes.NewReader(reqBody),
		)
		if err != nil {
			return "", err
		}
		defer resp.Body.Close()

		// Previously the status code was ignored, so an error response (e.g.
		// 404 for an unknown model) silently produced an empty summary.
		if resp.StatusCode != http.StatusOK {
			body, _ := io.ReadAll(resp.Body) // best effort: include Ollama's error text
			return "", fmt.Errorf("ollama: unexpected status %s: %s", resp.Status, body)
		}

		// Decode straight from the stream; no need to buffer the whole body.
		var response ollamaResponse
		if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
			return "", err
		}
		return response.Response, nil
	})
}
// ollamaRequest is the JSON body sent to Ollama's /api/generate endpoint.
type ollamaRequest struct {
	Model  string `json:"model"`  // name of the model to run
	Prompt string `json:"prompt"` // fully resolved prompt text
	Stream bool   `json:"stream"` // false: return a single complete response
}
// ollamaResponse holds the subset of Ollama's /api/generate reply that we
// consume; all other response fields are ignored during unmarshalling.
type ollamaResponse struct {
	Response string `json:"response"` // the generated summary text
}