Mirror of https://github.com/aljazceru/opencode.git (synced 2025-12-21 09:44:21 +01:00)

Commit: reimplement agent, provider and add file history
@@ -2,89 +2,65 @@ package provider

 import (
     "context"
     "encoding/json"
     "errors"
     "fmt"
     "io"
     "time"

-    "github.com/kujtimiihoxha/termai/internal/llm/models"
+    "github.com/kujtimiihoxha/termai/internal/config"
     "github.com/kujtimiihoxha/termai/internal/llm/tools"
     "github.com/kujtimiihoxha/termai/internal/logging"
     "github.com/kujtimiihoxha/termai/internal/message"
     "github.com/openai/openai-go"
     "github.com/openai/openai-go/option"
 )

-type openaiProvider struct {
-    client        openai.Client
-    model         models.Model
-    maxTokens     int64
-    baseURL       string
-    apiKey        string
-    systemMessage string
-}
+type openaiOptions struct {
+    baseURL      string
+    disableCache bool
+}

-type OpenAIOption func(*openaiProvider)
+type OpenAIOption func(*openaiOptions)

-func NewOpenAIProvider(opts ...OpenAIOption) (Provider, error) {
-    provider := &openaiProvider{
-        maxTokens: 5000,
-    }
-
-    for _, opt := range opts {
-        opt(provider)
-    }
-
-    clientOpts := []option.RequestOption{
-        option.WithAPIKey(provider.apiKey),
-    }
-    if provider.baseURL != "" {
-        clientOpts = append(clientOpts, option.WithBaseURL(provider.baseURL))
-    }
-
-    provider.client = openai.NewClient(clientOpts...)
-    if provider.systemMessage == "" {
-        return nil, errors.New("system message is required")
-    }
-
-    return provider, nil
-}
+type openaiClient struct {
+    providerOptions providerClientOptions
+    options         openaiOptions
+    client          openai.Client
+}

-func WithOpenAISystemMessage(message string) OpenAIOption {
-    return func(p *openaiProvider) {
-        p.systemMessage = message
-    }
-}
+type OpenAIClient ProviderClient

+func newOpenAIClient(opts providerClientOptions) OpenAIClient {
+    openaiOpts := openaiOptions{}
+    for _, o := range opts.openaiOptions {
+        o(&openaiOpts)
+    }
+
+    openaiClientOptions := []option.RequestOption{}
+    if opts.apiKey != "" {
+        openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
+    }
+    if openaiOpts.baseURL != "" {
+        openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(openaiOpts.baseURL))
+    }
+
+    client := openai.NewClient(openaiClientOptions...)
+    return &openaiClient{
+        providerOptions: opts,
+        options:         openaiOpts,
+        client:          client,
+    }
+}

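For orientation, a minimal sketch of how the refactored constructor is meant to be driven. providerClientOptions is defined elsewhere in this commit; the field names below (apiKey, systemMessage, maxTokens, openaiOptions) are the ones this diff actually reads, and everything else is assumed for illustration.

    // Hypothetical wiring; the literal values are placeholders, not part of the diff.
    opts := providerClientOptions{
        apiKey:        os.Getenv("OPENAI_API_KEY"), // assumed to come from the environment
        systemMessage: "You are a helpful assistant.",
        maxTokens:     4096,
        openaiOptions: []OpenAIOption{
            WithOpenAIBaseURL("https://api.openai.com/v1"),
        },
    }
    client := newOpenAIClient(opts) // applies each OpenAIOption to openaiOptions in order
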
-func WithOpenAIMaxTokens(maxTokens int64) OpenAIOption {
-    return func(p *openaiProvider) {
-        p.maxTokens = maxTokens
-    }
-}
-
-func WithOpenAIModel(model models.Model) OpenAIOption {
-    return func(p *openaiProvider) {
-        p.model = model
-    }
-}
-
-func WithOpenAIBaseURL(baseURL string) OpenAIOption {
-    return func(p *openaiProvider) {
-        p.baseURL = baseURL
-    }
-}
-
-func WithOpenAIKey(apiKey string) OpenAIOption {
-    return func(p *openaiProvider) {
-        p.apiKey = apiKey
-    }
-}
-
-func (p *openaiProvider) convertToOpenAIMessages(messages []message.Message) []openai.ChatCompletionMessageParamUnion {
-    var chatMessages []openai.ChatCompletionMessageParamUnion
-
-    chatMessages = append(chatMessages, openai.SystemMessage(p.systemMessage))
+func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
+    // Add system message first
+    openaiMessages = append(openaiMessages, openai.SystemMessage(o.providerOptions.systemMessage))

     for _, msg := range messages {
         switch msg.Role {
         case message.User:
-            chatMessages = append(chatMessages, openai.UserMessage(msg.Content().String()))
+            openaiMessages = append(openaiMessages, openai.UserMessage(msg.Content().String()))

         case message.Assistant:
             assistantMsg := openai.ChatCompletionAssistantMessageParam{
@@ -111,23 +87,23 @@ func (p *openaiProvider) convertToOpenAIMessages(messages []message.Message) []o
                 }
             }

-            chatMessages = append(chatMessages, openai.ChatCompletionMessageParamUnion{
+            openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
                 OfAssistant: &assistantMsg,
             })

         case message.Tool:
             for _, result := range msg.ToolResults() {
-                chatMessages = append(chatMessages,
+                openaiMessages = append(openaiMessages,
                     openai.ToolMessage(result.Content, result.ToolCallID),
                 )
             }
         }
     }

-    return chatMessages
+    return
 }

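Worth noting: convertMessages now uses a named return and always seeds index 0 with the system prompt. A rough sketch of the resulting slice shape, where history is a hypothetical []message.Message transcript:

    msgs := o.convertMessages(history)
    // msgs[0] is the system prompt; user, assistant and tool turns follow in
    // transcript order, with one openai.ToolMessage emitted per tool result.
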
-func (p *openaiProvider) convertToOpenAITools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
+func (o *openaiClient) convertTools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
     openaiTools := make([]openai.ChatCompletionToolParam, len(tools))

     for i, tool := range tools {
@@ -148,133 +124,238 @@ func (p *openaiProvider) convertToOpenAITools(tools []tools.BaseTool) []openai.C
     return openaiTools
 }

-func (p *openaiProvider) extractTokenUsage(usage openai.CompletionUsage) TokenUsage {
-    cachedTokens := int64(0)
-
-    cachedTokens = usage.PromptTokensDetails.CachedTokens
-    inputTokens := usage.PromptTokens - cachedTokens
-
-    return TokenUsage{
-        InputTokens:         inputTokens,
-        OutputTokens:        usage.CompletionTokens,
-        CacheCreationTokens: 0, // OpenAI doesn't provide this directly
-        CacheReadTokens:     cachedTokens,
-    }
-}
+func (o *openaiClient) finishReason(reason string) message.FinishReason {
+    switch reason {
+    case "stop":
+        return message.FinishReasonEndTurn
+    case "length":
+        return message.FinishReasonMaxTokens
+    case "tool_calls":
+        return message.FinishReasonToolUse
+    default:
+        return message.FinishReasonUnknown
+    }
+}

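The new finishReason helper mirrors the finish_reason strings the Chat Completions API returns; anything unrecognized falls through to Unknown. As a compact reference (a sketch, not part of the diff):

    _ = map[string]message.FinishReason{
        "stop":       message.FinishReasonEndTurn,   // model ended its turn normally
        "length":     message.FinishReasonMaxTokens, // truncated by MaxTokens
        "tool_calls": message.FinishReasonToolUse,   // model requested tool execution
    } // any other string -> message.FinishReasonUnknown
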
-func (p *openaiProvider) SendMessages(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (*ProviderResponse, error) {
-    messages = cleanupMessages(messages)
-    chatMessages := p.convertToOpenAIMessages(messages)
-    openaiTools := p.convertToOpenAITools(tools)
+func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
+    return openai.ChatCompletionNewParams{
+        Model:     openai.ChatModel(o.providerOptions.model.APIModel),
+        Messages:  messages,
+        MaxTokens: openai.Int(o.providerOptions.maxTokens),
+        Tools:     tools,
+    }
+}

-    params := openai.ChatCompletionNewParams{
-        Model:     openai.ChatModel(p.model.APIModel),
-        Messages:  chatMessages,
-        MaxTokens: openai.Int(p.maxTokens),
-        Tools:     openaiTools,
-    }
+func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
+    params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
+    cfg := config.Get()
+    if cfg.Debug {
+        jsonData, _ := json.Marshal(params)
+        logging.Debug("Prepared messages", "messages", string(jsonData))
+    }
+    attempts := 0
+    for {
+        attempts++
+        openaiResponse, err := o.client.Chat.Completions.New(
+            ctx,
+            params,
+        )
+        // If there is an error we are going to see if we can retry the call
+        if err != nil {
+            retry, after, retryErr := o.shouldRetry(attempts, err)
+            if retryErr != nil {
+                return nil, retryErr
+            }
+            if retry {
+                logging.WarnPersist("Retrying due to rate limit... attempt %d of %d", logging.PersistTimeArg, time.Millisecond*time.Duration(after+100))
+                select {
+                case <-ctx.Done():
+                    return nil, ctx.Err()
+                case <-time.After(time.Duration(after) * time.Millisecond):
+                    continue
+                }
+            }
+            return nil, retryErr
+        }
+
+        content := ""
+        if openaiResponse.Choices[0].Message.Content != "" {
+            content = openaiResponse.Choices[0].Message.Content
+        }
+
+        return &ProviderResponse{
+            Content:      content,
+            ToolCalls:    o.toolCalls(*openaiResponse),
+            Usage:        o.usage(*openaiResponse),
+            FinishReason: o.finishReason(string(openaiResponse.Choices[0].FinishReason)),
+        }, nil
+    }
+}

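send blocks until a completion arrives or retries are exhausted, so a call site stays simple. A sketch, assuming the ProviderClient interface exposes send and that history and activeTools are hypothetical inputs:

    resp, err := client.send(ctx, history, activeTools)
    if err != nil {
        return err // shouldRetry has already given up by the time this surfaces
    }
    logging.Debug("completion", "finish", resp.FinishReason, "tool_calls", len(resp.ToolCalls))
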
+func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
+    params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
+    params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
+        IncludeUsage: openai.Bool(true),
+    }

-    response, err := p.client.Chat.Completions.New(ctx, params)
-    if err != nil {
-        return nil, err
-    }
+    cfg := config.Get()
+    if cfg.Debug {
+        jsonData, _ := json.Marshal(params)
+        logging.Debug("Prepared messages", "messages", string(jsonData))
+    }

-    content := ""
-    if response.Choices[0].Message.Content != "" {
-        content = response.Choices[0].Message.Content
-    }
+    attempts := 0
+    eventChan := make(chan ProviderEvent)
+
+    go func() {
+        for {
+            attempts++
+            openaiStream := o.client.Chat.Completions.NewStreaming(
+                ctx,
+                params,
+            )
+
+            acc := openai.ChatCompletionAccumulator{}
+            currentContent := ""
+            toolCalls := make([]message.ToolCall, 0)
+
+            for openaiStream.Next() {
+                chunk := openaiStream.Current()
+                acc.AddChunk(chunk)
+
+                if tool, ok := acc.JustFinishedToolCall(); ok {
+                    toolCalls = append(toolCalls, message.ToolCall{
+                        ID:    tool.Id,
+                        Name:  tool.Name,
+                        Input: tool.Arguments,
+                        Type:  "function",
+                    })
+                }
+
+                for _, choice := range chunk.Choices {
+                    if choice.Delta.Content != "" {
+                        eventChan <- ProviderEvent{
+                            Type:    EventContentDelta,
+                            Content: choice.Delta.Content,
+                        }
+                        currentContent += choice.Delta.Content
+                    }
+                }
+            }
+
+            err := openaiStream.Err()
+            if err == nil || errors.Is(err, io.EOF) {
+                // Stream completed successfully
+                eventChan <- ProviderEvent{
+                    Type: EventComplete,
+                    Response: &ProviderResponse{
+                        Content:      currentContent,
+                        ToolCalls:    toolCalls,
+                        Usage:        o.usage(acc.ChatCompletion),
+                        FinishReason: o.finishReason(string(acc.ChatCompletion.Choices[0].FinishReason)),
+                    },
+                }
+                close(eventChan)
+                return
+            }
+
+            // If there is an error we are going to see if we can retry the call
+            retry, after, retryErr := o.shouldRetry(attempts, err)
+            if retryErr != nil {
+                eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
+                close(eventChan)
+                return
+            }
+            if retry {
+                logging.WarnPersist("Retrying due to rate limit... attempt %d of %d", logging.PersistTimeArg, time.Millisecond*time.Duration(after+100))
+                select {
+                case <-ctx.Done():
+                    // context cancelled
+                    if ctx.Err() == nil {
+                        eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
+                    }
+                    close(eventChan)
+                    return
+                case <-time.After(time.Duration(after) * time.Millisecond):
+                    continue
+                }
+            }
+            eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
+            close(eventChan)
+            return
+        }
+    }()

+    return eventChan
+}

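The streaming path pushes ProviderEvent values and closes the channel itself on completion or error, so a consumer is a plain range loop. A sketch using only names this diff defines (history and activeTools remain hypothetical):

    for event := range client.stream(ctx, history, activeTools) {
        switch event.Type {
        case EventContentDelta:
            fmt.Print(event.Content) // render partial text as it streams in
        case EventError:
            return event.Error // producer closes eventChan after sending this
        case EventComplete:
            logging.Debug("stream done",
                "finish", event.Response.FinishReason,
                "tool_calls", len(event.Response.ToolCalls))
        }
    }
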
+func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
+    var apierr *openai.Error
+    if !errors.As(err, &apierr) {
+        return false, 0, err
+    }
+
+    if apierr.StatusCode != 429 && apierr.StatusCode != 500 {
+        return false, 0, err
+    }
+
+    if attempts > maxRetries {
+        return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
+    }
+
+    retryMs := 0
+    retryAfterValues := apierr.Response.Header.Values("Retry-After")
+
+    backoffMs := 2000 * (1 << (attempts - 1))
+    jitterMs := int(float64(backoffMs) * 0.2)
+    retryMs = backoffMs + jitterMs
+    if len(retryAfterValues) > 0 {
+        if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
+            retryMs = retryMs * 1000
+        }
+    }
+    return true, int64(retryMs), nil
+}

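Without a Retry-After header the wait doubles per attempt with a fixed 20% jitter added on top (deterministic, not random); a Retry-After value, which arrives in seconds, replaces the computed backoff entirely. Plugging attempts into the formula above:

    for attempts := 1; attempts <= 3; attempts++ {
        backoffMs := 2000 * (1 << (attempts - 1))
        jitterMs := int(float64(backoffMs) * 0.2)
        fmt.Println(backoffMs + jitterMs) // 2400, 4800, 9600
    }
    // A header of Retry-After: "5" would instead yield 5 * 1000 = 5000ms.
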
+func (o *openaiClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
     var toolCalls []message.ToolCall
-    if len(response.Choices[0].Message.ToolCalls) > 0 {
-        toolCalls = make([]message.ToolCall, len(response.Choices[0].Message.ToolCalls))
-        for i, call := range response.Choices[0].Message.ToolCalls {
-            toolCalls[i] = message.ToolCall{
+    if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
+        for _, call := range completion.Choices[0].Message.ToolCalls {
+            toolCall := message.ToolCall{
                 ID:    call.ID,
                 Name:  call.Function.Name,
                 Input: call.Function.Arguments,
                 Type:  "function",
             }
+            toolCalls = append(toolCalls, toolCall)
         }
     }

-    tokenUsage := p.extractTokenUsage(response.Usage)
-
-    return &ProviderResponse{
-        Content:   content,
-        ToolCalls: toolCalls,
-        Usage:     tokenUsage,
-    }, nil
+    return toolCalls
 }

-func (p *openaiProvider) StreamResponse(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (<-chan ProviderEvent, error) {
-    messages = cleanupMessages(messages)
-    chatMessages := p.convertToOpenAIMessages(messages)
-    openaiTools := p.convertToOpenAITools(tools)
+func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
+    cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
+    inputTokens := completion.Usage.PromptTokens - cachedTokens

-    params := openai.ChatCompletionNewParams{
-        Model:     openai.ChatModel(p.model.APIModel),
-        Messages:  chatMessages,
-        MaxTokens: openai.Int(p.maxTokens),
-        Tools:     openaiTools,
-        StreamOptions: openai.ChatCompletionStreamOptionsParam{
-            IncludeUsage: openai.Bool(true),
-        },
-    }
+    return TokenUsage{
+        InputTokens:         inputTokens,
+        OutputTokens:        completion.Usage.CompletionTokens,
+        CacheCreationTokens: 0, // OpenAI doesn't provide this directly
+        CacheReadTokens:     cachedTokens,
+    }
+}

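usage subtracts cached prompt tokens from the input count so cache reads are not double-counted. A worked example with assumed numbers:

    // If completion.Usage.PromptTokens == 1200 and
    // completion.Usage.PromptTokensDetails.CachedTokens == 200:
    u := o.usage(completion)
    // u.InputTokens     == 1000 (1200 - 200)
    // u.CacheReadTokens == 200
    // u.OutputTokens    == completion.Usage.CompletionTokens, passed through
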
-    stream := p.client.Chat.Completions.NewStreaming(ctx, params)
-
-    eventChan := make(chan ProviderEvent)
-
-    toolCalls := make([]message.ToolCall, 0)
-    go func() {
-        defer close(eventChan)
-
-        acc := openai.ChatCompletionAccumulator{}
-        currentContent := ""
-
-        for stream.Next() {
-            chunk := stream.Current()
-            acc.AddChunk(chunk)
-
-            if tool, ok := acc.JustFinishedToolCall(); ok {
-                toolCalls = append(toolCalls, message.ToolCall{
-                    ID:    tool.Id,
-                    Name:  tool.Name,
-                    Input: tool.Arguments,
-                    Type:  "function",
-                })
-            }
-
-            for _, choice := range chunk.Choices {
-                if choice.Delta.Content != "" {
-                    eventChan <- ProviderEvent{
-                        Type:    EventContentDelta,
-                        Content: choice.Delta.Content,
-                    }
-                    currentContent += choice.Delta.Content
-                }
-            }
-        }
-
-        if err := stream.Err(); err != nil {
-            eventChan <- ProviderEvent{
-                Type:  EventError,
-                Error: err,
-            }
-            return
-        }
-
-        tokenUsage := p.extractTokenUsage(acc.Usage)
-
-        eventChan <- ProviderEvent{
-            Type: EventComplete,
-            Response: &ProviderResponse{
-                Content:   currentContent,
-                ToolCalls: toolCalls,
-                Usage:     tokenUsage,
-            },
-        }
-    }()
-
-    return eventChan, nil
-}

+func WithOpenAIBaseURL(baseURL string) OpenAIOption {
+    return func(options *openaiOptions) {
+        options.baseURL = baseURL
+    }
+}

+func WithOpenAIDisableCache() OpenAIOption {
+    return func(options *openaiOptions) {
+        options.disableCache = true
+    }
+}
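The option funcs now target openaiOptions instead of the provider struct, and newOpenAIClient applies them in slice order, so later options override earlier ones. A hypothetical composition against an OpenAI-compatible endpoint:

    openaiOpts := []OpenAIOption{
        WithOpenAIBaseURL("http://localhost:8080/v1"), // assumed local proxy URL
        WithOpenAIDisableCache(),
    }
    // passed through providerClientOptions.openaiOptions to newOpenAIClient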