Files
LocalAGI/pkg/llm/mock_client.go
Richard Palethorpe 5698d0b832 chore(tests): Mock LLM in tests for PRs
This saves time when testing on CPU, which is the only sensible option
on GitHub CI for PRs. For releases, or once the commit is merged, we
could use an external runner with a GPU — or simply wait.

Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-05-12 13:51:45 +01:00

26 lines
819 B
Go

package llm
import (
"context"
"github.com/sashabaranov/go-openai"
)
// MockClient is a test double for the OpenAI client used by this package.
// Each exported field, when non-nil, overrides the corresponding method's
// behavior; when nil, the method returns a zero-value response and no error.
type MockClient struct {
// CreateChatCompletionFunc, if set, is invoked by CreateChatCompletion.
CreateChatCompletionFunc func(ctx context.Context, req openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error)
// CreateImageFunc, if set, is invoked by CreateImage.
CreateImageFunc func(ctx context.Context, req openai.ImageRequest) (openai.ImageResponse, error)
}
// CreateChatCompletion delegates to CreateChatCompletionFunc when it is set.
// With no override configured, it returns a zero-value response and nil error.
func (m *MockClient) CreateChatCompletion(ctx context.Context, req openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error) {
fn := m.CreateChatCompletionFunc
if fn == nil {
// No stub provided: succeed with an empty response.
return openai.ChatCompletionResponse{}, nil
}
return fn(ctx, req)
}
// CreateImage delegates to CreateImageFunc when it is set.
// With no override configured, it returns a zero-value response and nil error.
func (m *MockClient) CreateImage(ctx context.Context, req openai.ImageRequest) (openai.ImageResponse, error) {
fn := m.CreateImageFunc
if fn == nil {
// No stub provided: succeed with an empty response.
return openai.ImageResponse{}, nil
}
return fn(ctx, req)
}