chore(tests): Mock LLM in tests for PRs

This saves time when testing on CPU, which is the only sensible option
on GitHub CI for PRs. For releases, or once a commit is merged, we could
use an external runner with a GPU, or simply wait.

Signed-off-by: Richard Palethorpe <io@richiejp.com>
Richard Palethorpe, 2025-04-25 19:43:46 +01:00
commit 5698d0b832 (parent c23e655f44)
12 changed files with 429 additions and 72 deletions


@@ -1,13 +1,33 @@
 package llm
 
 import (
+	"context"
 	"net/http"
 	"time"
+
 	"github.com/mudler/LocalAGI/pkg/xlog"
 	"github.com/sashabaranov/go-openai"
 )
 
-func NewClient(APIKey, URL, timeout string) *openai.Client {
+type LLMClient interface {
+	CreateChatCompletion(ctx context.Context, req openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error)
+	CreateImage(ctx context.Context, req openai.ImageRequest) (openai.ImageResponse, error)
+}
+
+type realClient struct {
+	*openai.Client
+}
+
+func (r *realClient) CreateChatCompletion(ctx context.Context, req openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error) {
+	return r.Client.CreateChatCompletion(ctx, req)
+}
+
+func (r *realClient) CreateImage(ctx context.Context, req openai.ImageRequest) (openai.ImageResponse, error) {
+	return r.Client.CreateImage(ctx, req)
+}
+
+// NewClient returns a real OpenAI client as LLMClient
+func NewClient(APIKey, URL, timeout string) LLMClient {
 	// Set up OpenAI client
 	if APIKey == "" {
 		//log.Fatal("OPENAI_API_KEY environment variable not set")
@@ -18,11 +38,12 @@ func NewClient(APIKey, URL, timeout string) *openai.Client {
 	dur, err := time.ParseDuration(timeout)
 	if err != nil {
 		xlog.Error("Failed to parse timeout", "error", err)
 		dur = 150 * time.Second
 	}
 
 	config.HTTPClient = &http.Client{
 		Timeout: dur,
 	}
-	return openai.NewClientWithConfig(config)
+
+	return &realClient{openai.NewClientWithConfig(config)}
 }
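
With NewClient returning the LLMClient interface rather than the concrete
*openai.Client, PR test runs can swap in a canned implementation instead of
contacting a live model. A minimal sketch of such a test double, assuming
only the interface shown above; the mockClient name and its fields are
illustrative and not part of this commit:

	package llm

	import (
		"context"

		"github.com/sashabaranov/go-openai"
	)

	// mockClient is a hypothetical test double that satisfies LLMClient
	// by returning canned responses, so tests never hit a real model.
	type mockClient struct {
		chatResp  openai.ChatCompletionResponse
		imageResp openai.ImageResponse
		err       error
	}

	func (m *mockClient) CreateChatCompletion(ctx context.Context, req openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error) {
		return m.chatResp, m.err
	}

	func (m *mockClient) CreateImage(ctx context.Context, req openai.ImageRequest) (openai.ImageResponse, error) {
		return m.imageResp, m.err
	}

Any code that takes an LLMClient can then be handed, for example,
&mockClient{chatResp: someCannedResponse} in tests, which is what makes
CPU-only CI runs fast enough for PRs.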