chore(tests): Mock LLM in tests for PRs
This saves time when testing on CPU, which is the only sensible option on GitHub CI for PRs. For releases, or once a commit is merged, we could use an external runner with a GPU, or simply wait.

Signed-off-by: Richard Palethorpe <io@richiejp.com>
```diff
@@ -7,6 +7,7 @@ import (
 	"github.com/mudler/LocalAGI/core/types"
 	"github.com/sashabaranov/go-openai"
+	"github.com/mudler/LocalAGI/pkg/llm"
 )
 
 type Option func(*options) error
@@ -19,6 +20,7 @@ type llmOptions struct {
 }
 
 type options struct {
+	llmClient              llm.LLMClient
 	LLMAPI                 llmOptions
 	character              Character
 	randomIdentityGuidance string
@@ -68,6 +70,14 @@ type options struct {
 	lastMessageDuration time.Duration
 }
 
+// WithLLMClient allows injecting a custom LLM client (e.g. for testing)
+func WithLLMClient(client llm.LLMClient) Option {
+	return func(o *options) error {
+		o.llmClient = client
+		return nil
+	}
+}
+
 func (o *options) SeparatedMultimodalModel() bool {
 	return o.LLMAPI.MultimodalModel != "" && o.LLMAPI.Model != o.LLMAPI.MultimodalModel
 }
```
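To show how the new option slots into Go's functional-options pattern, here is a minimal, self-contained sketch. Only `Option` and `WithLLMClient` mirror the diff above; the `LLMClient` interface body, the `mockLLM` test double, and the `newAgent` constructor are illustrative assumptions, not LocalAGI's actual API.

```go
package main

import "fmt"

// LLMClient is a stand-in for llm.LLMClient; the real interface in
// LocalAGI may declare different methods (assumption for illustration).
type LLMClient interface {
	Complete(prompt string) (string, error)
}

// mockLLM is a hypothetical test double that returns a canned reply
// instead of calling a model, so PR tests run quickly on CPU-only CI.
type mockLLM struct{ reply string }

func (m mockLLM) Complete(prompt string) (string, error) { return m.reply, nil }

type options struct{ llmClient LLMClient }

// Option mirrors the Option type from the diff above.
type Option func(*options) error

// WithLLMClient mirrors the function added in the diff: it injects a
// custom client (e.g. a mock) into the private options struct.
func WithLLMClient(client LLMClient) Option {
	return func(o *options) error {
		o.llmClient = client
		return nil
	}
}

// newAgent is a hypothetical constructor showing how Option values
// are applied in sequence, with each able to report an error.
func newAgent(opts ...Option) (*options, error) {
	o := &options{}
	for _, opt := range opts {
		if err := opt(o); err != nil {
			return nil, err
		}
	}
	return o, nil
}

func main() {
	a, err := newAgent(WithLLMClient(mockLLM{reply: "stubbed reply"}))
	if err != nil {
		panic(err)
	}
	out, _ := a.llmClient.Complete("hello")
	fmt.Println(out) // prints "stubbed reply" without touching a real model
}
```

Because the injected client satisfies the same interface the agent uses in production, tests exercise the real option-application and agent logic while only the model call is stubbed out.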