diff --git a/.golangci.yaml b/.golangci.yaml
index 7c8215d4f..b259f2c63 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -21,6 +21,7 @@ linters:
     - varnamelen
     - nlreturn
     - gomnd
+    - goerr113
     - wrapcheck # TODO: we should probably enable this one (at least for new code).
    - testpackage
     - nolintlint # see https://github.com/golangci/golangci-lint/issues/3228.
diff --git a/llms/anthropic/anthropicllm.go b/llms/anthropic/anthropicllm.go
index 29d072483..3a8ba2f55 100644
--- a/llms/anthropic/anthropicllm.go
+++ b/llms/anthropic/anthropicllm.go
@@ -50,7 +50,7 @@ func newClient(opts ...Option) (*anthropicclient.Client, error) {
 
 // Call requests a completion for the given prompt.
 func (o *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
-	return llms.CallLLM(ctx, o, prompt, options...)
+	return llms.GenerateFromSinglePrompt(ctx, o, prompt, options...)
 }
 
 // GenerateContent implements the Model interface.
diff --git a/llms/cohere/coherellm.go b/llms/cohere/coherellm.go
index 9a80a92f3..ba2391b10 100644
--- a/llms/cohere/coherellm.go
+++ b/llms/cohere/coherellm.go
@@ -25,7 +25,7 @@ type LLM struct {
 var _ llms.Model = (*LLM)(nil)
 
 func (o *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
-	return llms.CallLLM(ctx, o, prompt, options...)
+	return llms.GenerateFromSinglePrompt(ctx, o, prompt, options...)
 }
 
 // GenerateContent implements the Model interface.
diff --git a/llms/ernie/erniellm.go b/llms/ernie/erniellm.go
index 34ffee1d3..17b7c711c 100644
--- a/llms/ernie/erniellm.go
+++ b/llms/ernie/erniellm.go
@@ -60,7 +60,7 @@ doc: https://cloud.baidu.com/doc/WENXINWORKSHOP/s/flfmc9do2`, ernieclient.ErrNot
 }
 
 func (o *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
-	return llms.CallLLM(ctx, o, prompt, options...)
+	return llms.GenerateFromSinglePrompt(ctx, o, prompt, options...)
 }
 
 // GenerateContent implements the Model interface.
diff --git a/llms/googleai/googleai_llm.go b/llms/googleai/googleai_llm.go
index 048c37a31..eb8161025 100644
--- a/llms/googleai/googleai_llm.go
+++ b/llms/googleai/googleai_llm.go
@@ -1,7 +1,7 @@
 // package googleai implements a langchaingo provider for Google AI LLMs.
 // See https://ai.google.dev/ for more details and documetnation.
 //
-//nolint:goerr113, lll
+// nolint: lll
 package googleai
 
 import (
@@ -64,7 +64,7 @@ func NewGoogleAI(ctx context.Context, opts ...Option) (*GoogleAI, error) {
 
 // Call implements the [llms.Model] interface.
 func (g *GoogleAI) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
-	return llms.CallLLM(ctx, g, prompt, options...)
+	return llms.GenerateFromSinglePrompt(ctx, g, prompt, options...)
 }
 
 // GenerateContent implements the [llms.Model] interface.
diff --git a/llms/huggingface/huggingfacellm.go b/llms/huggingface/huggingfacellm.go
index de00226e6..d33763824 100644
--- a/llms/huggingface/huggingfacellm.go
+++ b/llms/huggingface/huggingfacellm.go
@@ -25,7 +25,7 @@ var _ llms.Model = (*LLM)(nil)
 
 // Call implements the LLM interface.
 func (o *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
-	return llms.CallLLM(ctx, o, prompt, options...)
+	return llms.GenerateFromSinglePrompt(ctx, o, prompt, options...)
 }
 
 // GenerateContent implements the Model interface.
diff --git a/llms/llms.go b/llms/llms.go
index 98c2dcae1..5b1ae5d2e 100644
--- a/llms/llms.go
+++ b/llms/llms.go
@@ -28,9 +28,11 @@ type Model interface {
 	Call(ctx context.Context, prompt string, options ...CallOption) (string, error)
 }
 
-// CallLLM is a helper function for implementing Call in terms of
-// GenerateContent. It's aimed to be used by Model providers.
-func CallLLM(ctx context.Context, llm Model, prompt string, options ...CallOption) (string, error) {
+// GenerateFromSinglePrompt is a convenience function for calling an LLM with
+// a single string prompt, expecting a single string response. It's useful for
+// simple, string-only interactions and provides a slightly more ergonomic API
+// than the more general [llms.Model.GenerateContent].
+func GenerateFromSinglePrompt(ctx context.Context, llm Model, prompt string, options ...CallOption) (string, error) {
 	msg := MessageContent{
 		Role:  schema.ChatMessageTypeHuman,
 		Parts: []ContentPart{TextContent{prompt}},
@@ -43,7 +45,7 @@ func CallLLM(ctx context.Context, llm Model, prompt string, options ...CallOptio
 
 	choices := resp.Choices
 	if len(choices) < 1 {
-		return "", errors.New("empty response from model") //nolint:goerr113
+		return "", errors.New("empty response from model")
 	}
 	c1 := choices[0]
 	return c1.Content, nil
diff --git a/llms/local/localllm.go b/llms/local/localllm.go
index 058730153..bbbbe9d40 100644
--- a/llms/local/localllm.go
+++ b/llms/local/localllm.go
@@ -30,7 +30,7 @@ var _ llms.Model = (*LLM)(nil)
 
 // Call calls the local LLM binary with the given prompt.
 func (o *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
-	return llms.CallLLM(ctx, o, prompt, options...)
+	return llms.GenerateFromSinglePrompt(ctx, o, prompt, options...)
 }
 
 func (o *LLM) appendGlobalsToArgs(opts llms.CallOptions) {
diff --git a/llms/ollama/ollamallm.go b/llms/ollama/ollamallm.go
index f31a41abd..aafcefcd9 100644
--- a/llms/ollama/ollamallm.go
+++ b/llms/ollama/ollamallm.go
@@ -41,7 +41,7 @@ func New(opts ...Option) (*LLM, error) {
 
 // Call Implement the call interface for LLM.
 func (o *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
-	return llms.CallLLM(ctx, o, prompt, options...)
+	return llms.GenerateFromSinglePrompt(ctx, o, prompt, options...)
 }
 
 // GenerateContent implements the Model interface.
diff --git a/llms/openai/openaillm.go b/llms/openai/openaillm.go
index 0a32b7856..e16d225c6 100644
--- a/llms/openai/openaillm.go
+++ b/llms/openai/openaillm.go
@@ -40,7 +40,7 @@ func New(opts ...Option) (*LLM, error) {
 
 // Call requests a completion for the given prompt.
 func (o *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
-	return llms.CallLLM(ctx, o, prompt, options...)
+	return llms.GenerateFromSinglePrompt(ctx, o, prompt, options...)
 }
 
 // GenerateContent implements the Model interface.
diff --git a/llms/vertexai/vertexai_palm_llm.go b/llms/vertexai/vertexai_palm_llm.go
index f125d7205..3674a0b08 100644
--- a/llms/vertexai/vertexai_palm_llm.go
+++ b/llms/vertexai/vertexai_palm_llm.go
@@ -25,7 +25,7 @@ var _ llms.Model = (*LLM)(nil)
 
 // Call requests a completion for the given prompt.
 func (o *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
-	return llms.CallLLM(ctx, o, prompt, options...)
+	return llms.GenerateFromSinglePrompt(ctx, o, prompt, options...)
 }
 
 // GenerateContent implements the Model interface.
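Note for reviewers: below is a minimal sketch of calling the renamed helper directly, rather than going through a provider's `Call` method. The module import path (`github.com/tmc/langchaingo/...`) and the use of `openai.New()` with environment-based configuration are assumptions for illustration, not part of this diff:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/openai"
)

func main() {
	// Assumption: openai.New() picks up credentials from the environment;
	// any llms.Model implementation from this patch would work here.
	llm, err := openai.New()
	if err != nil {
		log.Fatal(err)
	}

	// GenerateFromSinglePrompt wraps the prompt in a single human
	// MessageContent, calls GenerateContent, and returns the first
	// choice's text (or an error on an empty response).
	completion, err := llms.GenerateFromSinglePrompt(context.Background(), llm, "Why is the sky blue?")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(completion)
}
```

Since every provider's `Call` now delegates to the same helper, both entry points share identical single-prompt semantics; the new name just signals that it is a caller-facing convenience rather than an implementation hook for providers.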