Proposing a new package called Toolbox which facilitates using a group of tools #1103

Open
amritsingh183 opened this issue Jan 15, 2025 · 9 comments

Comments

@amritsingh183

amritsingh183 commented Jan 15, 2025

Consider the following code, which defines a ToolBox:

package toolbox

import (
	"context"
	"fmt"
)

// ToolDefinition describes a tool so it can be advertised to an LLM.
type ToolDefinition struct {
	// Name of the function.
	Name string
	// Description is a description of the function.
	Description string
	// Parameters is a JSON-schema-style map of the function's parameters.
	Parameters map[string]interface{}
}

// GetsWorkDone is the interface every tool in a ToolBox must implement.
type GetsWorkDone interface {
	Call(context.Context, string) (string, error)
	Definition() *ToolDefinition
}

type ToolBox []GetsWorkDone

// UseTool finds the tool with the given name and invokes it with the given arguments.
func (tb *ToolBox) UseTool(ctx context.Context, toolName string, toolArgs string) (string, error) {
	for _, tool := range *tb {
		if tool.Definition().Name == toolName {
			return tool.Call(ctx, toolArgs)
		}
	}
	return "", fmt.Errorf("invalid tool %v", toolName)
}

and using it with langchaingo's tool calling like so:

	toolResponse, err := toolBox.UseTool(ctx, toolCall.FunctionCall.Name, toolCall.FunctionCall.Arguments)
	if err != nil {
		log.Fatal(err)
	}

See the full example below, which defines a tool called WeatherTool that implements the GetsWorkDone interface:

type WeatherTool struct{}

func (w *WeatherTool) Definition() *toolbox.ToolDefinition {
	return &toolbox.ToolDefinition{
		Name:        "get_weather",
		Description: "Get the weather in a given location",
		Parameters: map[string]interface{}{
			"type": "object",
			"properties": map[string]interface{}{
				"location": map[string]string{
					"description": "The name/location of place",
					"type":        "string",
				},
				"unit": map[string]interface{}{
					"enum": []string{
						"celsius",
						"fahrenheit",
					},
					"type": "string",
				},
			},
			"required": []string{
				"location",
			},
		},
	}
}
func (w *WeatherTool) Call(ctx context.Context, arguments string) (string, error) {
	// Extract the location from the function call arguments
	var args struct {
		Location string `json:"location"`
		Unit     string `json:"unit"`
	}
	if err := json.Unmarshal([]byte(arguments), &args); err != nil {
		return "", err
	}

	// Simulate getting weather data
	weatherData := fmt.Sprintf("Weather in %s is Sunny, 25° %s", args.Location, args.Unit)

	return weatherData, nil
}


func main() {
	ctx := context.Background()
	os.Setenv("OPENAI_API_KEY", "OPENAI_API_KEY")
	callOptions := []llms.CallOption{}
	toolBox := toolbox.ToolBox{
		&WeatherTool{},
		&TaxiTool{}, // another tool implementing GetsWorkDone (definition not shown)
	}
	llmTools := make([]llms.Tool, len(toolBox))
	for i, tl := range toolBox {
		toolDef := tl.Definition()
		llmTools[i] = llms.Tool{
			Type: "function",
			Function: &llms.FunctionDefinition{
				Name:        toolDef.Name,
				Description: toolDef.Description,
				Parameters:  toolDef.Parameters,
			},
		}
	}

	callOptions = append(callOptions, llms.WithTools(llmTools))

	llm, err := langchainOpenAI.New(langchainOpenAI.WithModel(modelName))
	if err != nil {
		log.Fatal(err)
	}

	// type may be text, image etc
	messageHistory := []llms.MessageContent{
		{
			Role: llms.ChatMessageTypeSystem,
			Parts: []llms.ContentPart{
				llms.TextContent{
					Text: systemPrompt,
				},
			},
		},
		{
			Role: llms.ChatMessageTypeHuman,
			Parts: []llms.ContentPart{
				llms.TextContent{
					Text: userQuery,
				},
			},
		},
	}
	resp, err := llm.GenerateContent(ctx, messageHistory, callOptions...)
	if err != nil {
		log.Fatal(err)
	}
	for _, choice := range resp.Choices {
		toolCalls := choice.ToolCalls

		if len(toolCalls) == 0 {
			log.Fatal("No function call")
		}
		for _, toolCall := range choice.ToolCalls {
			// use the matching tool from the toolBox
			toolResponse, err := toolBox.UseTool(ctx, toolCall.FunctionCall.Name, toolCall.FunctionCall.Arguments)
			if err != nil {
				log.Fatal(err)
			}
			assistantResponse := llms.MessageContent{
				Role: llms.ChatMessageTypeAI,
				Parts: []llms.ContentPart{
					llms.ToolCall{
						ID:   toolCall.ID,
						Type: toolCall.Type,
						FunctionCall: &llms.FunctionCall{
							Name:      toolCall.FunctionCall.Name,
							Arguments: toolCall.FunctionCall.Arguments,
						},
					},
				},
			}
			log.Printf("\n\n ToolCallResponse: %v", toolResponse)
			toolResult := llms.MessageContent{
				Role: llms.ChatMessageTypeTool,
				Parts: []llms.ContentPart{
					llms.ToolCallResponse{
						ToolCallID: toolCall.ID,
						Name:       toolCall.FunctionCall.Name,
						Content:    toolResponse,
					},
				},
			}
			messageHistory = append(messageHistory, assistantResponse)
			messageHistory = append(messageHistory, toolResult)

		}

		messageHistory = append(messageHistory, llms.MessageContent{
			Role: llms.ChatMessageTypeHuman,
			Parts: []llms.ContentPart{
				llms.TextContent{
					Text: "Please formulate your final response now",
				},
			},
		})

		log.Println("Querying with tool response...")
		resp, err = llm.GenerateContent(ctx, messageHistory, callOptions...)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("\n\n Final Response: %v", resp.Choices[0].Content)
		// populate ai response
		assistantResponse := llms.TextParts(llms.ChatMessageTypeAI, resp.Choices[0].Content)
		messageHistory = append(messageHistory, assistantResponse)
		log.Println(messageHistory)
	}

}
@abacusgauge

This seems useful for my use case as well. I am very new to the Go language and could not figure out how to do this; I was looking for exactly this but could not find anything in the langchain docs about using a group of tools.

I have also been using crewai for a long time, but with this idea I can start giving langchaingo a chance in my project. This would make it very easy to specify tools.

Thanks for proposing this!

I hope this gets into the repo soon.

@FluffyKebab
Collaborator

If we add this, I think it should be a type wrapping []Tool in the tools package, called Box. We already have the Tool interface there, which serves the same purpose as your GetsWorkDone. However, I don't know how useful it would be, as it is very easy for a user to implement themselves (4 lines of code). What do you think @amritsingh183?
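For reference, a user-side version could look roughly like this (a minimal sketch against the existing tools.Tool interface; findTool is just an illustrative name):

// findTool looks up a tool by name in a plain slice of tools.Tool.
func findTool(name string, box []tools.Tool) (tools.Tool, bool) {
	for _, t := range box {
		if t.Name() == name {
			return t, true
		}
	}
	return nil, false
}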

@amritsingh183
Author

I agree with you @FluffyKebab about how easily a gopher can implement it. But having it offered by langchaingo is more convenient and would also help standardize ToolBox (or Box) style implementations, if and when an external/third-party package wants to provide its own ToolBox(es) [or Box(es)] to langchaingo's tool-calling logic.
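For example (the package and type names here are purely hypothetical), a third-party integration could expose its tools as a ready-made box:

// Hypothetical third-party package that exposes its tools as a ToolBox.
package weatherapi

// NewToolBox bundles this provider's tools; CurrentWeatherTool and
// ForecastTool are placeholder types implementing toolbox.GetsWorkDone.
func NewToolBox(apiKey string) toolbox.ToolBox {
	return toolbox.ToolBox{
		&CurrentWeatherTool{apiKey: apiKey},
		&ForecastTool{apiKey: apiKey},
	}
}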

@FluffyKebab
Collaborator

True. I know, for example, that Zapier already has something that could be a toolbox. Do you want to open a PR for this?

@amritsingh183
Author

Sure, will do.
So just to confirm, we want https://github.com/tmc/langchaingo/blob/main/tools/tool.go to be:

package tools

import (
	"context"
	"fmt"
)

// Tool is a tool for the llm agent to interact with different applications.
type Tool interface {
	Name() string
	Description() string
	Call(ctx context.Context, input string) (string, error)
}

type ToolBox []Tool

func (tb *ToolBox) UseTool(ctx context.Context, toolName string, toolArgs string) (string, error) {
	for _, tool := range *tb {
		if tool.Name() == toolName {
			return tool.Call(ctx, toolArgs)
		}
	}
	return "", fmt.Errorf("invalid tool %v", toolName)
}
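And, as a usage sketch inside the tool-calling loop from my first example (echoTool and clockTool are placeholders for any types implementing tools.Tool):

box := tools.ToolBox{&echoTool{}, &clockTool{}}
for _, toolCall := range choice.ToolCalls {
	// Dispatch the tool call the LLM proposed to the matching tool in the box.
	result, err := box.UseTool(ctx, toolCall.FunctionCall.Name, toolCall.FunctionCall.Arguments)
	if err != nil {
		log.Println(err) // the model asked for a tool that is not in the box
		continue
	}
	// result would then be fed back to the model as a ToolCallResponse,
	// as in the example above.
	_ = result
}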

@FluffyKebab
Collaborator

Maybe something like SelectTool, which returns a tool, could be more flexible? Also, there is some redundancy in the name "tools.ToolBox".
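A rough sketch of what I mean (the name and signature are just illustrative):

// SelectTool returns the tool registered under the given name; the caller
// then decides how and when to invoke it.
func (tb ToolBox) SelectTool(toolName string) (Tool, error) {
	for _, tool := range tb {
		if tool.Name() == toolName {
			return tool, nil
		}
	}
	return nil, fmt.Errorf("invalid tool %v", toolName)
}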

@amritsingh183
Author

  1. I think Kit will resonate more and will not be redundant, i.e. we would have tools.Kit.
  2. Regarding SelectTool: my intention was to allow for usage of a tool, not just selection of it. Thoughts?
  3. Regarding flexibility: the user should handle the error (raised when the tool proposed by the LLM does not exist), give feedback to the agent that it has made a mistake, and remind it of its tool-calling instructions; that way the agent can reflect and self-correct. So, IMHO, we should not offer extra flexibility here. Thoughts? A minimal sketch of that feedback loop is below.
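A minimal sketch of the feedback loop from item 3 (toolBox, toolCall, and messageHistory are as in my first example; the error message text is just an example):

toolResponse, err := toolBox.UseTool(ctx, toolCall.FunctionCall.Name, toolCall.FunctionCall.Arguments)
if err != nil {
	// Instead of aborting, tell the model that the tool it asked for does not
	// exist so it can correct itself on the next turn.
	toolResponse = fmt.Sprintf("error: %v; please choose one of the registered tools", err)
}
messageHistory = append(messageHistory, llms.MessageContent{
	Role: llms.ChatMessageTypeTool,
	Parts: []llms.ContentPart{
		llms.ToolCallResponse{
			ToolCallID: toolCall.ID,
			Name:       toolCall.FunctionCall.Name,
			Content:    toolResponse,
		},
	},
})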

@amritsingh183
Author

amritsingh183 commented Jan 28, 2025

I have created a PR for this #1117

@amritsingh183
Author

The previous PRs were not neat, so I created a fresh one: #1117
