From d2de8a7fd3de501d46feb737f7bd8a8889de6620 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sat, 4 Jan 2025 00:42:26 -0500 Subject: [PATCH 01/17] refactor model structure --- packages/core/src/generation.ts | 9 ++--- packages/core/src/types.ts | 70 ++++++++++++++++++--------------- 2 files changed, 42 insertions(+), 37 deletions(-) diff --git a/packages/core/src/generation.ts b/packages/core/src/generation.ts index 5db5b8e70a5..b5d2f5c9593 100644 --- a/packages/core/src/generation.ts +++ b/packages/core/src/generation.ts @@ -1204,7 +1204,7 @@ export const generateImage = async ( seed: data.seed || -1, }, }, - model_id: data.modelId || "FLUX.1-dev", + model_id: model, deadline: 60, priority: 1, }), @@ -1226,7 +1226,7 @@ export const generateImage = async ( ) { const together = new Together({ apiKey: apiKey as string }); const response = await together.images.create({ - model: "black-forest-labs/FLUX.1-schnell", + model: model, prompt: data.prompt, width: data.width, height: data.height, @@ -1341,7 +1341,7 @@ export const generateImage = async ( "Content-Type": "application/json", }, body: JSON.stringify({ - model: data.modelId || "fluently-xl", + model: model, prompt: data.prompt, negative_prompt: data.negativePrompt, width: data.width, @@ -1387,8 +1387,7 @@ export const generateImage = async ( "Content-Type": "application/json", }, body: JSON.stringify({ - model_id: - data.modelId || "ByteDance/SDXL-Lightning", + model_id: model, prompt: data.prompt, width: data.width || 1024, height: data.height || 1024, diff --git a/packages/core/src/types.ts b/packages/core/src/types.ts index 3687ded5e01..d0dbe6f987e 100644 --- a/packages/core/src/types.ts +++ b/packages/core/src/types.ts @@ -137,51 +137,57 @@ export enum ModelClass { } /** - * Configuration for an AI model + * Model settings */ -export type Model = { - /** Optional API endpoint */ - endpoint?: string; +type ModelSettins = { + /** Model name */ + name: string; - /** Model settings */ - settings: { - /** 
Maximum input tokens */ - maxInputTokens: number; + /** Maximum input tokens */ + maxInputTokens: number; - /** Maximum output tokens */ - maxOutputTokens: number; + /** Maximum output tokens */ + maxOutputTokens: number; - /** Optional frequency penalty */ - frequency_penalty?: number; + /** Optional frequency penalty */ + frequency_penalty?: number; - /** Optional presence penalty */ - presence_penalty?: number; + /** Optional presence penalty */ + presence_penalty?: number; - /** Optional repetition penalty */ - repetition_penalty?: number; + /** Optional repetition penalty */ + repetition_penalty?: number; - /** Stop sequences */ - stop: string[]; + /** Stop sequences */ + stop: string[]; - /** Temperature setting */ - temperature: number; + /** Temperature setting */ + temperature: number; - /** Optional telemetry configuration (experimental) */ - experimental_telemetry?: TelemetrySettings; - }; + /** Optional telemetry configuration (experimental) */ + experimental_telemetry?: TelemetrySettings; +}; - /** Optional image generation settings */ - imageSettings?: { - steps?: number; - }; +/** Image model settings */ +type imageModelSettings = { + name: string; + steps?: number; +}; + +/** + * Configuration for an AI model + */ +export type Model = { + /** Optional API endpoint */ + endpoint?: string; /** Model names by size class */ model: { - [ModelClass.SMALL]: string; - [ModelClass.MEDIUM]: string; - [ModelClass.LARGE]: string; - [ModelClass.EMBEDDING]?: string; - [ModelClass.IMAGE]?: string; + [ModelClass.SMALL]?: ModelSettins; + [ModelClass.MEDIUM]?: ModelSettins; + [ModelClass.LARGE]?: ModelSettins; + [ModelClass.EMBEDDING]?: ModelSettins; + [ModelClass.IMAGE]?: imageModelSettings; }; }; From 9302f2c011bd49d56893609855de449721c046f1 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sat, 4 Jan 2025 00:43:43 -0500 Subject: [PATCH 02/17] refactor model structure --- packages/core/src/models.ts | 1200 +++++++++++++++++++++++------------ 1 file changed, 807 
insertions(+), 393 deletions(-) diff --git a/packages/core/src/models.ts b/packages/core/src/models.ts index 99e8507821a..cbedf9bcf3c 100644 --- a/packages/core/src/models.ts +++ b/packages/core/src/models.ts @@ -4,520 +4,934 @@ import { Models, ModelProviderName, ModelClass } from "./types.ts"; export const models: Models = { [ModelProviderName.OPENAI]: { endpoint: settings.OPENAI_API_URL || "https://api.openai.com/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.0, - presence_penalty: 0.0, - temperature: 0.6, - }, model: { - [ModelClass.SMALL]: settings.SMALL_OPENAI_MODEL || "gpt-4o-mini", - [ModelClass.MEDIUM]: settings.MEDIUM_OPENAI_MODEL || "gpt-4o", - [ModelClass.LARGE]: settings.LARGE_OPENAI_MODEL || "gpt-4o", - [ModelClass.EMBEDDING]: settings.EMBEDDING_OPENAI_MODEL || "text-embedding-3-small", - [ModelClass.IMAGE]: settings.IMAGE_OPENAI_MODEL || "dall-e-3", + [ModelClass.SMALL]: { + name: settings.SMALL_OPENAI_MODEL || "gpt-4o-mini", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, + [ModelClass.MEDIUM]: { + name: settings.MEDIUM_OPENAI_MODEL || "gpt-4o", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, + [ModelClass.LARGE]: { + name: settings.LARGE_OPENAI_MODEL || "gpt-4o", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, + [ModelClass.EMBEDDING]: { + name: + settings.EMBEDDING_OPENAI_MODEL || "text-embedding-3-small", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, + [ModelClass.IMAGE]: { + name: settings.IMAGE_OPENAI_MODEL || "dall-e-3", + }, }, }, [ModelProviderName.ETERNALAI]: { endpoint: settings.ETERNALAI_URL, - settings: { - stop: [], - 
maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.0, - presence_penalty: 0.0, - temperature: 0.6, - }, model: { - [ModelClass.SMALL]: - settings.ETERNALAI_MODEL || - "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16", - [ModelClass.MEDIUM]: - settings.ETERNALAI_MODEL || - "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16", - [ModelClass.LARGE]: - settings.ETERNALAI_MODEL || - "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16", - [ModelClass.EMBEDDING]: "", - [ModelClass.IMAGE]: "", + [ModelClass.SMALL]: { + name: + settings.ETERNALAI_MODEL || + "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, + [ModelClass.MEDIUM]: { + name: + settings.ETERNALAI_MODEL || + "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, + [ModelClass.LARGE]: { + name: + settings.ETERNALAI_MODEL || + "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, }, }, [ModelProviderName.ANTHROPIC]: { - settings: { - stop: [], - maxInputTokens: 200000, - maxOutputTokens: 4096, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, endpoint: "https://api.anthropic.com/v1", model: { - [ModelClass.SMALL]: settings.SMALL_ANTHROPIC_MODEL || "claude-3-haiku-20240307", - [ModelClass.MEDIUM]: settings.MEDIUM_ANTHROPIC_MODEL || "claude-3-5-sonnet-20241022", - [ModelClass.LARGE]: settings.LARGE_ANTHROPIC_MODEL || "claude-3-5-sonnet-20241022", + [ModelClass.SMALL]: { + name: + settings.SMALL_ANTHROPIC_MODEL || "claude-3-haiku-20240307", + stop: [], + maxInputTokens: 200000, + maxOutputTokens: 4096, + 
frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.MEDIUM]: { + name: + settings.MEDIUM_ANTHROPIC_MODEL || + "claude-3-5-sonnet-20241022", + stop: [], + maxInputTokens: 200000, + maxOutputTokens: 4096, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + + [ModelClass.LARGE]: { + name: + settings.LARGE_ANTHROPIC_MODEL || + "claude-3-5-sonnet-20241022", + stop: [], + maxInputTokens: 200000, + maxOutputTokens: 4096, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, }, }, [ModelProviderName.CLAUDE_VERTEX]: { - settings: { - stop: [], - maxInputTokens: 200000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, endpoint: "https://api.anthropic.com/v1", // TODO: check model: { - [ModelClass.SMALL]: "claude-3-5-sonnet-20241022", - [ModelClass.MEDIUM]: "claude-3-5-sonnet-20241022", - [ModelClass.LARGE]: "claude-3-opus-20240229", + [ModelClass.SMALL]: { + name: "claude-3-5-sonnet-20241022", + stop: [], + maxInputTokens: 200000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.MEDIUM]: { + name: "claude-3-5-sonnet-20241022", + stop: [], + maxInputTokens: 200000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.LARGE]: { + name: "claude-3-opus-20240229", + stop: [], + maxInputTokens: 200000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, }, }, [ModelProviderName.GROK]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, endpoint: "https://api.x.ai/v1", model: { - [ModelClass.SMALL]: settings.SMALL_GROK_MODEL || "grok-2-1212", - [ModelClass.MEDIUM]: settings.MEDIUM_GROK_MODEL || "grok-2-1212", - [ModelClass.LARGE]: settings.LARGE_GROK_MODEL || "grok-2-1212", 
- [ModelClass.EMBEDDING]: settings.EMBEDDING_GROK_MODEL || "grok-2-1212", // not sure about this one + [ModelClass.SMALL]: { + name: settings.SMALL_GROK_MODEL || "grok-2-1212", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.MEDIUM]: { + name: settings.MEDIUM_GROK_MODEL || "grok-2-1212", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.LARGE]: { + name: settings.LARGE_GROK_MODEL || "grok-2-1212", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.EMBEDDING]: { + name: settings.EMBEDDING_GROK_MODEL || "grok-2-1212", // not sure about this one + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, }, }, [ModelProviderName.GROQ]: { endpoint: "https://api.groq.com/openai/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8000, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, model: { - [ModelClass.SMALL]: - settings.SMALL_GROQ_MODEL || "llama-3.1-8b-instant", - [ModelClass.MEDIUM]: - settings.MEDIUM_GROQ_MODEL || "llama-3.3-70b-versatile", - [ModelClass.LARGE]: - settings.LARGE_GROQ_MODEL || "llama-3.2-90b-vision-preview", - [ModelClass.EMBEDDING]: - settings.EMBEDDING_GROQ_MODEL || "llama-3.1-8b-instant", + [ModelClass.SMALL]: { + name: settings.SMALL_GROQ_MODEL || "llama-3.1-8b-instant", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8000, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.MEDIUM]: { + name: settings.MEDIUM_GROQ_MODEL || "llama-3.3-70b-versatile", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8000, + frequency_penalty: 0.4, + presence_penalty: 0.4, + 
temperature: 0.7, + }, + [ModelClass.LARGE]: { + name: + settings.LARGE_GROQ_MODEL || "llama-3.2-90b-vision-preview", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8000, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.EMBEDDING]: { + name: settings.EMBEDDING_GROQ_MODEL || "llama-3.1-8b-instant", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8000, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, }, }, [ModelProviderName.LLAMACLOUD]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, - imageSettings: { - steps: 4, - }, endpoint: "https://api.llamacloud.com/v1", model: { - [ModelClass.SMALL]: "meta-llama/Llama-3.2-3B-Instruct-Turbo", - [ModelClass.MEDIUM]: "meta-llama-3.1-8b-instruct", - [ModelClass.LARGE]: "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", - [ModelClass.EMBEDDING]: - "togethercomputer/m2-bert-80M-32k-retrieval", - [ModelClass.IMAGE]: "black-forest-labs/FLUX.1-schnell", + [ModelClass.SMALL]: { + name: "meta-llama/Llama-3.2-3B-Instruct-Turbo", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.MEDIUM]: { + name: "meta-llama-3.1-8b-instruct", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.LARGE]: { + name: "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.EMBEDDING]: { + name: "togethercomputer/m2-bert-80M-32k-retrieval", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.IMAGE]: { + name: "black-forest-labs/FLUX.1-schnell", + steps: 4, + }, }, }, [ModelProviderName.TOGETHER]: { - settings: { - stop: [], - maxInputTokens: 
128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, - imageSettings: { - steps: 4, - }, endpoint: "https://api.together.ai/v1", model: { - [ModelClass.SMALL]: "meta-llama/Llama-3.2-3B-Instruct-Turbo", - [ModelClass.MEDIUM]: "meta-llama-3.1-8b-instruct", - [ModelClass.LARGE]: "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", - [ModelClass.EMBEDDING]: - "togethercomputer/m2-bert-80M-32k-retrieval", - [ModelClass.IMAGE]: "black-forest-labs/FLUX.1-schnell", + [ModelClass.SMALL]: { + name: "meta-llama/Llama-3.2-3B-Instruct-Turbo", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.MEDIUM]: { + name: "meta-llama-3.1-8b-instruct", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.LARGE]: { + name: "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.EMBEDDING]: { + name: "togethercomputer/m2-bert-80M-32k-retrieval", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.IMAGE]: { + name: "black-forest-labs/FLUX.1-schnell", + steps: 4, + }, }, }, [ModelProviderName.LLAMALOCAL]: { - settings: { - stop: ["<|eot_id|>", "<|eom_id|>"], - maxInputTokens: 32768, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, model: { - [ModelClass.SMALL]: - "NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true", - [ModelClass.MEDIUM]: - "NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true", // TODO: ?download=true - [ModelClass.LARGE]: - "NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true", - // 
"RichardErkhov/NousResearch_-_Meta-Llama-3.1-70B-gguf", // TODO: - [ModelClass.EMBEDDING]: - "togethercomputer/m2-bert-80M-32k-retrieval", + [ModelClass.SMALL]: { + name: "NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true", + stop: ["<|eot_id|>", "<|eom_id|>"], + maxInputTokens: 32768, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.MEDIUM]: { + name: "NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true", // TODO: ?download=true + stop: ["<|eot_id|>", "<|eom_id|>"], + maxInputTokens: 32768, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.LARGE]: { + name: "NousResearch/Hermes-3-Llama-3.1-8B-GGUF/resolve/main/Hermes-3-Llama-3.1-8B.Q8_0.gguf?download=true", // "RichardErkhov/NousResearch_-_Meta-Llama-3.1-70B-gguf", // TODO: + stop: ["<|eot_id|>", "<|eom_id|>"], + maxInputTokens: 32768, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.EMBEDDING]: { + name: "togethercomputer/m2-bert-80M-32k-retrieval", + stop: ["<|eot_id|>", "<|eom_id|>"], + maxInputTokens: 32768, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, }, }, [ModelProviderName.GOOGLE]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, model: { - [ModelClass.SMALL]: - settings.SMALL_GOOGLE_MODEL || - settings.GOOGLE_MODEL || - "gemini-1.5-flash-latest", - [ModelClass.MEDIUM]: - settings.MEDIUM_GOOGLE_MODEL || - settings.GOOGLE_MODEL || - "gemini-1.5-flash-latest", - [ModelClass.LARGE]: - settings.LARGE_GOOGLE_MODEL || - settings.GOOGLE_MODEL || - "gemini-1.5-pro-latest", - [ModelClass.EMBEDDING]: - settings.EMBEDDING_GOOGLE_MODEL || - settings.GOOGLE_MODEL || - "text-embedding-004", + [ModelClass.SMALL]: { + name: + settings.SMALL_GOOGLE_MODEL || + 
settings.GOOGLE_MODEL || + "gemini-1.5-flash-latest", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.MEDIUM]: { + name: + settings.MEDIUM_GOOGLE_MODEL || + settings.GOOGLE_MODEL || + "gemini-1.5-flash-latest", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.LARGE]: { + name: + settings.LARGE_GOOGLE_MODEL || + settings.GOOGLE_MODEL || + "gemini-1.5-pro-latest", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.EMBEDDING]: { + name: + settings.EMBEDDING_GOOGLE_MODEL || + settings.GOOGLE_MODEL || + "text-embedding-004", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, }, }, [ModelProviderName.REDPILL]: { endpoint: "https://api.red-pill.ai/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.0, - presence_penalty: 0.0, - temperature: 0.6, - }, // Available models: https://docs.red-pill.ai/get-started/supported-models // To test other models, change the models below model: { - [ModelClass.SMALL]: - settings.SMALL_REDPILL_MODEL || - settings.REDPILL_MODEL || - "gpt-4o-mini", - [ModelClass.MEDIUM]: - settings.MEDIUM_REDPILL_MODEL || - settings.REDPILL_MODEL || - "gpt-4o", - [ModelClass.LARGE]: - settings.LARGE_REDPILL_MODEL || - settings.REDPILL_MODEL || - "gpt-4o", - [ModelClass.EMBEDDING]: "text-embedding-3-small", + [ModelClass.SMALL]: { + name: + settings.SMALL_REDPILL_MODEL || + settings.REDPILL_MODEL || + "gpt-4o-mini", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, + [ModelClass.MEDIUM]: { + name: + 
settings.MEDIUM_REDPILL_MODEL || + settings.REDPILL_MODEL || + "gpt-4o", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, + + [ModelClass.LARGE]: { + name: + settings.LARGE_REDPILL_MODEL || + settings.REDPILL_MODEL || + "gpt-4o", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, + + [ModelClass.EMBEDDING]: { + name: "text-embedding-3-small", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, }, }, [ModelProviderName.OPENROUTER]: { endpoint: "https://openrouter.ai/api/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, // Available models: https://openrouter.ai/models // To test other models, change the models below model: { - [ModelClass.SMALL]: - settings.SMALL_OPENROUTER_MODEL || - settings.OPENROUTER_MODEL || - "nousresearch/hermes-3-llama-3.1-405b", - [ModelClass.MEDIUM]: - settings.MEDIUM_OPENROUTER_MODEL || - settings.OPENROUTER_MODEL || - "nousresearch/hermes-3-llama-3.1-405b", - [ModelClass.LARGE]: - settings.LARGE_OPENROUTER_MODEL || - settings.OPENROUTER_MODEL || - "nousresearch/hermes-3-llama-3.1-405b", - [ModelClass.EMBEDDING]: "text-embedding-3-small", + [ModelClass.SMALL]: { + name: + settings.SMALL_OPENROUTER_MODEL || + settings.OPENROUTER_MODEL || + "nousresearch/hermes-3-llama-3.1-405b", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.MEDIUM]: { + name: + settings.MEDIUM_OPENROUTER_MODEL || + settings.OPENROUTER_MODEL || + "nousresearch/hermes-3-llama-3.1-405b", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + 
temperature: 0.7, + }, + [ModelClass.LARGE]: { + name: + settings.LARGE_OPENROUTER_MODEL || + settings.OPENROUTER_MODEL || + "nousresearch/hermes-3-llama-3.1-405b", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.EMBEDDING]: { + name: "text-embedding-3-small", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, }, }, [ModelProviderName.OLLAMA]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, - }, endpoint: settings.OLLAMA_SERVER_URL || "http://localhost:11434", model: { - [ModelClass.SMALL]: - settings.SMALL_OLLAMA_MODEL || - settings.OLLAMA_MODEL || - "llama3.2", - [ModelClass.MEDIUM]: - settings.MEDIUM_OLLAMA_MODEL || - settings.OLLAMA_MODEL || - "hermes3", - [ModelClass.LARGE]: - settings.LARGE_OLLAMA_MODEL || - settings.OLLAMA_MODEL || - "hermes3:70b", - [ModelClass.EMBEDDING]: - settings.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large", + [ModelClass.SMALL]: { + name: + settings.SMALL_OLLAMA_MODEL || + settings.OLLAMA_MODEL || + "llama3.2", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.MEDIUM]: { + name: + settings.MEDIUM_OLLAMA_MODEL || + settings.OLLAMA_MODEL || + "hermes3", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + + [ModelClass.LARGE]: { + name: + settings.LARGE_OLLAMA_MODEL || + settings.OLLAMA_MODEL || + "hermes3:70b", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, + + [ModelClass.EMBEDDING]: { + name: settings.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large", + stop: [], + 
maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.7, + }, }, }, [ModelProviderName.HEURIST]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, - imageSettings: { - steps: 20, - }, endpoint: "https://llm-gateway.heurist.xyz", model: { - [ModelClass.SMALL]: - settings.SMALL_HEURIST_MODEL || - "meta-llama/llama-3-70b-instruct", - [ModelClass.MEDIUM]: - settings.MEDIUM_HEURIST_MODEL || - "meta-llama/llama-3-70b-instruct", - [ModelClass.LARGE]: - settings.LARGE_HEURIST_MODEL || - "meta-llama/llama-3.1-405b-instruct", - [ModelClass.EMBEDDING]: "", //Add later, - [ModelClass.IMAGE]: settings.HEURIST_IMAGE_MODEL || "PepeXL", + [ModelClass.SMALL]: { + name: + settings.SMALL_HEURIST_MODEL || + "meta-llama/llama-3-70b-instruct", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.MEDIUM]: { + name: + settings.MEDIUM_HEURIST_MODEL || + "meta-llama/llama-3-70b-instruct", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.LARGE]: { + name: + settings.LARGE_HEURIST_MODEL || + "meta-llama/llama-3.1-405b-instruct", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.IMAGE]: { + name: settings.HEURIST_IMAGE_MODEL || "PepeXL", + steps: 20, + }, }, }, [ModelProviderName.GALADRIEL]: { endpoint: "https://api.galadriel.com/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.5, - presence_penalty: 0.5, - temperature: 0.8, - }, model: { - [ModelClass.SMALL]: "llama3.1:70b", - [ModelClass.MEDIUM]: "llama3.1:70b", - [ModelClass.LARGE]: "llama3.1:405b", - [ModelClass.EMBEDDING]: "gte-large-en-v1.5", - [ModelClass.IMAGE]: "stabilityai/stable-diffusion-xl-base-1.0", + 
[ModelClass.SMALL]: { + name: "llama3.1:70b", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.5, + presence_penalty: 0.5, + temperature: 0.8, + }, + [ModelClass.MEDIUM]: { + name: "llama3.1:70b", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.5, + presence_penalty: 0.5, + temperature: 0.8, + }, + [ModelClass.LARGE]: { + name: "llama3.1:405b", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.5, + presence_penalty: 0.5, + temperature: 0.8, + }, + [ModelClass.EMBEDDING]: { + name: "gte-large-en-v1.5", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.5, + presence_penalty: 0.5, + temperature: 0.8, + }, + [ModelClass.IMAGE]: { + name: "stabilityai/stable-diffusion-xl-base-1.0", + }, }, }, [ModelProviderName.FAL]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, - imageSettings: { - steps: 28, - }, endpoint: "https://api.fal.ai/v1", model: { - [ModelClass.SMALL]: "", // FAL doesn't provide text models - [ModelClass.MEDIUM]: "", - [ModelClass.LARGE]: "", - [ModelClass.EMBEDDING]: "", - [ModelClass.IMAGE]: "fal-ai/flux-lora", + [ModelClass.IMAGE]: { name: "fal-ai/flux-lora", steps: 28 }, }, }, [ModelProviderName.GAIANET]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, endpoint: settings.GAIANET_SERVER_URL, model: { - [ModelClass.SMALL]: - settings.GAIANET_MODEL || - settings.SMALL_GAIANET_MODEL || - "llama3b", - [ModelClass.MEDIUM]: - settings.GAIANET_MODEL || - settings.MEDIUM_GAIANET_MODEL || - "llama", - [ModelClass.LARGE]: - settings.GAIANET_MODEL || - settings.LARGE_GAIANET_MODEL || - "qwen72b", - [ModelClass.EMBEDDING]: - settings.GAIANET_EMBEDDING_MODEL || "nomic-embed", + [ModelClass.SMALL]: { + name: + settings.GAIANET_MODEL || + 
settings.SMALL_GAIANET_MODEL || + "llama3b", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.MEDIUM]: { + name: + settings.GAIANET_MODEL || + settings.MEDIUM_GAIANET_MODEL || + "llama", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.LARGE]: { + name: + settings.GAIANET_MODEL || + settings.LARGE_GAIANET_MODEL || + "qwen72b", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, + [ModelClass.EMBEDDING]: { + name: settings.GAIANET_EMBEDDING_MODEL || "nomic-embed", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + repetition_penalty: 0.4, + temperature: 0.7, + }, }, }, [ModelProviderName.ALI_BAILIAN]: { endpoint: "https://dashscope.aliyuncs.com/compatible-mode/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.6, - }, model: { - [ModelClass.SMALL]: "qwen-turbo", - [ModelClass.MEDIUM]: "qwen-plus", - [ModelClass.LARGE]: "qwen-max", - [ModelClass.IMAGE]: "wanx-v1", + [ModelClass.SMALL]: { + name: "qwen-turbo", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.6, + }, + [ModelClass.MEDIUM]: { + name: "qwen-plus", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.6, + }, + [ModelClass.LARGE]: { + name: "qwen-max", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.6, + }, + [ModelClass.IMAGE]: { + name: "wanx-v1", + }, }, }, [ModelProviderName.VOLENGINE]: { - endpoint: settings.VOLENGINE_API_URL || "https://open.volcengineapi.com/api/v3/", - settings: { - stop: [], - maxInputTokens: 128000, - 
maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.6, - }, + endpoint: + settings.VOLENGINE_API_URL || + "https://open.volcengineapi.com/api/v3/", model: { - [ModelClass.SMALL]: - settings.SMALL_VOLENGINE_MODEL || - settings.VOLENGINE_MODEL || - "doubao-lite-128k", - [ModelClass.MEDIUM]: - settings.MEDIUM_VOLENGINE_MODEL || - settings.VOLENGINE_MODEL || - "doubao-pro-128k", - [ModelClass.LARGE]: - settings.LARGE_VOLENGINE_MODEL || - settings.VOLENGINE_MODEL || - "doubao-pro-256k", - [ModelClass.EMBEDDING]: - settings.VOLENGINE_EMBEDDING_MODEL || - "doubao-embedding", + [ModelClass.SMALL]: { + name: + settings.SMALL_VOLENGINE_MODEL || + settings.VOLENGINE_MODEL || + "doubao-lite-128k", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.6, + }, + [ModelClass.MEDIUM]: { + name: + settings.MEDIUM_VOLENGINE_MODEL || + settings.VOLENGINE_MODEL || + "doubao-pro-128k", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.6, + }, + [ModelClass.LARGE]: { + name: + settings.LARGE_VOLENGINE_MODEL || + settings.VOLENGINE_MODEL || + "doubao-pro-256k", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.6, + }, + [ModelClass.EMBEDDING]: { + name: settings.VOLENGINE_EMBEDDING_MODEL || "doubao-embedding", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.4, + presence_penalty: 0.4, + temperature: 0.6, + }, }, }, [ModelProviderName.NANOGPT]: { endpoint: "https://nano-gpt.com/api/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.0, - presence_penalty: 0.0, - temperature: 0.6, - }, model: { - [ModelClass.SMALL]: settings.SMALL_NANOGPT_MODEL || "gpt-4o-mini", - [ModelClass.MEDIUM]: settings.MEDIUM_NANOGPT_MODEL || "gpt-4o", 
- [ModelClass.LARGE]: settings.LARGE_NANOGPT_MODEL || "gpt-4o", - } + [ModelClass.SMALL]: { + name: settings.SMALL_NANOGPT_MODEL || "gpt-4o-mini", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, + [ModelClass.MEDIUM]: { + name: settings.MEDIUM_NANOGPT_MODEL || "gpt-4o", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, + [ModelClass.LARGE]: { + name: settings.LARGE_NANOGPT_MODEL || "gpt-4o", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + frequency_penalty: 0.0, + presence_penalty: 0.0, + temperature: 0.6, + }, + }, }, [ModelProviderName.HYPERBOLIC]: { endpoint: "https://api.hyperbolic.xyz/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - temperature: 0.6, - }, model: { - [ModelClass.SMALL]: - settings.SMALL_HYPERBOLIC_MODEL || - settings.HYPERBOLIC_MODEL || - "meta-llama/Llama-3.2-3B-Instruct", - [ModelClass.MEDIUM]: - settings.MEDIUM_HYPERBOLIC_MODEL || - settings.HYPERBOLIC_MODEL || - "meta-llama/Meta-Llama-3.1-70B-Instruct", - [ModelClass.LARGE]: - settings.LARGE_HYPERBOLIC_MODEL || - settings.HYPERBOLIC_MODEL || - "meta-llama/Meta-Llama-3.1-405-Instruct", - [ModelClass.IMAGE]: settings.IMAGE_HYPERBOLIC_MODEL || "FLUX.1-dev", + [ModelClass.SMALL]: { + name: + settings.SMALL_HYPERBOLIC_MODEL || + settings.HYPERBOLIC_MODEL || + "meta-llama/Llama-3.2-3B-Instruct", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + temperature: 0.6, + }, + [ModelClass.MEDIUM]: { + name: + settings.MEDIUM_HYPERBOLIC_MODEL || + settings.HYPERBOLIC_MODEL || + "meta-llama/Meta-Llama-3.1-70B-Instruct", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + temperature: 0.6, + }, + [ModelClass.LARGE]: { + name: + settings.LARGE_HYPERBOLIC_MODEL || + settings.HYPERBOLIC_MODEL || + "meta-llama/Meta-Llama-3.1-405-Instruct", + stop: [], + 
maxInputTokens: 128000, + maxOutputTokens: 8192, + temperature: 0.6, + }, + [ModelClass.IMAGE]: { + name: settings.IMAGE_HYPERBOLIC_MODEL || "FLUX.1-dev", + }, }, }, [ModelProviderName.VENICE]: { endpoint: "https://api.venice.ai/api/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - temperature: 0.6, - }, model: { - [ModelClass.SMALL]: settings.SMALL_VENICE_MODEL || "llama-3.3-70b", - [ModelClass.MEDIUM]: settings.MEDIUM_VENICE_MODEL || "llama-3.3-70b", - [ModelClass.LARGE]: settings.LARGE_VENICE_MODEL || "llama-3.1-405b", - [ModelClass.IMAGE]: settings.IMAGE_VENICE_MODEL || "fluently-xl", + [ModelClass.SMALL]: { + name: settings.SMALL_VENICE_MODEL || "llama-3.3-70b", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + temperature: 0.6, + }, + [ModelClass.MEDIUM]: { + name: settings.MEDIUM_VENICE_MODEL || "llama-3.3-70b", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + temperature: 0.6, + }, + [ModelClass.LARGE]: { + name: settings.LARGE_VENICE_MODEL || "llama-3.1-405b", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + temperature: 0.6, + }, + [ModelClass.IMAGE]: { + name: settings.IMAGE_VENICE_MODEL || "fluently-xl", + }, }, }, [ModelProviderName.AKASH_CHAT_API]: { endpoint: "https://chatapi.akash.network/api/v1", - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - temperature: 0.6, - }, model: { - [ModelClass.SMALL]: - settings.SMALL_AKASH_CHAT_API_MODEL || - "Meta-Llama-3-2-3B-Instruct", - [ModelClass.MEDIUM]: - settings.MEDIUM_AKASH_CHAT_API_MODEL || - "Meta-Llama-3-3-70B-Instruct", - [ModelClass.LARGE]: - settings.LARGE_AKASH_CHAT_API_MODEL || - "Meta-Llama-3-1-405B-Instruct-FP8", + [ModelClass.SMALL]: { + name: + settings.SMALL_AKASH_CHAT_API_MODEL || + "Meta-Llama-3-2-3B-Instruct", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + temperature: 0.6, + }, + [ModelClass.MEDIUM]: { + name: + settings.MEDIUM_AKASH_CHAT_API_MODEL || + 
"Meta-Llama-3-3-70B-Instruct", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + temperature: 0.6, + }, + [ModelClass.LARGE]: { + name: + settings.LARGE_AKASH_CHAT_API_MODEL || + "Meta-Llama-3-1-405B-Instruct-FP8", + stop: [], + maxInputTokens: 128000, + maxOutputTokens: 8192, + temperature: 0.6, + }, }, }, [ModelProviderName.LIVEPEER]: { - settings: { - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, - }, // livepeer endpoint is handled from the sdk model: { - [ModelClass.SMALL]: "", - [ModelClass.MEDIUM]: "", - [ModelClass.LARGE]: "", - [ModelClass.EMBEDDING]: "", - [ModelClass.IMAGE]: settings.LIVEPEER_IMAGE_MODEL || "ByteDance/SDXL-Lightning", + [ModelClass.IMAGE]: { + name: + settings.LIVEPEER_IMAGE_MODEL || "ByteDance/SDXL-Lightning", + }, }, }, }; export function getModel(provider: ModelProviderName, type: ModelClass) { - return models[provider].model[type]; + return models[provider].model[type].name; } export function getEndpoint(provider: ModelProviderName) { From b82e6aa4c4dec1c6bfdcb3332f7b885ef969f906 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sat, 4 Jan 2025 00:54:21 -0500 Subject: [PATCH 03/17] add getModelSettings function --- packages/core/src/generation.ts | 11 +++++++---- packages/core/src/models.ts | 7 +++++-- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/packages/core/src/generation.ts b/packages/core/src/generation.ts index b5d2f5c9593..c2e660d545b 100644 --- a/packages/core/src/generation.ts +++ b/packages/core/src/generation.ts @@ -18,7 +18,7 @@ import { AutoTokenizer } from "@huggingface/transformers"; import Together from "together-ai"; import { ZodSchema } from "zod"; import { elizaLogger } from "./index.ts"; -import { getModel, models } from "./models.ts"; +import { getModel, models, getModelSettings } from "./models.ts"; import { parseBooleanFromText, parseJsonArrayFromText, @@ -39,6 +39,7 @@ import { ActionResponse, TelemetrySettings, 
TokenizerType, + imageModelSettings, } from "./types.ts"; import { fal } from "@fal-ai/client"; import { tavily } from "@tavily/core"; @@ -1144,9 +1145,11 @@ export const generateImage = async ( data?: string[]; error?: any; }> => { - const model = getModel(runtime.imageModelProvider, ModelClass.IMAGE); - const modelSettings = models[runtime.imageModelProvider].imageSettings; - + const modelSettings = getModelSettings( + runtime.imageModelProvider, + ModelClass.IMAGE + ) as imageModelSettings; + const model = modelSettings.name; elizaLogger.info("Generating image with options:", { imageModelProvider: model, }); diff --git a/packages/core/src/models.ts b/packages/core/src/models.ts index cbedf9bcf3c..37dbd2e645c 100644 --- a/packages/core/src/models.ts +++ b/packages/core/src/models.ts @@ -930,8 +930,11 @@ export const models: Models = { }, }; -export function getModel(provider: ModelProviderName, type: ModelClass) { - return models[provider].model[type].name; +export function getModelSettings( + provider: ModelProviderName, + type: ModelClass +) { + return models[provider].model[type]; } export function getEndpoint(provider: ModelProviderName) { From 30de625435c2429f55ef915d7afe0a747ee4e792 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sat, 4 Jan 2025 00:54:50 -0500 Subject: [PATCH 04/17] clean code --- packages/core/src/generation.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/core/src/generation.ts b/packages/core/src/generation.ts index c2e660d545b..ed81be67e13 100644 --- a/packages/core/src/generation.ts +++ b/packages/core/src/generation.ts @@ -18,7 +18,7 @@ import { AutoTokenizer } from "@huggingface/transformers"; import Together from "together-ai"; import { ZodSchema } from "zod"; import { elizaLogger } from "./index.ts"; -import { getModel, models, getModelSettings } from "./models.ts"; +import { models, getModelSettings } from "./models.ts"; import { parseBooleanFromText, parseJsonArrayFromText, From 
6a110568351016325541f61ad9256eacaf9b9f36 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sat, 4 Jan 2025 01:08:46 -0500 Subject: [PATCH 05/17] seperate getImageModelSettings and getModelSettings --- packages/core/src/generation.ts | 74 +++++++++++++++------------------ packages/core/src/models.ts | 15 +++++-- 2 files changed, 45 insertions(+), 44 deletions(-) diff --git a/packages/core/src/generation.ts b/packages/core/src/generation.ts index ed81be67e13..27d5bfc6241 100644 --- a/packages/core/src/generation.ts +++ b/packages/core/src/generation.ts @@ -18,7 +18,7 @@ import { AutoTokenizer } from "@huggingface/transformers"; import Together from "together-ai"; import { ZodSchema } from "zod"; import { elizaLogger } from "./index.ts"; -import { models, getModelSettings } from "./models.ts"; +import { models, getModelSettings, getImageModelSettings } from "./models.ts"; import { parseBooleanFromText, parseJsonArrayFromText, @@ -180,7 +180,7 @@ export async function generateText({ }: { runtime: IAgentRuntime; context: string; - modelClass: string; + modelClass: ModelClass; tools?: Record; onStepFinish?: (event: StepResult) => Promise | void; maxSteps?: number; @@ -202,7 +202,8 @@ export async function generateText({ const provider = runtime.modelProvider; const endpoint = runtime.character.modelEndpointOverride || models[provider].endpoint; - let model = models[provider].model[modelClass]; + const modelSettings = getModelSettings(runtime.modelProvider, modelClass); + let model = modelSettings.name; // allow character.json settings => secrets to override models // FIXME: add MODEL_MEDIUM support @@ -274,23 +275,20 @@ export async function generateText({ const modelConfiguration = runtime.character?.settings?.modelConfig; const temperature = - modelConfiguration?.temperature || - models[provider].settings.temperature; + modelConfiguration?.temperature || modelSettings.temperature; const frequency_penalty = modelConfiguration?.frequency_penalty || - 
models[provider].settings.frequency_penalty; + modelSettings.frequency_penalty; const presence_penalty = - modelConfiguration?.presence_penalty || - models[provider].settings.presence_penalty; + modelConfiguration?.presence_penalty || modelSettings.presence_penalty; const max_context_length = - modelConfiguration?.maxInputTokens || - models[provider].settings.maxInputTokens; + modelConfiguration?.maxInputTokens || modelSettings.maxInputTokens; const max_response_length = modelConfiguration?.max_response_length || - models[provider].settings.maxOutputTokens; + modelSettings.maxOutputTokens; const experimental_telemetry = modelConfiguration?.experimental_telemetry || - models[provider].settings.experimental_telemetry; + modelSettings.experimental_telemetry; const apiKey = runtime.token; @@ -303,7 +301,7 @@ export async function generateText({ let response: string; - const _stop = stop || models[provider].settings.stop; + const _stop = stop || modelSettings.stop; elizaLogger.debug( `Using provider: ${provider}, model: ${model}, temperature: ${temperature}, max response length: ${max_response_length}` ); @@ -831,7 +829,7 @@ export async function generateShouldRespond({ }: { runtime: IAgentRuntime; context: string; - modelClass: string; + modelClass: ModelClass; }): Promise<"RESPOND" | "IGNORE" | "STOP" | null> { let retryDelay = 1000; while (true) { @@ -914,15 +912,12 @@ export async function generateTrueOrFalse({ }: { runtime: IAgentRuntime; context: string; - modelClass: string; + modelClass: ModelClass; }): Promise { let retryDelay = 1000; - + const modelSettings = getModelSettings(runtime.modelProvider, modelClass); const stop = Array.from( - new Set([ - ...(models[runtime.modelProvider].settings.stop || []), - ["\n"], - ]) + new Set([...(modelSettings.stop || []), ["\n"]]) ) as string[]; while (true) { @@ -969,7 +964,7 @@ export async function generateTextArray({ }: { runtime: IAgentRuntime; context: string; - modelClass: string; + modelClass: ModelClass; }): 
Promise { if (!context) { elizaLogger.error("generateTextArray context is empty"); @@ -1005,7 +1000,7 @@ export async function generateObjectDeprecated({ }: { runtime: IAgentRuntime; context: string; - modelClass: string; + modelClass: ModelClass; }): Promise { if (!context) { elizaLogger.error("generateObjectDeprecated context is empty"); @@ -1041,7 +1036,7 @@ export async function generateObjectArray({ }: { runtime: IAgentRuntime; context: string; - modelClass: string; + modelClass: ModelClass; }): Promise { if (!context) { elizaLogger.error("generateObjectArray context is empty"); @@ -1089,10 +1084,10 @@ export async function generateMessageResponse({ }: { runtime: IAgentRuntime; context: string; - modelClass: string; + modelClass: ModelClass; }): Promise { - const provider = runtime.modelProvider; - const max_context_length = models[provider].settings.maxInputTokens; + const modelSettings = getModelSettings(runtime.modelProvider, modelClass); + const max_context_length = modelSettings.maxInputTokens; context = await trimTokens(context, max_context_length, runtime); let retryLength = 1000; // exponential backoff @@ -1145,10 +1140,7 @@ export const generateImage = async ( data?: string[]; error?: any; }> => { - const modelSettings = getModelSettings( - runtime.imageModelProvider, - ModelClass.IMAGE - ) as imageModelSettings; + const modelSettings = getImageModelSettings(runtime.imageModelProvider); const model = modelSettings.name; elizaLogger.info("Generating image with options:", { imageModelProvider: model, @@ -1563,14 +1555,14 @@ export const generateObject = async ({ } const provider = runtime.modelProvider; - const model = models[provider].model[modelClass]; - const temperature = models[provider].settings.temperature; - const frequency_penalty = models[provider].settings.frequency_penalty; - const presence_penalty = models[provider].settings.presence_penalty; - const max_context_length = models[provider].settings.maxInputTokens; - const max_response_length 
= models[provider].settings.maxOutputTokens; - const experimental_telemetry = - models[provider].settings.experimental_telemetry; + const modelSettings = getModelSettings(runtime.modelProvider, modelClass); + const model = modelSettings.name; + const temperature = modelSettings.temperature; + const frequency_penalty = modelSettings.frequency_penalty; + const presence_penalty = modelSettings.presence_penalty; + const max_context_length = modelSettings.maxInputTokens; + const max_response_length = modelSettings.maxOutputTokens; + const experimental_telemetry = modelSettings.experimental_telemetry; const apiKey = runtime.token; try { @@ -1582,7 +1574,7 @@ export const generateObject = async ({ maxTokens: max_response_length, frequencyPenalty: frequency_penalty, presencePenalty: presence_penalty, - stop: stop || models[provider].settings.stop, + stop: stop || modelSettings.stop, experimental_telemetry: experimental_telemetry, }; @@ -1621,7 +1613,7 @@ interface ProviderOptions { mode?: "auto" | "json" | "tool"; experimental_providerMetadata?: Record; modelOptions: ModelSettings; - modelClass: string; + modelClass: ModelClass; context: string; } @@ -1904,7 +1896,7 @@ export async function generateTweetActions({ }: { runtime: IAgentRuntime; context: string; - modelClass: string; + modelClass: ModelClass; }): Promise { let retryDelay = 1000; while (true) { diff --git a/packages/core/src/models.ts b/packages/core/src/models.ts index 37dbd2e645c..e7744bef6d0 100644 --- a/packages/core/src/models.ts +++ b/packages/core/src/models.ts @@ -1,5 +1,10 @@ import settings from "./settings.ts"; -import { Models, ModelProviderName, ModelClass } from "./types.ts"; +import { + Models, + ModelProviderName, + ModelClass, + ModelSettins, +} from "./types.ts"; export const models: Models = { [ModelProviderName.OPENAI]: { @@ -933,8 +938,12 @@ export const models: Models = { export function getModelSettings( provider: ModelProviderName, type: ModelClass -) { - return 
models[provider].model[type]; +): ModelSettins { + return models[provider].model[type] as ModelSettins; +} + +export function getImageModelSettings(provider: ModelProviderName) { + return models[provider].model[ModelClass.IMAGE]; } export function getEndpoint(provider: ModelProviderName) { From 52c48cbffbe4f33c83245844f1882f1bf8797978 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sat, 4 Jan 2025 01:23:53 -0500 Subject: [PATCH 06/17] typo --- packages/core/src/models.ts | 4 ++-- packages/core/src/types.ts | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/core/src/models.ts b/packages/core/src/models.ts index e7744bef6d0..a3bb202a793 100644 --- a/packages/core/src/models.ts +++ b/packages/core/src/models.ts @@ -3,7 +3,7 @@ import { Models, ModelProviderName, ModelClass, - ModelSettins, + ModelSettings, } from "./types.ts"; export const models: Models = { @@ -939,7 +939,7 @@ export function getModelSettings( provider: ModelProviderName, type: ModelClass ): ModelSettins { - return models[provider].model[type] as ModelSettins; + return models[provider].model[type] as ModelSettings; } export function getImageModelSettings(provider: ModelProviderName) { diff --git a/packages/core/src/types.ts b/packages/core/src/types.ts index d0dbe6f987e..8bdccb8fe40 100644 --- a/packages/core/src/types.ts +++ b/packages/core/src/types.ts @@ -139,7 +139,7 @@ export enum ModelClass { /** * Model settings */ -type ModelSettins = { +export type ModelSettings = { /** Model name */ name: string; @@ -169,7 +169,7 @@ type ModelSettins = { }; /** Image model settings */ -type imageModelSettings = { +export type ImageModelSettings = { name: string; steps?: number; }; @@ -183,11 +183,11 @@ export type Model = { /** Model names by size class */ model: { - [ModelClass.SMALL]?: ModelSettins; - [ModelClass.MEDIUM]?: ModelSettins; - [ModelClass.LARGE]?: ModelSettins; - [ModelClass.EMBEDDING]?: ModelSettins; - [ModelClass.IMAGE]?: imageModelSettings; + 
[ModelClass.SMALL]?: ModelSettings; + [ModelClass.MEDIUM]?: ModelSettings; + [ModelClass.LARGE]?: ModelSettings; + [ModelClass.EMBEDDING]?: ModelSettings; + [ModelClass.IMAGE]?: ImageModelSettings; }; }; From 85452feac218fbfe89e4892eebbfc03bfb58fc38 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sat, 4 Jan 2025 01:24:02 -0500 Subject: [PATCH 07/17] clean code --- packages/core/src/generation.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/core/src/generation.ts b/packages/core/src/generation.ts index 27d5bfc6241..fc12934d3eb 100644 --- a/packages/core/src/generation.ts +++ b/packages/core/src/generation.ts @@ -39,7 +39,6 @@ import { ActionResponse, TelemetrySettings, TokenizerType, - imageModelSettings, } from "./types.ts"; import { fal } from "@fal-ai/client"; import { tavily } from "@tavily/core"; From 2af9a09b7a952f401f4f3c7ccf554a39782671c1 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sat, 4 Jan 2025 01:26:31 -0500 Subject: [PATCH 08/17] add return type of function --- packages/core/src/models.ts | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/packages/core/src/models.ts b/packages/core/src/models.ts index a3bb202a793..03713d0b77e 100644 --- a/packages/core/src/models.ts +++ b/packages/core/src/models.ts @@ -4,6 +4,7 @@ import { ModelProviderName, ModelClass, ModelSettings, + ImageModelSettings, } from "./types.ts"; export const models: Models = { @@ -938,12 +939,16 @@ export const models: Models = { export function getModelSettings( provider: ModelProviderName, type: ModelClass -): ModelSettins { - return models[provider].model[type] as ModelSettings; +): ModelSettings | undefined { + return models[provider]?.model[type] as ModelSettings | undefined; } -export function getImageModelSettings(provider: ModelProviderName) { - return models[provider].model[ModelClass.IMAGE]; +export function getImageModelSettings( + provider: ModelProviderName +): ImageModelSettings | undefined { + return 
models[provider]?.model[ModelClass.IMAGE] as + | ImageModelSettings + | undefined; } export function getEndpoint(provider: ModelProviderName) { From f736ba3c2c7eb5ce9b45151f6c1d9b7b697c0571 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sat, 4 Jan 2025 01:42:16 -0500 Subject: [PATCH 09/17] adapt to new model structure --- .../src/actions/chat_with_attachments.ts | 9 ++++++--- .../src/actions/summarize_conversation.ts | 9 ++++++--- .../src/actions/chat_with_attachments.ts | 8 ++++++-- .../src/actions/summarize_conversation.ts | 8 ++++++-- packages/core/src/embedding.ts | 8 ++++---- packages/core/src/generation.ts | 19 ++++++++++++------- packages/plugin-node/src/services/image.ts | 13 +++++++++---- 7 files changed, 49 insertions(+), 25 deletions(-) diff --git a/packages/client-discord/src/actions/chat_with_attachments.ts b/packages/client-discord/src/actions/chat_with_attachments.ts index 67e0cc682db..c271ab005fa 100644 --- a/packages/client-discord/src/actions/chat_with_attachments.ts +++ b/packages/client-discord/src/actions/chat_with_attachments.ts @@ -1,4 +1,4 @@ -import { composeContext } from "@elizaos/core"; +import { composeContext, getModelSettings } from "@elizaos/core"; import { generateText, trimTokens } from "@elizaos/core"; import { models } from "@elizaos/core"; import { parseJSONObjectFromText } from "@elizaos/core"; @@ -185,8 +185,11 @@ const summarizeAction = { let currentSummary = ""; - const model = models[runtime.character.modelProvider]; - const chunkSize = model.settings.maxOutputTokens; + const modelSettings = getModelSettings( + runtime.modelProvider, + ModelClass.SMALL + ); + const chunkSize = modelSettings.maxOutputTokens; state.attachmentsWithText = attachmentsWithText; state.objective = objective; diff --git a/packages/client-discord/src/actions/summarize_conversation.ts b/packages/client-discord/src/actions/summarize_conversation.ts index d9c9be6f42c..87fcd1aeb29 100644 --- 
a/packages/client-discord/src/actions/summarize_conversation.ts +++ b/packages/client-discord/src/actions/summarize_conversation.ts @@ -1,4 +1,4 @@ -import { composeContext } from "@elizaos/core"; +import { composeContext, getModelSettings } from "@elizaos/core"; import { generateText, splitChunks, trimTokens } from "@elizaos/core"; import { getActorDetails } from "@elizaos/core"; import { models } from "@elizaos/core"; @@ -247,8 +247,11 @@ const summarizeAction = { let currentSummary = ""; - const model = models[runtime.character.settings.model]; - const chunkSize = model.settings.maxContextLength - 1000; + const modelSettings = getModelSettings( + runtime.modelProvider, + ModelClass.SMALL + ); + const chunkSize = modelSettings.maxOutputTokens - 1000; const chunks = await splitChunks(formattedMemories, chunkSize, 0); diff --git a/packages/client-slack/src/actions/chat_with_attachments.ts b/packages/client-slack/src/actions/chat_with_attachments.ts index 169add1222b..4cd7b577e86 100644 --- a/packages/client-slack/src/actions/chat_with_attachments.ts +++ b/packages/client-slack/src/actions/chat_with_attachments.ts @@ -3,6 +3,7 @@ import { generateText, trimTokens, parseJSONObjectFromText, + getModelSettings, } from "@elizaos/core"; import { models } from "@elizaos/core"; import { @@ -194,8 +195,11 @@ const summarizeAction: Action = { let currentSummary = ""; - const model = models[runtime.character.modelProvider]; - const chunkSize = model.settings.maxOutputTokens; + const modelSettings = getModelSettings( + runtime.modelProvider, + ModelClass.SMALL + ); + const chunkSize = modelSettings.maxOutputTokens; currentState.attachmentsWithText = attachmentsWithText; currentState.objective = objective; diff --git a/packages/client-slack/src/actions/summarize_conversation.ts b/packages/client-slack/src/actions/summarize_conversation.ts index ec98e1f10d9..6afd4d49e82 100644 --- a/packages/client-slack/src/actions/summarize_conversation.ts +++ 
b/packages/client-slack/src/actions/summarize_conversation.ts @@ -4,6 +4,7 @@ import { splitChunks, trimTokens, parseJSONObjectFromText, + getModelSettings, } from "@elizaos/core"; import { models } from "@elizaos/core"; import { getActorDetails } from "@elizaos/core"; @@ -265,8 +266,11 @@ const summarizeAction: Action = { let currentSummary = ""; - const model = models[runtime.character.modelProvider]; - const chunkSize = model.settings.maxOutputTokens; + const modelSettings = getModelSettings( + runtime.modelProvider, + ModelClass.SMALL + ); + const chunkSize = modelSettings.maxOutputTokens; const chunks = await splitChunks(formattedMemories, chunkSize, 0); diff --git a/packages/core/src/embedding.ts b/packages/core/src/embedding.ts index 659001b0c29..0f82295182a 100644 --- a/packages/core/src/embedding.ts +++ b/packages/core/src/embedding.ts @@ -1,5 +1,5 @@ import path from "node:path"; -import { models } from "./models.ts"; +import { getEndpoint, models } from "./models.ts"; import { IAgentRuntime, ModelProviderName } from "./types.ts"; import settings from "./settings.ts"; import elizaLogger from "./logger.ts"; @@ -202,7 +202,7 @@ export async function embed(runtime: IAgentRuntime, input: string) { model: config.model, endpoint: runtime.character.modelEndpointOverride || - models[ModelProviderName.OLLAMA].endpoint, + getEndpoint(ModelProviderName.OLLAMA), isOllama: true, dimensions: config.dimensions, }); @@ -213,7 +213,7 @@ export async function embed(runtime: IAgentRuntime, input: string) { model: config.model, endpoint: runtime.character.modelEndpointOverride || - models[ModelProviderName.GAIANET].endpoint || + getEndpoint(ModelProviderName.GAIANET) || settings.SMALL_GAIANET_SERVER_URL || settings.MEDIUM_GAIANET_SERVER_URL || settings.LARGE_GAIANET_SERVER_URL, @@ -239,7 +239,7 @@ export async function embed(runtime: IAgentRuntime, input: string) { model: config.model, endpoint: runtime.character.modelEndpointOverride || - 
models[runtime.character.modelProvider].endpoint, + getEndpoint(runtime.character.modelProvider), apiKey: runtime.token, dimensions: config.dimensions, }); diff --git a/packages/core/src/generation.ts b/packages/core/src/generation.ts index fc12934d3eb..0f15ff8dbbe 100644 --- a/packages/core/src/generation.ts +++ b/packages/core/src/generation.ts @@ -18,7 +18,12 @@ import { AutoTokenizer } from "@huggingface/transformers"; import Together from "together-ai"; import { ZodSchema } from "zod"; import { elizaLogger } from "./index.ts"; -import { models, getModelSettings, getImageModelSettings } from "./models.ts"; +import { + models, + getModelSettings, + getImageModelSettings, + getEndpoint, +} from "./models.ts"; import { parseBooleanFromText, parseJsonArrayFromText, @@ -200,7 +205,7 @@ export async function generateText({ const provider = runtime.modelProvider; const endpoint = - runtime.character.modelEndpointOverride || models[provider].endpoint; + runtime.character.modelEndpointOverride || getEndpoint(provider); const modelSettings = getModelSettings(runtime.modelProvider, modelClass); let model = modelSettings.name; @@ -563,7 +568,7 @@ export async function generateText({ case ModelProviderName.REDPILL: { elizaLogger.debug("Initializing RedPill model."); - const serverUrl = models[provider].endpoint; + const serverUrl = getEndpoint(provider); const openai = createOpenAI({ apiKey, baseURL: serverUrl, @@ -594,7 +599,7 @@ export async function generateText({ case ModelProviderName.OPENROUTER: { elizaLogger.debug("Initializing OpenRouter model."); - const serverUrl = models[provider].endpoint; + const serverUrl = getEndpoint(provider); const openrouter = createOpenAI({ apiKey, baseURL: serverUrl, @@ -628,7 +633,7 @@ export async function generateText({ elizaLogger.debug("Initializing Ollama model."); const ollamaProvider = createOllama({ - baseURL: models[provider].endpoint + "/api", + baseURL: getEndpoint(provider) + "/api", fetch: runtime.fetch, }); const ollama = 
ollamaProvider(model); @@ -686,7 +691,7 @@ export async function generateText({ case ModelProviderName.GAIANET: { elizaLogger.debug("Initializing GAIANET model."); - var baseURL = models[provider].endpoint; + var baseURL = getEndpoint(provider); if (!baseURL) { switch (modelClass) { case ModelClass.SMALL: @@ -1866,7 +1871,7 @@ async function handleOllama({ provider, }: ProviderOptions): Promise> { const ollamaProvider = createOllama({ - baseURL: models[provider].endpoint + "/api", + baseURL: getEndpoint(provider) + "/api", }); const ollama = ollamaProvider(model); return await aiGenerateObject({ diff --git a/packages/plugin-node/src/services/image.ts b/packages/plugin-node/src/services/image.ts index 44d88f9e7df..55c29db6d14 100644 --- a/packages/plugin-node/src/services/image.ts +++ b/packages/plugin-node/src/services/image.ts @@ -1,4 +1,4 @@ -import { elizaLogger, models } from "@elizaos/core"; +import { elizaLogger, getEndpoint, models } from "@elizaos/core"; import { Service } from "@elizaos/core"; import { IAgentRuntime, @@ -187,7 +187,12 @@ export class ImageDescriptionService ): Promise { for (let attempt = 0; attempt < 3; attempt++) { try { - const shouldUseBase64 = (isGif || isLocalFile)&& !(this.runtime.imageModelProvider === ModelProviderName.OPENAI); + const shouldUseBase64 = + (isGif || isLocalFile) && + !( + this.runtime.imageModelProvider === + ModelProviderName.OPENAI + ); const mimeType = isGif ? "png" : path.extname(imageUrl).slice(1) || "jpeg"; @@ -209,8 +214,8 @@ export class ImageDescriptionService // If model provider is openai, use the endpoint, otherwise use the default openai endpoint. const endpoint = this.runtime.imageModelProvider === ModelProviderName.OPENAI - ? models[this.runtime.imageModelProvider].endpoint - : "https://api.openai.com/v1"; + ? 
getEndpoint(this.runtime.imageModelProvider) + : "https://api.openai.com/v1"; const response = await fetch(endpoint + "/chat/completions", { method: "POST", headers: { From 3c753065611f2c51fa3334292bb1d6f9292eb7ce Mon Sep 17 00:00:00 2001 From: Odilitime Date: Sun, 5 Jan 2025 02:06:09 -0800 Subject: [PATCH 10/17] Add DAO donation ask & dev discord --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index 58750861101..11066daffe4 100644 --- a/README.md +++ b/README.md @@ -43,6 +43,17 @@ - 🎮 Video Game NPCs - 🧠 Trading +## 💰 If you plan to launch a token + +This framework is the number one open source project on github, we are enabling the next generation of human-machine interface but we still need your help to ensure the code is of the utmost quality with response rapid to critical issues that will affect our builder community at large. + +To ensure sustainable development and continued innovation, we ask contributions of 5-10% of initial token distributions from successful launches. + +All contributions are publicly tracked on-chain and used exclusively for ecosystem development. + +### ⚠️ Don't forget to tip the big guy 10%: ⚠️ +[AM84n1iLdxgVTAyENBcLdjXoyvjentTbu5Q6EpKV1PeG](https://solscan.io/account/AM84n1iLdxgVTAyENBcLdjXoyvjentTbu5Q6EpKV1PeG) + ## 🚀 Quick Start ### Prerequisites @@ -138,6 +149,7 @@ pnpm install --include=optional sharp - [GitHub Issues](https://github.com/elizaos/eliza/issues). Best for: bugs you encounter using Eliza, and feature proposals. - [Discord](https://discord.gg/ai16z). Best for: sharing your applications and hanging out with the community. +- [Developer Discord](https://discord.gg/3f67SH4rXT). Best for: getting help and plugin development. 
## Contributors From 0889edd4894c75b6eea3dd5e3b6f25803e5c7454 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sun, 5 Jan 2025 08:51:29 -0500 Subject: [PATCH 11/17] remove unused variable --- packages/client-discord/src/actions/chat_with_attachments.ts | 1 - packages/client-discord/src/actions/summarize_conversation.ts | 1 - 2 files changed, 2 deletions(-) diff --git a/packages/client-discord/src/actions/chat_with_attachments.ts b/packages/client-discord/src/actions/chat_with_attachments.ts index c271ab005fa..e4bcad1a45a 100644 --- a/packages/client-discord/src/actions/chat_with_attachments.ts +++ b/packages/client-discord/src/actions/chat_with_attachments.ts @@ -1,6 +1,5 @@ import { composeContext, getModelSettings } from "@elizaos/core"; import { generateText, trimTokens } from "@elizaos/core"; -import { models } from "@elizaos/core"; import { parseJSONObjectFromText } from "@elizaos/core"; import { Action, diff --git a/packages/client-discord/src/actions/summarize_conversation.ts b/packages/client-discord/src/actions/summarize_conversation.ts index 87fcd1aeb29..3c05fcab80a 100644 --- a/packages/client-discord/src/actions/summarize_conversation.ts +++ b/packages/client-discord/src/actions/summarize_conversation.ts @@ -1,7 +1,6 @@ import { composeContext, getModelSettings } from "@elizaos/core"; import { generateText, splitChunks, trimTokens } from "@elizaos/core"; import { getActorDetails } from "@elizaos/core"; -import { models } from "@elizaos/core"; import { parseJSONObjectFromText } from "@elizaos/core"; import { Action, From 5fa0af12450ffe96ada7dc77ed739e44cfabf8a8 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sun, 5 Jan 2025 09:00:03 -0500 Subject: [PATCH 12/17] remove unused variable --- packages/core/src/embedding.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/core/src/embedding.ts b/packages/core/src/embedding.ts index 0f82295182a..117853df37b 100644 --- a/packages/core/src/embedding.ts +++ 
b/packages/core/src/embedding.ts @@ -1,5 +1,5 @@ import path from "node:path"; -import { getEndpoint, models } from "./models.ts"; +import { getEndpoint } from "./models.ts"; import { IAgentRuntime, ModelProviderName } from "./types.ts"; import settings from "./settings.ts"; import elizaLogger from "./logger.ts"; From e140845c5929945ba736860e0e672167c30bfc66 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sun, 5 Jan 2025 09:19:48 -0500 Subject: [PATCH 13/17] add EmbeddingModelSettings type --- packages/core/src/models.ts | 86 ++++++------------------------------- packages/core/src/types.ts | 8 +++- 2 files changed, 19 insertions(+), 75 deletions(-) diff --git a/packages/core/src/models.ts b/packages/core/src/models.ts index 03713d0b77e..b4d962426df 100644 --- a/packages/core/src/models.ts +++ b/packages/core/src/models.ts @@ -5,6 +5,7 @@ import { ModelClass, ModelSettings, ImageModelSettings, + EmbeddingModelSettings, } from "./types.ts"; export const models: Models = { @@ -41,12 +42,7 @@ export const models: Models = { [ModelClass.EMBEDDING]: { name: settings.EMBEDDING_OPENAI_MODEL || "text-embedding-3-small", - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.0, - presence_penalty: 0.0, - temperature: 0.6, + dimensions: 1536, }, [ModelClass.IMAGE]: { name: settings.IMAGE_OPENAI_MODEL || "dall-e-3", @@ -193,12 +189,6 @@ export const models: Models = { }, [ModelClass.EMBEDDING]: { name: settings.EMBEDDING_GROK_MODEL || "grok-2-1212", // not sure about this one - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, }, }, }, @@ -235,12 +225,6 @@ export const models: Models = { }, [ModelClass.EMBEDDING]: { name: settings.EMBEDDING_GROQ_MODEL || "llama-3.1-8b-instant", - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8000, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, }, }, }, @@ -273,11 +257,6 @@ export const models: 
Models = { }, [ModelClass.EMBEDDING]: { name: "togethercomputer/m2-bert-80M-32k-retrieval", - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, }, [ModelClass.IMAGE]: { name: "black-forest-labs/FLUX.1-schnell", @@ -314,11 +293,6 @@ export const models: Models = { }, [ModelClass.EMBEDDING]: { name: "togethercomputer/m2-bert-80M-32k-retrieval", - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, }, [ModelClass.IMAGE]: { name: "black-forest-labs/FLUX.1-schnell", @@ -354,11 +328,6 @@ export const models: Models = { }, [ModelClass.EMBEDDING]: { name: "togethercomputer/m2-bert-80M-32k-retrieval", - stop: ["<|eot_id|>", "<|eom_id|>"], - maxInputTokens: 32768, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, }, }, }, @@ -405,12 +374,6 @@ export const models: Models = { settings.EMBEDDING_GOOGLE_MODEL || settings.GOOGLE_MODEL || "text-embedding-004", - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, }, }, }, @@ -459,12 +422,6 @@ export const models: Models = { [ModelClass.EMBEDDING]: { name: "text-embedding-3-small", - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.0, - presence_penalty: 0.0, - temperature: 0.6, }, }, }, @@ -511,12 +468,6 @@ export const models: Models = { }, [ModelClass.EMBEDDING]: { name: "text-embedding-3-small", - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, }, }, }, @@ -563,12 +514,7 @@ export const models: Models = { [ModelClass.EMBEDDING]: { name: settings.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large", - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.7, + dimensions: 1024, }, }, }, @@ -643,12 +589,6 @@ export const models: Models 
= { }, [ModelClass.EMBEDDING]: { name: "gte-large-en-v1.5", - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.5, - presence_penalty: 0.5, - temperature: 0.8, }, [ModelClass.IMAGE]: { name: "stabilityai/stable-diffusion-xl-base-1.0", @@ -699,11 +639,7 @@ export const models: Models = { }, [ModelClass.EMBEDDING]: { name: settings.GAIANET_EMBEDDING_MODEL || "nomic-embed", - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - repetition_penalty: 0.4, - temperature: 0.7, + dimensions: 768, }, }, }, @@ -785,12 +721,6 @@ export const models: Models = { }, [ModelClass.EMBEDDING]: { name: settings.VOLENGINE_EMBEDDING_MODEL || "doubao-embedding", - stop: [], - maxInputTokens: 128000, - maxOutputTokens: 8192, - frequency_penalty: 0.4, - presence_penalty: 0.4, - temperature: 0.6, }, }, }, @@ -951,6 +881,14 @@ export function getImageModelSettings( | undefined; } +export function getEmbeddingModelSettings( + provider: ModelProviderName +): EmbeddingModelSettings | undefined { + return models[provider]?.model[ModelClass.EMBEDDING] as + | EmbeddingModelSettings + | undefined; +} + export function getEndpoint(provider: ModelProviderName) { return models[provider].endpoint; } diff --git a/packages/core/src/types.ts b/packages/core/src/types.ts index 8bdccb8fe40..10a394df602 100644 --- a/packages/core/src/types.ts +++ b/packages/core/src/types.ts @@ -174,6 +174,12 @@ export type ImageModelSettings = { steps?: number; }; +/** Embedding model settings */ +export type EmbeddingModelSettings = { + name: string; + dimensions?: number; +}; + /** * Configuration for an AI model */ @@ -186,7 +192,7 @@ export type Model = { [ModelClass.SMALL]?: ModelSettings; [ModelClass.MEDIUM]?: ModelSettings; [ModelClass.LARGE]?: ModelSettings; - [ModelClass.EMBEDDING]?: ModelSettings; + [ModelClass.EMBEDDING]?: EmbeddingModelSettings; [ModelClass.IMAGE]?: ImageModelSettings; }; }; From bbb1f94a04e76b8cf0e4d24de66f2177c1161574 Mon Sep 17 00:00:00 2001 From: 
Ting Chien Meng Date: Sun, 5 Jan 2025 09:20:30 -0500 Subject: [PATCH 14/17] get the info from model settings instead of hardcoded number --- packages/core/src/embedding.ts | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/packages/core/src/embedding.ts b/packages/core/src/embedding.ts index 117853df37b..33124e61df2 100644 --- a/packages/core/src/embedding.ts +++ b/packages/core/src/embedding.ts @@ -1,5 +1,9 @@ import path from "node:path"; -import { getEndpoint } from "./models.ts"; +import { + getEmbeddingModelSettings, + getEndpoint, + getModelSettings, +} from "./models.ts"; import { IAgentRuntime, ModelProviderName } from "./types.ts"; import settings from "./settings.ts"; import elizaLogger from "./logger.ts"; @@ -33,19 +37,20 @@ export type EmbeddingConfig = { export const getEmbeddingConfig = (): EmbeddingConfig => ({ dimensions: settings.USE_OPENAI_EMBEDDING?.toLowerCase() === "true" - ? 1536 // OpenAI + ? getEmbeddingModelSettings(ModelProviderName.OPENAI).dimensions : settings.USE_OLLAMA_EMBEDDING?.toLowerCase() === "true" - ? 1024 // Ollama mxbai-embed-large + ? getEmbeddingModelSettings(ModelProviderName.OLLAMA).dimensions : settings.USE_GAIANET_EMBEDDING?.toLowerCase() === "true" - ? 768 // GaiaNet + ? getEmbeddingModelSettings(ModelProviderName.GAIANET) .dimensions : 384, // BGE model: settings.USE_OPENAI_EMBEDDING?.toLowerCase() === "true" - ? "text-embedding-3-small" + ? getEmbeddingModelSettings(ModelProviderName.OPENAI).name : settings.USE_OLLAMA_EMBEDDING?.toLowerCase() === "true" - ? settings.OLLAMA_EMBEDDING_MODEL || "mxbai-embed-large" + ? getEmbeddingModelSettings(ModelProviderName.OLLAMA).name : settings.USE_GAIANET_EMBEDDING?.toLowerCase() === "true" - ? 
getEmbeddingModelSettings(ModelProviderName.GAIANET).name : "BGE-small-en-v1.5", provider: settings.USE_OPENAI_EMBEDDING?.toLowerCase() === "true" @@ -134,11 +139,17 @@ export function getEmbeddingZeroVector(): number[] { let embeddingDimension = 384; // Default BGE dimension if (settings.USE_OPENAI_EMBEDDING?.toLowerCase() === "true") { - embeddingDimension = 1536; // OpenAI dimension + embeddingDimension = getEmbeddingModelSettings( + ModelProviderName.OPENAI + ).dimensions; // OpenAI dimension } else if (settings.USE_OLLAMA_EMBEDDING?.toLowerCase() === "true") { - embeddingDimension = 1024; // Ollama mxbai-embed-large dimension + embeddingDimension = getEmbeddingModelSettings( + ModelProviderName.OLLAMA + ).dimensions; // Ollama mxbai-embed-large dimension } else if (settings.USE_GAIANET_EMBEDDING?.toLowerCase() === "true") { - embeddingDimension = 768; // GaiaNet dimension + embeddingDimension = getEmbeddingModelSettings( + ModelProviderName.GAIANET + ).dimensions; // GaiaNet dimension } return Array(embeddingDimension).fill(0); From 17240bd26528361b45d145f550e6ef24cd1911c9 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sun, 5 Jan 2025 09:27:58 -0500 Subject: [PATCH 15/17] remove unused variable --- packages/core/src/embedding.ts | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/packages/core/src/embedding.ts b/packages/core/src/embedding.ts index 33124e61df2..b1dea7c685f 100644 --- a/packages/core/src/embedding.ts +++ b/packages/core/src/embedding.ts @@ -1,9 +1,5 @@ import path from "node:path"; -import { - getEmbeddingModelSettings, - getEndpoint, - getModelSettings, -} from "./models.ts"; +import { getEmbeddingModelSettings, getEndpoint } from "./models.ts"; import { IAgentRuntime, ModelProviderName } from "./types.ts"; import settings from "./settings.ts"; import elizaLogger from "./logger.ts"; From f3bfb38368819a61098b17821b4ac690ddbf2fd3 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sun, 5 Jan 2025 09:34:08 -0500 Subject: 
[PATCH 16/17] fix provider path --- packages/client-discord/src/actions/chat_with_attachments.ts | 2 +- packages/client-discord/src/actions/summarize_conversation.ts | 2 +- packages/client-slack/src/actions/chat_with_attachments.ts | 2 +- packages/client-slack/src/actions/summarize_conversation.ts | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/client-discord/src/actions/chat_with_attachments.ts b/packages/client-discord/src/actions/chat_with_attachments.ts index e4bcad1a45a..8f62a775100 100644 --- a/packages/client-discord/src/actions/chat_with_attachments.ts +++ b/packages/client-discord/src/actions/chat_with_attachments.ts @@ -185,7 +185,7 @@ const summarizeAction = { let currentSummary = ""; const modelSettings = getModelSettings( - runtime.modelProvider, + runtime.character.modelProvider, ModelClass.SMALL ); const chunkSize = modelSettings.maxOutputTokens; diff --git a/packages/client-discord/src/actions/summarize_conversation.ts b/packages/client-discord/src/actions/summarize_conversation.ts index 3c05fcab80a..accda5168ed 100644 --- a/packages/client-discord/src/actions/summarize_conversation.ts +++ b/packages/client-discord/src/actions/summarize_conversation.ts @@ -247,7 +247,7 @@ const summarizeAction = { let currentSummary = ""; const modelSettings = getModelSettings( - runtime.modelProvider, + runtime.character.modelProvider, ModelClass.SMALL ); const chunkSize = modelSettings.maxOutputTokens - 1000; diff --git a/packages/client-slack/src/actions/chat_with_attachments.ts b/packages/client-slack/src/actions/chat_with_attachments.ts index 4cd7b577e86..e059cc47b4b 100644 --- a/packages/client-slack/src/actions/chat_with_attachments.ts +++ b/packages/client-slack/src/actions/chat_with_attachments.ts @@ -196,7 +196,7 @@ const summarizeAction: Action = { let currentSummary = ""; const modelSettings = getModelSettings( - runtime.modelProvider, + runtime.character.modelProvider, ModelClass.SMALL ); const chunkSize = 
modelSettings.maxOutputTokens; diff --git a/packages/client-slack/src/actions/summarize_conversation.ts b/packages/client-slack/src/actions/summarize_conversation.ts index 6afd4d49e82..b487757cadd 100644 --- a/packages/client-slack/src/actions/summarize_conversation.ts +++ b/packages/client-slack/src/actions/summarize_conversation.ts @@ -267,7 +267,7 @@ const summarizeAction: Action = { let currentSummary = ""; const modelSettings = getModelSettings( - runtime.modelProvider, + runtime.character.modelProvider, ModelClass.SMALL ); const chunkSize = modelSettings.maxOutputTokens; From f3792e0372603528230a604e201da17d6d4993f1 Mon Sep 17 00:00:00 2001 From: Ting Chien Meng Date: Sun, 5 Jan 2025 12:15:04 -0500 Subject: [PATCH 17/17] resolve conflict --- README.md | 165 +----------------------------------------------------- 1 file changed, 1 insertion(+), 164 deletions(-) diff --git a/README.md b/README.md index 2052815e4e3..538a3a6b456 100644 --- a/README.md +++ b/README.md @@ -12,170 +12,7 @@ ## 🌍 README Translations -[中文说明](./README_CN.md) | [日本語の説明](./README_JA.md) | [한국어 설명](./README_KOR.md) | [Français](./README_FR.md) | [Português](./README_PTBR.md) | [Türkçe](./README_TR.md) | [Русский](./README_RU.md) | [Español](./README_ES.md) | [Italiano](./README_IT.md) | [ไทย](./README_TH.md) | [Deutsch](./README_DE.md) | [Tiếng Việt](./README_VI.md) | [עִברִית](https://github.com/elizaos/Elisa/blob/main/README_HE.md) | [Tagalog](./README_TG.md) | [Polski](./README_PL.md) | [Arabic](./README_AR.md) | [Hungarian](./README_HU.md) | [Srpski](./README_RS.md) - -## 🚩 Overview - -
- Eliza Diagram -
- -## ✨ Features - -- 🛠️ Full-featured Discord, Twitter and Telegram connectors -- 🔗 Support for every model (Llama, Grok, OpenAI, Anthropic, etc.) -- 👥 Multi-agent and room support -- 📚 Easily ingest and interact with your documents -- 💾 Retrievable memory and document store -- 🚀 Highly extensible - create your own actions and clients -- ☁️ Supports many models (local Llama, OpenAI, Anthropic, Groq, etc.) -- 📦 Just works! - -## Video Tutorials - -[AI Agent Dev School](https://www.youtube.com/watch?v=ArptLpQiKfI&list=PLx5pnFXdPTRzWla0RaOxALTSTnVq53fKL) - -## 🎯 Use Cases - -- 🤖 Chatbots -- 🕵️ Autonomous Agents -- 📈 Business Process Handling -- 🎮 Video Game NPCs -- 🧠 Trading - -## 💰 If you plan to launch a token - -This framework is the number one open source project on github, we are enabling the next generation of human-machine interface but we still need your help to ensure the code is of the utmost quality with response rapid to critical issues that will affect our builder community at large. - -To ensure sustainable development and continued innovation, we ask contributions of 5-10% of initial token distributions from successful launches. - -All contributions are publicly tracked on-chain and used exclusively for ecosystem development. - -### ⚠️ Don't forget to tip the big guy 10%: ⚠️ -[AM84n1iLdxgVTAyENBcLdjXoyvjentTbu5Q6EpKV1PeG](https://solscan.io/account/AM84n1iLdxgVTAyENBcLdjXoyvjentTbu5Q6EpKV1PeG) - -## 🚀 Quick Start - -### Prerequisites - -- [Python 2.7+](https://www.python.org/downloads/) -- [Node.js 23+](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) -- [pnpm](https://pnpm.io/installation) - -> **Note for Windows Users:** [WSL 2](https://learn.microsoft.com/en-us/windows/wsl/install-manual) is required. 
- -### Use the Starter (Recommended) - -```bash -git clone https://github.com/elizaos/eliza-starter.git -cd eliza-starter -cp .env.example .env -pnpm i && pnpm build && pnpm start -``` - -Once the agent is running, you should see the message to run "pnpm start:client" at the end. -Open another terminal and move to same directory and then run below command and follow the URL to chat to your agent. - -```bash -pnpm start:client -``` - -Then read the [Documentation](https://elizaos.github.io/eliza/) to learn how to customize your Eliza. - -### Manually Start Eliza (Only recommended if you know what you are doing) - -```bash -# Clone the repository -git clone https://github.com/elizaos/eliza.git - -# Checkout the latest release -# This project iterates fast, so we recommend checking out the latest release -git checkout $(git describe --tags --abbrev=0) -``` - -### Start Eliza with Gitpod - -[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/elizaos/eliza/tree/main) - -### Edit the .env file - -Copy .env.example to .env and fill in the appropriate values. - -``` -cp .env.example .env -``` - -Note: .env is optional. If you're planning to run multiple distinct agents, you can pass secrets through the character JSON -Note: .env is optional. If you're planning to run multiple distinct agents, you can pass secrets through the character JSON - -### Automatically Start Eliza - -This will run everything to set up the project and start the bot with the default character. - -```bash -sh scripts/start.sh -``` - -### Edit the character file - -1. Open `packages/core/src/defaultCharacter.ts` to modify the default character. Uncomment and edit. - -2. To load custom characters: - - Use `pnpm start --characters="path/to/your/character.json"` - - Multiple character files can be loaded simultaneously -3. 
Connect with X (Twitter) - - change `"clients": []` to `"clients": ["twitter"]` in the character file to connect with X - -### Manually Start Eliza - -```bash -pnpm i -pnpm build -pnpm start - -# The project iterates fast, sometimes you need to clean the project if you are coming back to the project -pnpm clean -``` - -#### Additional Requirements - -You may need to install Sharp. If you see an error when starting up, try installing it with the following command: - -``` -pnpm install --include=optional sharp -``` - -### Community & contact - -- [GitHub Issues](https://github.com/elizaos/eliza/issues). Best for: bugs you encounter using Eliza, and feature proposals. -- [Discord](https://discord.gg/ai16z). Best for: sharing your applications and hanging out with the community. -- [Developer Discord](https://discord.gg/3f67SH4rXT). Best for: getting help and plugin development. - -## Contributors - - - - - -## Star History - -[![Star History Chart](https://api.star-history.com/svg?repos=elizaos/eliza&type=Date)](https://star-history.com/#elizaos/eliza&Date) -======= -# Eliza 🤖 - -
- Eliza Banner -
- -
- -📖 [Documentation](https://elizaos.github.io/eliza/) | 🎯 [Examples](https://github.com/thejoven/awesome-eliza) - -
- -## 🌍 README Translations - -[中文说明](./README_CN.md) | [日本語の説明](./README_JA.md) | [한국어 설명](./README_KOR.md) | [Français](./README_FR.md) | [Português](./README_PTBR.md) | [Türkçe](./README_TR.md) | [Русский](./README_RU.md) | [Español](./README_ES.md) | [Italiano](./README_IT.md) | [ไทย](./README_TH.md) | [Deutsch](./README_DE.md) | [Tiếng Việt](./README_VI.md) | [עִברִית](https://github.com/elizaos/Elisa/blob/main/README_HE.md) | [Tagalog](./README_TG.md) | [Polski](./README_PL.md) | [Arabic](./README_AR.md) | [Hungarian](./README_HU.md) | [Srpski](./README_RS.md) +[中文说明](./README_CN.md) | [日本語の説明](./README_JA.md) | [한국어 설명](./README_KOR.md) | [Français](./README_FR.md) | [Português](./README_PTBR.md) | [Türkçe](./README_TR.md) | [Русский](./README_RU.md) | [Español](./README_ES.md) | [Italiano](./README_IT.md) | [ไทย](./README_TH.md) | [Deutsch](./README_DE.md) | [Tiếng Việt](./README_VI.md) | [עִברִית](https://github.com/elizaos/Elisa/blob/main/README_HE.md) | [Tagalog](./README_TG.md) | [Polski](./README_PL.md) | [Arabic](./README_AR.md) | [Hungarian](./README_HU.md) | [Srpski](./README_RS.md) | [Română](./README_RO.md) ## 🚩 Overview