diff --git a/.env.example b/.env.example
index 93aa4a45c79..963a45cd508 100644
--- a/.env.example
+++ b/.env.example
@@ -87,6 +87,13 @@ EMBEDDING_OPENAI_MODEL= # Default: text-embedding-3-small
 IMAGE_OPENAI_MODEL= # Default: dall-e-3
 USE_OPENAI_EMBEDDING= # Set to TRUE for OpenAI/1536, leave blank for local
 
+# Atoma SDK Configuration
+ATOMASDK_BEARER_AUTH= # Atoma SDK Bearer Auth token
+ATOMA_API_URL= # Default: https://api.atoma.network/v1
+SMALL_ATOMA_MODEL= # Default: meta-llama/Llama-3.3-70B-Instruct
+MEDIUM_ATOMA_MODEL= # Default: meta-llama/Llama-3.3-70B-Instruct
+LARGE_ATOMA_MODEL= # Default: meta-llama/Llama-3.3-70B-Instruct
+
 # Eternal AI's Decentralized Inference API
 ETERNALAI_URL=
 ETERNALAI_MODEL= # Default: "NousResearch/Hermes-3-Llama-3.1-70B-FP8"
diff --git a/agent/src/index.ts b/agent/src/index.ts
index 581bd4e1d87..6ba56bfecbb 100644
--- a/agent/src/index.ts
+++ b/agent/src/index.ts
@@ -476,6 +476,11 @@ export function getTokenForProvider(
                 character.settings?.secrets?.VENICE_API_KEY ||
                 settings.VENICE_API_KEY
             );
+        case ModelProviderName.ATOMA:
+            return (
+                character.settings?.secrets?.ATOMASDK_BEARER_AUTH ||
+                settings.ATOMASDK_BEARER_AUTH
+            );
         case ModelProviderName.AKASH_CHAT_API:
             return (
                 character.settings?.secrets?.AKASH_CHAT_API_KEY ||
diff --git a/packages/core/src/generation.ts b/packages/core/src/generation.ts
index d53e64ef8d2..bc5570d7798 100644
--- a/packages/core/src/generation.ts
+++ b/packages/core/src/generation.ts
@@ -1032,6 +1032,36 @@ export async function generateText({
             break;
         }
 
+        case ModelProviderName.ATOMA: {
+            elizaLogger.debug("Initializing Atoma model.");
+            const atoma = createOpenAI({
+                apiKey,
+                baseURL: endpoint,
+                fetch: runtime.fetch,
+            });
+
+            const { text: atomaResponse } = await aiGenerateText({
+                model: atoma.languageModel(model),
+                prompt: context,
+                system:
+                    runtime.character.system ??
+                    settings.SYSTEM_PROMPT ??
+                    undefined,
+                tools: tools,
+                onStepFinish: onStepFinish,
+                maxSteps: maxSteps,
+                temperature: temperature,
+                maxTokens: max_response_length,
+                frequencyPenalty: frequency_penalty,
+                presencePenalty: presence_penalty,
+                experimental_telemetry: experimental_telemetry,
+            });
+
+            response = atomaResponse;
+            elizaLogger.debug("Received response from Atoma model.");
+            break;
+        }
+
         case ModelProviderName.GALADRIEL: {
             elizaLogger.debug("Initializing Galadriel model.");
             const headers = {};
@@ -2417,4 +2447,4 @@ export async function generateTweetActions({
         await new Promise((resolve) => setTimeout(resolve, retryDelay));
         retryDelay *= 2;
     }
-}
+}
\ No newline at end of file
diff --git a/packages/core/src/models.ts b/packages/core/src/models.ts
index 903f865724f..2c6046d9c5f 100644
--- a/packages/core/src/models.ts
+++ b/packages/core/src/models.ts
@@ -998,6 +998,38 @@ export const models: Models = {
             },
         },
     },
+    [ModelProviderName.ATOMA]: {
+        endpoint: settings.ATOMA_API_URL || "https://api.atoma.network/v1",
+        model: {
+            [ModelClass.SMALL]: {
+                name:
+                    settings.SMALL_ATOMA_MODEL ||
+                    "meta-llama/Llama-3.3-70B-Instruct",
+                stop: [],
+                maxInputTokens: 128000,
+                maxOutputTokens: 8192,
+                temperature: 0.7,
+            },
+            [ModelClass.MEDIUM]: {
+                name:
+                    settings.MEDIUM_ATOMA_MODEL ||
+                    "meta-llama/Llama-3.3-70B-Instruct",
+                stop: [],
+                maxInputTokens: 128000,
+                maxOutputTokens: 8192,
+                temperature: 0.7,
+            },
+            [ModelClass.LARGE]: {
+                name:
+                    settings.LARGE_ATOMA_MODEL ||
+                    "meta-llama/Llama-3.3-70B-Instruct",
+                stop: [],
+                maxInputTokens: 128000,
+                maxOutputTokens: 8192,
+                temperature: 0.7,
+            },
+        },
+    },
 };
 
 export function getModelSettings(
diff --git a/packages/core/src/types.ts b/packages/core/src/types.ts
index 26338791cbf..e34da93d06b 100644
--- a/packages/core/src/types.ts
+++ b/packages/core/src/types.ts
@@ -229,6 +229,7 @@ export type Models = {
     [ModelProviderName.LIVEPEER]: Model;
     [ModelProviderName.DEEPSEEK]: Model;
     [ModelProviderName.INFERA]: Model;
+    [ModelProviderName.ATOMA]: Model;
 };
 
 /**
@@ -264,6 +265,7 @@ export enum ModelProviderName {
     LETZAI = "letzai",
     DEEPSEEK = "deepseek",
     INFERA = "infera",
+    ATOMA = "atoma",
 }
 
 /**
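
Reviewer note — a minimal usage sketch, not part of this diff: with the hunks above applied, an agent opts into the new provider the same way it would any other, by pointing modelProvider at the "atoma" enum value added in types.ts and supplying the bearer token. The fragment below is illustrative: the character name and token placeholder are hypothetical, and the @elizaos/core import path assumes the repo's published package name. Alternatively, the token can live in .env as ATOMASDK_BEARER_AUTH; the other four variables are optional and fall back to the defaults hard-coded in the models.ts hunk.

// character.ts — hypothetical fragment following the repo's existing
// modelProvider convention; names and values here are placeholders.
import { ModelProviderName, type Character } from "@elizaos/core";

export const atomaCharacter: Partial<Character> = {
    name: "AtomaAgent", // illustrative name
    modelProvider: ModelProviderName.ATOMA,
    settings: {
        secrets: {
            // Checked first by getTokenForProvider (see the agent/src/index.ts
            // hunk); the .env ATOMASDK_BEARER_AUTH entry is the fallback
            // when this is omitted.
            ATOMASDK_BEARER_AUTH: "<atoma-bearer-token>",
        },
    },
};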