Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Add image generation capability to Telegram messaging (PR491 Resubmission) #1505

Closed
wants to merge 9 commits into from
Closed
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,10 @@ IMAGE_GEN= # Set to TRUE to enable image generation
USE_OPENAI_EMBEDDING= # Set to TRUE for OpenAI/1536, leave blank for local
USE_OLLAMA_EMBEDDING= # Set to TRUE for OLLAMA/1024, leave blank for local

# Generation Prompts
SYSTEM_PROMPT= # Leave blank for empty system prompt or defined in character config
IMAGE_GENERATION_PROMPT= # Leave blank for default image generation prompt or defined in character config

# OpenRouter Models
OPENROUTER_MODEL= # Default: uses hermes 70b/405b
SMALL_OPENROUTER_MODEL=
Expand Down Expand Up @@ -187,6 +191,7 @@ TELEGRAM_BOT_TOKEN=

# Together Configuration
TOGETHER_API_KEY=
TOGETHER_IMAGE_MODEL= # Leave blank for the default model: black-forest-labs/FLUX.1-schnell

# Server Configuration
SERVER_PORT=3000
Expand Down
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,10 @@ tweets/
*.onnx
*.wav
*.mp3
*.png
*.jpg
*.jpeg
*.webp

logs/

Expand Down
3 changes: 3 additions & 0 deletions agent/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,9 @@ import Database from "better-sqlite3";
import fs from "fs";
import path from "path";
import { fileURLToPath } from "url";
import { character } from "./character.ts";
import { imageGenerationPlugin } from "@ai16z/plugin-image-generation";
import type { DirectClient } from "@ai16z/client-direct";
import yargs from "yargs";

const __filename = fileURLToPath(import.meta.url); // get the resolved path to the file
Expand Down
3 changes: 3 additions & 0 deletions mise.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
[tools]
node = "23.1.0"
pnpm = "latest"
22 changes: 13 additions & 9 deletions packages/client-telegram/src/messageManager.ts
Original file line number Diff line number Diff line change
@@ -1,24 +1,26 @@
import { Message } from "@telegraf/types";
import { Context, Telegraf } from "telegraf";

import { composeContext, elizaLogger, ServiceType } from "@elizaos/core";
import { getEmbeddingZeroVector } from "@elizaos/core";
import {
composeContext,
ServiceType,
Content,
HandlerCallback,
IAgentRuntime,
getEmbeddingZeroVector,
IImageDescriptionService,
Memory,
ModelClass,
State,
UUID,
Media,
elizaLogger,
stringToUuid,
generateMessageResponse,
generateShouldRespond,
messageCompletionFooter,
shouldRespondFooter,
} from "@elizaos/core";
import { stringToUuid } from "@elizaos/core";

import { generateMessageResponse, generateShouldRespond } from "@elizaos/core";
import { messageCompletionFooter, shouldRespondFooter } from "@elizaos/core";

import { cosineSimilarity } from "./utils";
import {
MESSAGE_CONSTANTS,
Expand Down Expand Up @@ -136,10 +138,11 @@ Note that {{agentName}} is capable of reading/seeing/hearing various forms of me

{{recentMessages}}

# Task: Generate a post/reply in the voice, style and perspective of {{agentName}} (@{{twitterUserName}}) while using the thread of tweets as additional context:
# Task: Generate a reply in the voice and style of {{agentName}}, aka @{{twitterUserName}}
Write a very short reply that is from the perspective of {{agentName}}. Try to write something totally different than previous posts. Do not add commentary or acknowledge this request, just write the reply. Use the thread of tweets as additional context:
Current Post:
{{currentPost}}
Thread of Tweets You Are Replying To:
Thread of messages you are replying to:

{{formattedConversation}}
` + messageCompletionFooter;
Expand Down Expand Up @@ -682,6 +685,7 @@ export class MessageManager {
content: Content,
replyToMessageId?: number
): Promise<Message.TextMessage[]> {

if (content.attachments && content.attachments.length > 0) {
content.attachments.map(async (attachment: Media) => {
if (attachment.contentType.startsWith("image")) {
Expand Down
39 changes: 26 additions & 13 deletions packages/core/src/generation.ts
Original file line number Diff line number Diff line change
Expand Up @@ -942,22 +942,36 @@ export const generateImage = async (
data?: string[];
error?: any;
}> => {
const model = getModel(runtime.imageModelProvider, ModelClass.IMAGE);
const modelSettings = models[runtime.imageModelProvider].imageSettings;
const imageModelProvider =
runtime.character.imageModelProvider ?? runtime.character.modelProvider;
const model = getModel(imageModelProvider, ModelClass.IMAGE);
const modelSettings = models[imageModelProvider].imageSettings;

elizaLogger.info("Generating image with options:", {
imageModelProvider: model,
});

const apiKey =
runtime.imageModelProvider === runtime.modelProvider
? runtime.token
: (runtime.getSetting("HEURIST_API_KEY") ??
runtime.getSetting("TOGETHER_API_KEY") ??
runtime.getSetting("FAL_API_KEY") ??
runtime.getSetting("OPENAI_API_KEY") ??
runtime.getSetting("VENICE_API_KEY"));

let apiKey = runtime.token;
switch (imageModelProvider) {
case ModelProviderName.HEURIST:
apiKey = runtime.getSetting("HEURIST_API_KEY");
break;
case ModelProviderName.TOGETHER:
apiKey = runtime.getSetting("TOGETHER_API_KEY");
break;
case ModelProviderName.FAL:
apiKey = runtime.getSetting("FAL_API_KEY");
break;
case ModelProviderName.LLAMACLOUD:
apiKey = runtime.getSetting("LLAMACLOUD_API_KEY");
break;
case ModelProviderName.VENICE:
apiKey = runtime.getSetting("VENICE_API_KEY");
break;
case ModelProviderName.OPENAI:
apiKey = runtime.getSetting("OPENAI_API_KEY");
break;
}
try {
if (runtime.imageModelProvider === ModelProviderName.HEURIST) {
const response = await fetch(
Expand Down Expand Up @@ -1003,7 +1017,7 @@ export const generateImage = async (
) {
const together = new Together({ apiKey: apiKey as string });
const response = await together.images.create({
model: "black-forest-labs/FLUX.1-schnell",
model: runtime.getSetting("TOGETHER_IMAGE_MODEL") ?? "black-forest-labs/FLUX.1-schnell",
prompt: data.prompt,
width: data.width,
height: data.height,
Expand All @@ -1022,7 +1036,6 @@ export const generateImage = async (
throw new Error("Invalid response format from Together AI");
}

// Rest of the code remains the same...
const base64s = await Promise.all(
togetherResponse.data.map(async (image) => {
if (!image.url) {
Expand Down
5 changes: 4 additions & 1 deletion packages/core/src/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -638,11 +638,13 @@ export type Character = {

/** Optional system prompt */
system?: string;
/** Optional image generation prompt */
imageGenerationPrompt?: string;

/** Model provider to use */
modelProvider: ModelProviderName;

/** Image model provider to use, if different from modelProvider */
/** Optional image model provider to use, if different from modelProvider */
imageModelProvider?: ModelProviderName;

/** Optional model endpoint override */
Expand All @@ -655,6 +657,7 @@ export type Character = {
messageHandlerTemplate?: string;
shouldRespondTemplate?: string;
continueMessageHandlerTemplate?: string;
imagePromptTemplate? :string;
evaluationTemplate?: string;
twitterSearchTemplate?: string;
twitterActionTemplate?: string;
Expand Down
Loading
Loading