Commit 4f4316e

Merge pull request #348 from wjw12/main
change default configuration of Heurist
2 parents 0ddcd2f + e28c98b commit 4f4316e

6 files changed: +28 -17 lines

.env.example

+5 -1

@@ -55,8 +55,12 @@ LARGE_OLLAMA_MODEL= #default hermes3:70b
 # For asking Claude stuff
 ANTHROPIC_API_KEY=
 
-# Heurist API
+# Heurist API (Get API Key at https://heurist.ai/dev-access)
 HEURIST_API_KEY=
+SMALL_HEURIST_LANGUAGE_MODEL=
+MEDIUM_HEURIST_LANGUAGE_MODEL=
+LARGE_HEURIST_LANGUAGE_MODEL=
+HEURIST_IMAGE_MODEL=
 
 WALLET_PRIVATE_KEY=EXAMPLE_WALLET_PRIVATE_KEY
 WALLET_PUBLIC_KEY=EXAMPLE_WALLET_PUBLIC_KEY
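
For reference, a minimal sketch of the new block filled in, using the default model IDs this commit sets in the docs below; the API key value is a placeholder:

```
# Sketch only: defaults taken from the docs updated in this commit
HEURIST_API_KEY=your_heurist_api_key
SMALL_HEURIST_LANGUAGE_MODEL=hermes-3-llama3.1-8b
MEDIUM_HEURIST_LANGUAGE_MODEL=mistralai/mixtral-8x7b-instruct
LARGE_HEURIST_LANGUAGE_MODEL=nvidia/llama-3.1-nemotron-70b-instruct
HEURIST_IMAGE_MODEL=FLUX.1-dev
```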

docs/docs/advanced/fine-tuning.md

+5 -5

@@ -273,7 +273,7 @@ const llamaLocalSettings = {
 const heuristSettings = {
     settings: {
         stop: [],
-        maxInputTokens: 128000,
+        maxInputTokens: 32768,
         maxOutputTokens: 8192,
         repetition_penalty: 0.0,
         temperature: 0.7,
@@ -283,11 +283,11 @@ const heuristSettings = {
     },
     endpoint: "https://llm-gateway.heurist.xyz",
     model: {
-        [ModelClass.SMALL]: "meta-llama/llama-3-70b-instruct",
-        [ModelClass.MEDIUM]: "meta-llama/llama-3-70b-instruct",
-        [ModelClass.LARGE]: "meta-llama/llama-3.1-405b-instruct",
+        [ModelClass.SMALL]: "hermes-3-llama3.1-8b",
+        [ModelClass.MEDIUM]: "mistralai/mixtral-8x7b-instruct",
+        [ModelClass.LARGE]: "nvidia/llama-3.1-nemotron-70b-instruct",
         [ModelClass.EMBEDDING]: "", // Add later
-        [ModelClass.IMAGE]: "PepeXL",
+        [ModelClass.IMAGE]: "FLUX.1-dev",
     },
 };
 ```
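
The `model` map above keys Heurist model IDs by `ModelClass`. A minimal, self-contained sketch of how such a map could be looked up is shown below; the local enum values and the `pickHeuristModel` helper are illustrative assumptions, not the actual Eliza implementation.

```typescript
// Illustrative sketch only: resolving a model ID from a map shaped like
// heuristSettings.model above. Enum values and helper name are assumptions.
enum ModelClass {
    SMALL = "small",
    MEDIUM = "medium",
    LARGE = "large",
    EMBEDDING = "embedding",
    IMAGE = "image",
}

const heuristModels: Record<ModelClass, string> = {
    [ModelClass.SMALL]: "hermes-3-llama3.1-8b",
    [ModelClass.MEDIUM]: "mistralai/mixtral-8x7b-instruct",
    [ModelClass.LARGE]: "nvidia/llama-3.1-nemotron-70b-instruct",
    [ModelClass.EMBEDDING]: "", // add later, as in the settings above
    [ModelClass.IMAGE]: "FLUX.1-dev",
};

// Resolve a model ID, falling back to LARGE when a class is not configured.
function pickHeuristModel(cls: ModelClass): string {
    return heuristModels[cls] || heuristModels[ModelClass.LARGE];
}
```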

docs/docs/api/type-aliases/Models.md

+4 -0

@@ -48,6 +48,10 @@
 
 > **redpill**: [`Model`](Model.md)
 
+### heurist
+
+> **heurist**: [`Model`](Model.md)
+
 ## Defined in
 
 [packages/core/src/types.ts:105](https://github.com/ai16z/eliza/blob/7fcf54e7fb2ba027d110afcc319c0b01b3f181dc/packages/core/src/types.ts#L105)

docs/docs/guides/configuration.md

+10 -7

@@ -70,6 +70,9 @@ ANTHROPIC_API_KEY=
 # Together.ai Settings
 TOGETHER_API_KEY=
 
+# Heurist Settings
+HEURIST_API_KEY=
+
 # Local Model Settings
 XAI_MODEL=meta-llama/Llama-3.1-7b-instruct
 ```
@@ -80,12 +83,12 @@ Configure image generation in your character file:
 
 ```json
 {
-    "modelProvider": "HEURIST",
+    "modelProvider": "heurist",
     "settings": {
         "imageSettings": {
             "steps": 20,
-            "width": 512,
-            "height": 512
+            "width": 1024,
+            "height": 1024
         }
     }
 }
@@ -96,13 +99,13 @@ Example usage:
 ```typescript
 const result = await generateImage(
     {
-        prompt: "pepe_frog, meme, web comic, cartoon, 3d render",
-        width: 512,
-        height: 512,
+        prompt: "A cute anime girl with big breasts and straight long black hair wearing orange T-shirt. The T-shirt has \"ai16z\" texts in the front. The girl is looking at the viewer",
+        width: 1024,
+        height: 1024,
         numIterations: 20, // optional
         guidanceScale: 3, // optional
         seed: -1, // optional
-        modelId: "PepeXL", // optional
+        modelId: "FLUX.1-dev", // optional
     },
     runtime,
 );

docs/docs/quickstart.md

+3 -3

@@ -67,9 +67,9 @@ Before getting started with Eliza, ensure you have:
 
 Eliza supports multiple AI models:
 
-- **Heurist**: Set `modelProvider: "HEURIST"` in your character file
-  - LLM: Uses Llama models (more available LLM models [here](https://heurist.mintlify.app/developer/supported-models))
-  - Image Generation: Uses PepeXL model (more info of available models [here](https://heurist.mintlify.app/developer/image-generation-api))
+- **Heurist**: Set `modelProvider: "heurist"` in your character file. Most models are uncensored.
+  - LLM: Select available LLMs [here](https://docs.heurist.ai/dev-guide/supported-models#large-language-models-llms) and configure `SMALL_HEURIST_LANGUAGE_MODEL`,`MEDIUM_HEURIST_LANGUAGE_MODEL`,`LARGE_HEURIST_LANGUAGE_MODEL`
+  - Image Generation: Select available Stable Diffusion or Flux models [here](https://docs.heurist.ai/dev-guide/supported-models#image-generation-models) and configure `HEURIST_IMAGE_MODEL` (default is FLUX.1-dev)
 - **Llama**: Set `XAI_MODEL=meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo`
 - **Grok**: Set `XAI_MODEL=grok-beta`
 - **OpenAI**: Set `XAI_MODEL=gpt-4o-mini` or `gpt-4o`
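
As a quick illustration of the Heurist bullet above, a minimal character-file fragment might look like the following; the agent name is illustrative and all other character fields are omitted (see docs/docs/guides/configuration.md above for the image settings):

```json
{
    "name": "ExampleAgent",
    "modelProvider": "heurist"
}
```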

packages/core/src/generation.ts

+1 -1

@@ -771,7 +771,7 @@ export const generateImage = async (
                     seed: data.seed || -1,
                 },
             },
-            model_id: data.modelId || "PepeXL", // Default to SD 1.5 if not specified
+            model_id: data.modelId || "FLUX.1-dev",
         }),
     }
 );
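
With this change, callers that omit `modelId` fall back to "FLUX.1-dev". A minimal sketch of such a call, assuming the `generateImage` signature shown in docs/docs/guides/configuration.md above (the prompt string is illustrative):

```typescript
// Sketch only: modelId omitted, so the provider default "FLUX.1-dev" is used.
const result = await generateImage(
    {
        prompt: "a watercolor painting of a lighthouse at dusk",
        width: 1024,
        height: 1024,
        // numIterations, guidanceScale, seed, modelId are optional and omitted here
    },
    runtime,
);
```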
