
Commit

Permalink
Browse files Browse the repository at this point in the history
  • Loading branch information
actions-user committed Feb 10, 2025
2 parents e7514d2 + a029b43 commit 52ef4d3
Showing 9 changed files with 63 additions and 23 deletions.
2 changes: 1 addition & 1 deletion LICENSE
@@ -1,6 +1,6 @@
MIT License

- Copyright (c) 2023-2024 Zhang Yifei
+ Copyright (c) 2023-2025 NextChat

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
11 changes: 6 additions & 5 deletions app/client/platforms/bytedance.ts
@@ -22,7 +22,7 @@ import {
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
- import { getMessageTextContent } from "@/app/utils";
+ import { preProcessImageContent } from "@/app/utils/chat";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
@@ -84,10 +84,11 @@ export class DoubaoApi implements LLMApi {
}

async chat(options: ChatOptions) {
- const messages = options.messages.map((v) => ({
-   role: v.role,
-   content: getMessageTextContent(v),
- }));
+ const messages: ChatOptions["messages"] = [];
+ for (const v of options.messages) {
+   const content = await preProcessImageContent(v.content);
+   messages.push({ role: v.role, content });
+ }

const modelConfig = {
...useAppConfig.getState().modelConfig,
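The ByteDance (Doubao) client, and the xAI client further down, drop getMessageTextContent (which flattens every message to plain text) in favor of preProcessImageContent, so image parts survive into the request. A minimal sketch of the shared pattern; the helper's exact return shape is an assumption, and only the loop itself mirrors the hunk above:

    // Sketch, not the exact NextChat code: assumes preProcessImageContent
    // resolves cached/remote image parts and returns content the provider
    // accepts (a plain string or an array of text/image parts).
    import { preProcessImageContent } from "@/app/utils/chat";
    import type { ChatOptions } from "../api";

    async function buildMultimodalMessages(options: ChatOptions) {
      const messages: ChatOptions["messages"] = [];
      for (const v of options.messages) {
        // Awaited per message so images are inlined before the request is sent.
        const content = await preProcessImageContent(v.content);
        messages.push({ role: v.role, content });
      }
      return messages;
    }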
7 changes: 6 additions & 1 deletion app/client/platforms/deepseek.ts
@@ -5,6 +5,7 @@ import {
DEEPSEEK_BASE_URL,
DeepSeek,
REQUEST_TIMEOUT_MS,
+ REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import {
useAccessStore,
@@ -117,10 +118,14 @@ export class DeepSeekApi implements LLMApi {

// console.log(chatPayload);

+ const isR1 =
+   options.config.model.endsWith("-reasoner") ||
+   options.config.model.endsWith("-r1");

// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
- REQUEST_TIMEOUT_MS,
+ isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
);

if (shouldStream) {
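The DeepSeek change is one instance of a pattern repeated in the Google, OpenAI, and SiliconFlow clients below: reasoning ("thinking") models get the new, longer REQUEST_TIMEOUT_MS_FOR_THINKING budget before the request is aborted. A condensed sketch of that pattern, with the constants as defined in app/constant.ts in this commit and an illustrative wrapper function:

    // Illustrative only: mirrors the setTimeout/AbortController pattern above.
    const REQUEST_TIMEOUT_MS = 60000;
    const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;

    function startAbortTimer(
      controller: AbortController,
      model: string,
    ): ReturnType<typeof setTimeout> {
      // Reasoning models stream slowly before the first token, so they get
      // five minutes instead of the default sixty seconds.
      const isReasoning = model.endsWith("-reasoner") || model.endsWith("-r1");
      return setTimeout(
        () => controller.abort(),
        isReasoning ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
      );
    }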
19 changes: 16 additions & 3 deletions app/client/platforms/google.ts
@@ -1,4 +1,9 @@
- import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
+ import {
+   ApiPath,
+   Google,
+   REQUEST_TIMEOUT_MS,
+   REQUEST_TIMEOUT_MS_FOR_THINKING,
+ } from "@/app/constant";
import {
ChatOptions,
getHeaders,
@@ -69,9 +74,16 @@ export class GeminiProApi implements LLMApi {
.join("\n\n");
};

+ let content = "";
+ if (Array.isArray(res)) {
+   res.map((item) => {
+     content += getTextFromParts(item?.candidates?.at(0)?.content?.parts);
+   });
+ }

return (
getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
- getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
+ content || //getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
res?.error?.message ||
""
);
@@ -190,10 +202,11 @@
headers: getHeaders(),
};

+ const isThinking = options.config.model.includes("-thinking");
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
- REQUEST_TIMEOUT_MS,
+ isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
);

if (shouldStream) {
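Besides the longer timeout for "-thinking" models, the Gemini client's message extractor now also accepts an array of response chunks and concatenates the text of each chunk's first candidate. A self-contained sketch of that behavior, using a simplified response type rather than the full Gemini schema:

    // Simplified sketch of the extractor after this change.
    type GeminiChunk = {
      candidates?: { content?: { parts?: { text?: string }[] } }[];
      error?: { message?: string };
    };

    function extractMessage(res: GeminiChunk | GeminiChunk[]): string {
      const getTextFromParts = (parts?: { text?: string }[]) =>
        (parts ?? []).map((p) => p.text ?? "").join("\n\n");
      if (Array.isArray(res)) {
        // Streamed responses arrive as an array of chunks; join their texts.
        return res
          .map((item) => getTextFromParts(item?.candidates?.at(0)?.content?.parts))
          .join("");
      }
      return (
        getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
        res?.error?.message ||
        ""
      );
    }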
9 changes: 7 additions & 2 deletions app/client/platforms/openai.ts
@@ -8,6 +8,7 @@ import {
Azure,
REQUEST_TIMEOUT_MS,
ServiceProvider,
+ REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import {
ChatMessageTool,
@@ -195,7 +196,9 @@ export class ChatGPTApi implements LLMApi {
let requestPayload: RequestPayload | DalleRequestPayload;

const isDalle3 = _isDalle3(options.config.model);
const isO1OrO3 = options.config.model.startsWith("o1") || options.config.model.startsWith("o3");
const isO1OrO3 =
options.config.model.startsWith("o1") ||
options.config.model.startsWith("o3");
if (isDalle3) {
const prompt = getMessageTextContent(
options.messages.slice(-1)?.pop() as any,
@@ -359,7 +362,9 @@
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
- isDalle3 || isO1OrO3 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+ isDalle3 || isO1OrO3
+   ? REQUEST_TIMEOUT_MS_FOR_THINKING
+   : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
);

const res = await fetch(chatPath, chatPayload);
12 changes: 6 additions & 6 deletions app/client/platforms/siliconflow.ts
@@ -4,7 +4,7 @@ import {
ApiPath,
SILICONFLOW_BASE_URL,
SiliconFlow,
- REQUEST_TIMEOUT_MS,
+ REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import {
useAccessStore,
@@ -120,11 +120,11 @@ export class SiliconflowApi implements LLMApi {

// console.log(chatPayload);

- // make a fetch request
- const requestTimeoutId = setTimeout(() => {
-   console.error("[Request] SiliconFlow API timeout");
-   controller.abort();
- }, 10 * REQUEST_TIMEOUT_MS);
+ // Use extended timeout for thinking models as they typically require more processing time
+ const requestTimeoutId = setTimeout(
+   () => controller.abort(),
+   REQUEST_TIMEOUT_MS_FOR_THINKING,
+ );

if (shouldStream) {
const [tools, funcs] = usePluginStore
4 changes: 2 additions & 2 deletions app/client/platforms/xai.ts
@@ -17,7 +17,7 @@ import {
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
- import { getMessageTextContent } from "@/app/utils";
+ import { preProcessImageContent } from "@/app/utils/chat";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";

@@ -62,7 +62,7 @@ export class XAIApi implements LLMApi {
async chat(options: ChatOptions) {
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
- const content = getMessageTextContent(v);
+ const content = await preProcessImageContent(v.content);
messages.push({ role: v.role, content });
}

10 changes: 8 additions & 2 deletions app/components/emoji.tsx
@@ -54,6 +54,8 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
if (
modelName.startsWith("gpt") ||
modelName.startsWith("chatgpt") ||
modelName.startsWith("dall-e") ||
modelName.startsWith("dalle") ||
modelName.startsWith("o1") ||
modelName.startsWith("o3")
) {
@@ -80,9 +82,13 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
LlmIcon = BotIconGrok;
} else if (modelName.startsWith("hunyuan")) {
LlmIcon = BotIconHunyuan;
} else if (modelName.startsWith("doubao")) {
} else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
LlmIcon = BotIconDoubao;
} else if (modelName.startsWith("glm")) {
} else if (
modelName.startsWith("glm") ||
modelName.startsWith("cogview-") ||
modelName.startsWith("cogvideox-")
) {
LlmIcon = BotIconChatglm;
}

12 changes: 11 additions & 1 deletion app/constant.ts
@@ -110,6 +110,7 @@ export const UNFINISHED_INPUT = (id: string) => "unfinished-input-" + id;
export const STORAGE_KEY = "chatgpt-next-web";

export const REQUEST_TIMEOUT_MS = 60000;
+ export const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;

export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown";

@@ -589,7 +590,16 @@ const iflytekModels = [

const deepseekModels = ["deepseek-chat", "deepseek-coder", "deepseek-reasoner"];

const xAIModes = ["grok-beta"];
const xAIModes = [
"grok-beta",
"grok-2",
"grok-2-1212",
"grok-2-latest",
"grok-vision-beta",
"grok-2-vision-1212",
"grok-2-vision",
"grok-2-vision-latest",
];

const chatglmModels = [
"glm-4-plus",
