feat: 支持模型设置

This commit is contained in:
arvinxx
2023-07-16 21:41:55 +08:00
parent d95027d42b
commit 170567a87f
8 changed files with 89 additions and 143 deletions

View File

@@ -64,6 +64,7 @@
"@lobehub/ui": "^1",
"@vercel/analytics": "^1",
"ahooks": "^3",
"ai": "^2",
"antd": "^5",
"antd-style": "^3",
"brotli-wasm": "^1",
@@ -78,6 +79,7 @@
"nanoid": "^4",
"next": "13.4.7",
"next-i18next": "^14",
"openai-edge": "^1",
"polished": "^4",
"react": "^18",
"react-dom": "^18",

View File

@@ -1,122 +0,0 @@
import { ChatOpenAI } from 'langchain/chat_models/openai';
import { AIChatMessage, HumanChatMessage, SystemChatMessage } from 'langchain/schema';
import { OpenAIChatMessage } from '@/types/openai';
// True when running under `next dev`; gates the optional proxy below.
const isDev = process.env.NODE_ENV === 'development';
// Optional override base URL for the OpenAI API (used only in dev).
const OPENAI_PROXY_URL = process.env.OPENAI_PROXY_URL;
/**
 * @title OpenAI Stream Payload — request body for a chat-completion stream
 */
export interface OpenAIStreamPayload {
  /**
   * @title Penalty on already-frequent tokens, used to reduce verbatim repetition
   * @default 0
   */
  frequency_penalty?: number;
  /**
   * @title Maximum number of tokens to generate
   */
  max_tokens?: number;
  /**
   * @title List of chat messages making up the conversation
   */
  messages: OpenAIChatMessage[];
  /**
   * @title Model name
   */
  model: string;
  /**
   * @title Number of completions to return
   */
  n?: number;
  /**
   * @title Penalty on tokens already present, used to reduce topic repetition
   * @default 0
   */
  presence_penalty?: number;
  /**
   * @title Whether to enable streaming responses
   * @default true
   */
  stream?: boolean;
  /**
   * @title Sampling temperature controlling creativity/diversity of the output
   * @default 0.5
   */
  temperature: number;
  /**
   * @title Nucleus (top-p) sampling cutoff
   * @default 1
   */
  top_p?: number;
}
export function OpenAIStream(payload: OpenAIStreamPayload) {
const { messages, ...params } = payload;
// 将 payload 中的消息转换为 ChatOpenAI 所需的 HumanChatMessage、SystemChatMessage 和 AIChatMessage 类型
const chatMessages = messages.map((m) => {
switch (m.role) {
default:
case 'user': {
return new HumanChatMessage(m.content);
}
case 'system': {
return new SystemChatMessage(m.content);
}
case 'assistant': {
return new AIChatMessage(m.content);
}
}
});
// 使用 TextEncoder 将字符串转换为字节数组,以便在 ReadableStream 中发送
const encoder = new TextEncoder();
// 初始化换行符计数器
return new ReadableStream({
async start(controller) {
let newlineCounter = 0;
const chat = new ChatOpenAI(
{
streaming: true,
...params,
callbacks: [
{
handleLLMNewToken(token) {
// 如果 message 是换行符,且 newlineCounter 小于 2那么跳过该换行符
if (newlineCounter < 2 && token === '\n') {
return;
}
// 将 message 编码为字节并添加到流中
const queue = encoder.encode(token);
controller.enqueue(queue);
newlineCounter++;
},
},
],
// 暂时设定不重试 ,后续看是否需要支持重试
maxRetries: 0,
verbose: true,
},
isDev && OPENAI_PROXY_URL ? { basePath: OPENAI_PROXY_URL } : undefined,
);
try {
// 使用转换后的聊天消息作为输入开始聊天
await chat.call(chatMessages);
// 完成后,关闭流
controller.close();
} catch (error) {
// 如果在执行过程中发生错误,向流发送错误
controller.error(error);
}
},
});
}

View File

@@ -1,15 +1,31 @@
import { OpenAIStream, OpenAIStreamPayload } from './OpenAIStream';
import { OpenAIStream, StreamingTextResponse } from 'ai';
import { Configuration, OpenAIApi } from 'openai-edge';
// Fail fast at module load time if the API key is not configured.
if (!process.env.OPENAI_API_KEY) {
  throw new Error('Missing env var from OpenAI');
}

// Next.js (pages router) route config: run this API route on the Edge runtime.
export const config = {
  runtime: 'edge',
};
export default async function handler(request: Request) {
const payload = (await request.json()) as OpenAIStreamPayload;
return new Response(OpenAIStream(payload));
import { OpenAIStreamPayload } from '@/types/openai';
// True when running under `next dev`; gates the optional proxy below.
const isDev = process.env.NODE_ENV === 'development';
// Optional override base URL for the OpenAI API — NOTE(review): applied
// only in dev; confirm production traffic should always hit the default host.
const OPENAI_PROXY_URL = process.env.OPENAI_PROXY_URL;

// Create an OpenAI API client (that's edge friendly!)
const config = new Configuration({
  apiKey: process.env.OPENAI_API_KEY,
});

// Second argument overrides the API base path; only set in dev when a
// proxy URL is configured.
const openai = new OpenAIApi(config, isDev && OPENAI_PROXY_URL ? OPENAI_PROXY_URL : undefined);

// Run this route on the Edge runtime.
export const runtime = 'edge';
/**
 * Chat-completion API route: forwards the request payload to OpenAI and
 * streams the completion text back to the client.
 *
 * @param req - incoming request whose JSON body is an `OpenAIStreamPayload`
 * @returns a `StreamingTextResponse` built from the OpenAI SSE stream, or
 *          the upstream error response when the OpenAI call fails
 */
export default async function handler(req: Request) {
  // Extract the `messages` from the body of the request; the remaining
  // fields (model, temperature, penalties, …) are forwarded to OpenAI as-is.
  const { messages, ...params } = (await req.json()) as OpenAIStreamPayload;

  const response = await openai.createChatCompletion({
    stream: true,
    ...params,
    // Keep only the fields the API accepts, dropping any extra client data.
    messages: messages.map((m) => ({ content: m.content, role: m.role })),
  });

  // Surface upstream failures (bad key, rate limit, …) directly instead of
  // feeding an error response into the SSE stream parser.
  if (!response.ok) return response;

  const stream = OpenAIStream(response);
  return new StreamingTextResponse(stream);
}

View File

@@ -1,5 +1,5 @@
import { TextArea } from '@lobehub/ui';
import { Button, Collapse, InputNumber, Segmented, Slider } from 'antd';
import { Collapse, InputNumber, Segmented, Slider } from 'antd';
import isEqual from 'fast-deep-equal';
import { useTranslation } from 'next-i18next';
import { Flexbox } from 'react-layout-kit';
@@ -45,19 +45,20 @@ const AgentConfig = () => {
value,
}))}
size={'large'}
value={config.model}
/>
</FormItem>
<FormItem label={t('agentPrompt')}>
<Flexbox gap={16}>
<TextArea
onChange={(e) => {
updateAgentConfig({ systemRole: e.target.value });
}}
placeholder={t('agentPromptPlaceholder')}
style={{ minHeight: 160 }}
type={'block'}
value={config.systemRole}
/>
<Flexbox direction={'horizontal-reverse'}>
<Button type={'primary'}>{t('updatePrompt')}</Button>
</Flexbox>
</Flexbox>
</FormItem>
<Collapse

View File

@@ -1,4 +1,4 @@
import { OpenAIStreamPayload } from '@/pages/api/OpenAIStream';
import { OpenAIStreamPayload } from '@/types/openai';
// 自动起名
export const promptSummaryAgentName = (content: string): Partial<OpenAIStreamPayload> => ({

View File

@@ -1,5 +1,4 @@
import { OpenAIStreamPayload } from '@/pages/api/OpenAIStream';
import { OpenAIChatMessage } from '@/types/openai';
import { OpenAIChatMessage, OpenAIStreamPayload } from '@/types/openai';
export const promptSummaryTitle = (
messages: OpenAIChatMessage[],

View File

@@ -1,6 +1,6 @@
import { merge } from 'lodash-es';
import type { OpenAIStreamPayload } from '@/pages/api/OpenAIStream';
import type { OpenAIStreamPayload } from '@/types/openai';
import { URLS } from './url';
@@ -13,9 +13,12 @@ export const fetchChatModel = (
) => {
const payload = merge(
{
frequency_penalty: 0,
model: 'gpt-3.5-turbo',
presence_penalty: 0,
stream: true,
temperature: 0.6,
top_p: 1,
},
params,
);

View File

@@ -13,3 +13,50 @@ export interface OpenAIChatMessage {
*/
role: LLMRoleType;
}
/**
 * @title OpenAI Stream Payload — request body for a chat-completion stream
 */
export interface OpenAIStreamPayload {
  /**
   * @title Penalty on already-frequent tokens, used to reduce verbatim repetition
   * @default 0
   */
  frequency_penalty?: number;
  /**
   * @title Maximum number of tokens to generate
   */
  max_tokens?: number;
  /**
   * @title List of chat messages making up the conversation
   */
  messages: OpenAIChatMessage[];
  /**
   * @title Model name
   */
  model: string;
  /**
   * @title Number of completions to return
   */
  n?: number;
  /**
   * @title Penalty on tokens already present, used to reduce topic repetition
   * @default 0
   */
  presence_penalty?: number;
  /**
   * @title Whether to enable streaming responses
   * @default true
   */
  stream?: boolean;
  /**
   * @title Sampling temperature controlling creativity/diversity of the output
   * @default 0.5
   */
  temperature: number;
  /**
   * @title Nucleus (top-p) sampling cutoff
   * @default 1
   */
  top_p?: number;
}