feat: support ai context

Gabe
2025-09-03 20:43:07 +08:00
parent b631703aa6
commit 343edcdbad
9 changed files with 241 additions and 55 deletions

src/apis/history.js (new file, +39 lines)

@@ -0,0 +1,39 @@
import { DEFAULT_CONTEXT_SIZE } from "../config";
const historyMap = new Map();
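// One bounded message list per translator; the oldest messages are evicted first.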
const MsgHistory = (maxSize = DEFAULT_CONTEXT_SIZE) => {
const messages = [];
const add = (...msgs) => {
messages.push(...msgs.filter(Boolean));
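// trim from the front once the cap is exceeded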
const extra = messages.length - maxSize;
if (extra > 0) {
messages.splice(0, extra);
}
};
const getAll = () => {
return [...messages];
};
const clear = () => {
messages.length = 0;
};
return {
add,
getAll,
clear,
};
};
export const getMsgHistory = (translator, maxSize) => {
if (historyMap.has(translator)) {
return historyMap.get(translator);
}
const msgHistory = MsgHistory(maxSize);
historyMap.set(translator, msgHistory);
return msgHistory;
};
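The history is a plain bounded FIFO: add appends messages and evicts the oldest once maxSize is exceeded, while getMsgHistory memoizes one instance per translator key. Note that maxSize caps individual messages, not exchanges, so each translation round (one user message plus one model reply) consumes two slots. A minimal usage sketch (message shapes are illustrative):

import { getMsgHistory } from "./history";

// Calls with the same translator key share one instance.
const history = getMsgHistory("OpenAI", 4);

history.add(
  { role: "user", content: "Translate: Bonjour" },
  { role: "assistant", content: "Hello" }
);
history.add(
  { role: "user", content: "Translate: Merci" },
  { role: "assistant", content: "Thanks" }
);

console.log(history.getAll().length); // 4 — at capacity; the next add evicts the two oldest
history.clear(); // reset, e.g. when the page or language changes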

---- next file ----

@@ -26,6 +26,7 @@ import {
OPT_TRANS_CUSTOMIZE_3,
OPT_TRANS_CUSTOMIZE_4,
OPT_TRANS_CUSTOMIZE_5,
OPT_TRANS_CONTEXT,
INPUT_PLACE_FROM,
INPUT_PLACE_TO,
INPUT_PLACE_TEXT,
@@ -39,6 +40,7 @@ import interpreter from "../libs/interpreter";
import { parseJsonObj, extractJson } from "../libs/utils";
import { kissLog } from "../libs/log";
import { fetchData } from "../libs/fetch";
import { getMsgHistory } from "./history";
const keyMap = new Map();
const urlMap = new Map();
@@ -286,12 +288,17 @@ const genOpenAI = ({
customHeader,
customBody,
docInfo,
hisMsgs,
}) => {
systemPrompt = genSystemPrompt({ systemPrompt, from, to });
userPrompt = genUserPrompt({ userPrompt, from, to, texts, docInfo });
customHeader = parseJsonObj(customHeader);
customBody = parseJsonObj(customBody);
const userMsg = {
role: "user",
content: userPrompt,
};
const data = {
model,
messages: [
@@ -299,10 +306,8 @@ const genOpenAI = ({
role: "system",
content: systemPrompt,
},
- {
- role: "user",
- content: userPrompt,
- },
+ ...hisMsgs,
+ userMsg,
],
temperature,
max_completion_tokens: maxTokens,
@@ -320,7 +325,7 @@ const genOpenAI = ({
body: JSON.stringify(data),
};
- return [url, init];
+ return [url, init, userMsg];
};
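With context enabled, the OpenAI-compatible request now interleaves prior turns between the system prompt and the current user message. A sketch of the resulting payload with one prior exchange in the history (model name and texts are illustrative):

const data = {
  model: "gpt-4o-mini", // illustrative
  messages: [
    { role: "system", content: "You are a translation engine..." },
    { role: "user", content: "Translate: Bonjour" }, // from hisMsgs
    { role: "assistant", content: "Hello" }, // from hisMsgs
    { role: "user", content: "Translate: Merci" }, // userMsg for this round
  ],
  temperature: 0,
  max_completion_tokens: 20480,
};

The same [...hisMsgs, userMsg] splice is repeated in genGemini2, genOpenRouter, and genOllama below; only the token-limit field differs (max_tokens instead of max_completion_tokens).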
const genGemini = ({
@@ -337,6 +342,7 @@ const genGemini = ({
customHeader,
customBody,
docInfo,
hisMsgs,
}) => {
url = url
.replaceAll(INPUT_PLACE_MODEL, model)
@@ -346,13 +352,14 @@ const genGemini = ({
customHeader = parseJsonObj(customHeader);
customBody = parseJsonObj(customBody);
const userMsg = { role: "user", parts: [{ text: userPrompt }] };
const data = {
system_instruction: {
parts: {
text: systemPrompt,
},
},
- contents: [{ role: "user", parts: [{ text: userPrompt }] }],
+ contents: [...hisMsgs, userMsg],
generationConfig: {
maxOutputTokens: maxTokens,
temperature,
@@ -392,7 +399,7 @@ const genGemini = ({
body: JSON.stringify(data),
};
- return [url, init];
+ return [url, init, userMsg];
};
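Gemini's wire format differs: each turn carries a parts array instead of a content string, and replies come back with role "model". Since parseTransRes stores the candidate's content object verbatim (see below), the replayed contents array looks roughly like this (text values illustrative):

contents: [
  { role: "user", parts: [{ text: "Translate: Bonjour" }] }, // from hisMsgs
  { role: "model", parts: [{ text: "Hello" }] }, // from hisMsgs
  userMsg, // { role: "user", parts: [{ text: userPrompt }] }
],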
const genGemini2 = ({
@@ -409,12 +416,17 @@ const genGemini2 = ({
customHeader,
customBody,
docInfo,
hisMsgs,
}) => {
systemPrompt = genSystemPrompt({ systemPrompt, from, to });
userPrompt = genUserPrompt({ userPrompt, from, to, texts, docInfo });
customHeader = parseJsonObj(customHeader);
customBody = parseJsonObj(customBody);
const userMsg = {
role: "user",
content: userPrompt,
};
const data = {
model,
messages: [
@@ -422,10 +434,8 @@ const genGemini2 = ({
role: "system",
content: systemPrompt,
},
- {
- role: "user",
- content: userPrompt,
- },
+ ...hisMsgs,
+ userMsg,
],
temperature,
max_tokens: maxTokens,
@@ -442,7 +452,7 @@ const genGemini2 = ({
body: JSON.stringify(data),
};
- return [url, init];
+ return [url, init, userMsg];
};
const genClaude = ({
@@ -459,21 +469,21 @@ const genClaude = ({
customHeader,
customBody,
docInfo,
hisMsgs,
}) => {
systemPrompt = genSystemPrompt({ systemPrompt, from, to });
userPrompt = genUserPrompt({ userPrompt, from, to, texts, docInfo });
customHeader = parseJsonObj(customHeader);
customBody = parseJsonObj(customBody);
const userMsg = {
role: "user",
content: userPrompt,
};
const data = {
model,
system: systemPrompt,
- messages: [
- {
- role: "user",
- content: userPrompt,
- },
- ],
+ messages: [...hisMsgs, userMsg],
temperature,
max_tokens: maxTokens,
...customBody,
@@ -491,7 +501,7 @@ const genClaude = ({
body: JSON.stringify(data),
};
- return [url, init];
+ return [url, init, userMsg];
};
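Claude keeps the system prompt in a top-level system field, so only user/assistant turns are replayed in messages. With one prior exchange the payload is roughly (model name illustrative):

const data = {
  model: "claude-3-5-haiku-latest", // illustrative
  system: systemPrompt,
  messages: [
    { role: "user", content: "Translate: Bonjour" },
    { role: "assistant", content: "Hello" },
    userMsg,
  ],
  temperature: 0,
  max_tokens: 20480,
};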
const genOpenRouter = ({
@@ -508,12 +518,17 @@ const genOpenRouter = ({
customHeader,
customBody,
docInfo,
hisMsgs,
}) => {
systemPrompt = genSystemPrompt({ systemPrompt, from, to });
userPrompt = genUserPrompt({ userPrompt, from, to, texts, docInfo });
customHeader = parseJsonObj(customHeader);
customBody = parseJsonObj(customBody);
const userMsg = {
role: "user",
content: userPrompt,
};
const data = {
model,
messages: [
@@ -521,10 +536,8 @@ const genOpenRouter = ({
role: "system",
content: systemPrompt,
},
- {
- role: "user",
- content: userPrompt,
- },
+ ...hisMsgs,
+ userMsg,
],
temperature,
max_tokens: maxTokens,
@@ -541,7 +554,7 @@ const genOpenRouter = ({
body: JSON.stringify(data),
};
- return [url, init];
+ return [url, init, userMsg];
};
const genOllama = ({
@@ -559,12 +572,17 @@ const genOllama = ({
customHeader,
customBody,
docInfo,
hisMsgs,
}) => {
systemPrompt = genSystemPrompt({ systemPrompt, from, to });
userPrompt = genUserPrompt({ userPrompt, from, to, texts, docInfo });
customHeader = parseJsonObj(customHeader);
customBody = parseJsonObj(customBody);
const userMsg = {
role: "user",
content: userPrompt,
};
const data = {
model,
messages: [
@@ -572,10 +590,8 @@ const genOllama = ({
role: "system",
content: systemPrompt,
},
- {
- role: "user",
- content: userPrompt,
- },
+ ...hisMsgs,
+ userMsg,
],
temperature,
max_tokens: maxTokens,
@@ -596,7 +612,7 @@ const genOllama = ({
init.headers.Authorization = `Bearer ${key}`;
}
- return [url, init];
+ return [url, init, userMsg];
};
const genCloudflareAI = ({ texts, from, to, url, key }) => {
@@ -618,10 +634,27 @@ const genCloudflareAI = ({ texts, from, to, url, key }) => {
return [url, init];
};
- const genCustom = ({ texts, from, to, url, key, reqHook, docInfo }) => {
+ const genCustom = ({
+ texts,
+ from,
+ to,
+ url,
+ key,
+ reqHook,
+ docInfo,
+ hisMsgs,
+ }) => {
if (reqHook?.trim()) {
interpreter.run(`exports.reqHook = ${reqHook}`);
- return interpreter.exports.reqHook({ texts, from, to, url, key, docInfo });
+ return interpreter.exports.reqHook({
+ texts,
+ from,
+ to,
+ url,
+ key,
+ docInfo,
+ hisMsgs,
+ });
}
const data = { texts, from, to };
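Custom engines receive hisMsgs through the user-supplied reqHook, so a hook can forward context to any backend. A hypothetical hook (the endpoint body shape is an assumption; only the argument list and the optional [url, init, userMsg] return contract come from this commit):

// Hypothetical reqHook: forwards the history to a custom endpoint.
({ texts, from, to, url, key, docInfo, hisMsgs }) => {
  const init = {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${key}`,
    },
    body: JSON.stringify({ texts, from, to, history: hisMsgs }),
  };
  // The third element is recorded as this round's user turn.
  const userMsg = { role: "user", content: texts.join("\n") };
  return [url, init, userMsg];
};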
@@ -730,8 +763,10 @@ export const genTransReq = (translator, args) => {
export const parseTransRes = (
translator,
res,
- { texts, from, to, resHook, thinkIgnore }
+ { texts, from, to, resHook, thinkIgnore, history, userMsg }
) => {
let modelMsg = "";
switch (translator) {
case OPT_TRANS_GOOGLE:
return [[res?.sentences?.map((item) => item.trans).join(" "), res?.src]];
@@ -783,22 +818,48 @@ export const parseTransRes = (
case OPT_TRANS_OPENAI_3:
case OPT_TRANS_GEMINI_2:
case OPT_TRANS_OPENROUTER:
modelMsg = res?.choices?.[0]?.message;
if (history && userMsg && modelMsg) {
history.add(userMsg, {
role: modelMsg.role,
content: modelMsg.content,
});
}
return parseAIRes(res?.choices?.[0]?.message?.content ?? "");
case OPT_TRANS_GEMINI:
modelMsg = res?.candidates?.[0]?.content;
if (history && userMsg && modelMsg) {
history.add(userMsg, modelMsg);
}
return parseAIRes(res?.candidates?.[0]?.content?.parts?.[0]?.text ?? "");
case OPT_TRANS_CLAUDE:
modelMsg = { role: res?.role, content: res?.content?.[0]?.text }; // Claude's content is an array of blocks
if (history && userMsg && modelMsg) {
history.add(userMsg, {
role: modelMsg.role,
content: modelMsg.content,
});
}
return parseAIRes(res?.content?.[0]?.text ?? "");
case OPT_TRANS_CLOUDFLAREAI:
return [[res?.result?.translated_text]];
case OPT_TRANS_OLLAMA:
case OPT_TRANS_OLLAMA_2:
case OPT_TRANS_OLLAMA_3:
- let resText = res?.response ?? "";
+ modelMsg = res?.choices?.[0]?.message;
const deepModels = thinkIgnore.split(",").filter((model) => model.trim());
if (deepModels.some((model) => res?.model?.startsWith(model))) {
- resText = res?.response.replace(/<think>[\s\S]*<\/think>/i, "");
+ if (modelMsg?.content) {
+ // replace() returns a new string; write it back or the strip is a no-op
+ modelMsg.content = modelMsg.content.replace(/<think>[\s\S]*<\/think>/i, "");
+ }
}
- return parseAIRes(resText);
+ if (history && userMsg && modelMsg) {
+ history.add(userMsg, {
+ role: modelMsg.role,
+ content: modelMsg.content,
+ });
+ }
+ return parseAIRes(modelMsg?.content ?? "");
case OPT_TRANS_CUSTOMIZE:
case OPT_TRANS_CUSTOMIZE_2:
case OPT_TRANS_CUSTOMIZE_3:
@@ -806,7 +867,18 @@ export const parseTransRes = (
case OPT_TRANS_CUSTOMIZE_5:
if (resHook?.trim()) {
interpreter.run(`exports.resHook = ${resHook}`);
- return interpreter.exports.resHook({ res, texts, from, to });
+ if (history) {
+ const [translations, modelMsg] = interpreter.exports.resHook({
+ res,
+ texts,
+ from,
+ to,
+ });
+ userMsg && modelMsg && history.add(userMsg, modelMsg);
+ return translations;
+ } else {
+ return interpreter.exports.resHook({ res, texts, from, to });
+ }
} else {
return res?.map((item) => [item.text, item.src]);
}
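Note the contract change for custom resHook: with history enabled, the hook's return value is destructured as a [translations, modelMsg] pair, so hooks written for the old single-value contract will silently misbehave once useContext is on. A hypothetical hook under the new contract:

// Hypothetical resHook: returns translations plus the reply to remember.
({ res, texts, from, to }) => {
  const translations = res?.map((item) => [item.text, item.src]);
  const modelMsg = { role: "assistant", content: JSON.stringify(res) };
  return [translations, modelMsg];
};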
@@ -830,11 +902,19 @@ export const handleTranslate = async ({
apiSetting,
usePool,
}) => {
- const [input, init] = await genTransReq(translator, {
+ let history = null;
+ let hisMsgs = [];
+ if (apiSetting.useContext && OPT_TRANS_CONTEXT.has(translator)) {
+ history = getMsgHistory(translator, apiSetting.contextSize);
+ hisMsgs = history.getAll();
+ }
+ const [input, init, userMsg] = await genTransReq(translator, {
texts,
from,
to,
docInfo,
hisMsgs,
...apiSetting,
});
@@ -853,6 +933,8 @@ export const handleTranslate = async ({
texts,
from,
to,
history,
userMsg,
...apiSetting,
});
};
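End to end, one context-enabled round reduces to the following (a condensed restatement of the code above; the fetchData call shape is assumed from the imports):

// 1. fetch (or lazily create) this translator's shared history
const history = getMsgHistory(translator, apiSetting.contextSize);
const hisMsgs = history.getAll();

// 2. build the request; userMsg is this round's outgoing turn
const [input, init, userMsg] = await genTransReq(translator, {
  texts, from, to, docInfo, hisMsgs, ...apiSetting,
});

// 3. parseTransRes records the exchange via history.add(userMsg, modelMsg),
// so the next round replays it automatically
const res = await fetchData(input, init); // call shape assumed
return parseTransRes(translator, res, {
  texts, from, to, history, userMsg, ...apiSetting,
});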

---- next file ----

@@ -4,6 +4,7 @@ export const DEFAULT_FETCH_INTERVAL = 100; // default task interval
export const DEFAULT_BATCH_INTERVAL = 1000; // interval between batched requests
export const DEFAULT_BATCH_SIZE = 10; // max paragraphs per request
export const DEFAULT_BATCH_LENGTH = 10000; // max characters per request
export const DEFAULT_CONTEXT_SIZE = 3; // number of context exchanges to keep
export const INPUT_PLACE_URL = "{{url}}"; // placeholder
export const INPUT_PLACE_FROM = "{{from}}"; // placeholder
@@ -69,6 +70,7 @@ export const OPT_TRANS_ALL = [
OPT_TRANS_CUSTOMIZE_5,
];
// translation engines that support batching
export const OPT_TRANS_BATCH = new Set([
OPT_TRANS_GOOGLE_2,
OPT_TRANS_MICROSOFT,
@@ -91,6 +93,25 @@ export const OPT_TRANS_BATCH = new Set([
OPT_TRANS_CUSTOMIZE_5,
]);
// translation engines that support context
export const OPT_TRANS_CONTEXT = new Set([
OPT_TRANS_OPENAI,
OPT_TRANS_OPENAI_2,
OPT_TRANS_OPENAI_3,
OPT_TRANS_GEMINI,
OPT_TRANS_GEMINI_2,
OPT_TRANS_CLAUDE,
OPT_TRANS_OLLAMA,
OPT_TRANS_OLLAMA_2,
OPT_TRANS_OLLAMA_3,
OPT_TRANS_OPENROUTER,
OPT_TRANS_CUSTOMIZE,
OPT_TRANS_CUSTOMIZE_2,
OPT_TRANS_CUSTOMIZE_3,
OPT_TRANS_CUSTOMIZE_4,
OPT_TRANS_CUSTOMIZE_5,
]);
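OPT_TRANS_CONTEXT mirrors OPT_TRANS_BATCH: a capability set that callers combine with the per-API switch, exactly as handleTranslate does:

if (apiSetting.useContext && OPT_TRANS_CONTEXT.has(translator)) {
  // the engine supports context and the user enabled it
}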
export const OPT_LANGDETECTOR_ALL = [
OPT_TRANS_GOOGLE,
OPT_TRANS_MICROSOFT,
@@ -344,13 +365,14 @@ Fail-safe: {"translations":[]}`,
resHook: "", // response hook function
fetchLimit: DEFAULT_FETCH_LIMIT, // max concurrent requests
fetchInterval: DEFAULT_FETCH_INTERVAL, // interval between requests
- httpTimeout: DEFAULT_HTTP_TIMEOUT, // request timeout
+ httpTimeout: DEFAULT_HTTP_TIMEOUT * 30, // request timeout
batchInterval: DEFAULT_BATCH_INTERVAL, // interval between batched requests
batchSize: DEFAULT_BATCH_SIZE, // max paragraphs per request
batchLength: DEFAULT_BATCH_LENGTH, // max characters per request
useBatchFetch: false, // enable aggregated (batch) requests
useRichText: false, // enable rich-text translation
useContext: false, // enable AI context
contextSize: DEFAULT_CONTEXT_SIZE, // number of context exchanges to keep
temperature: 0,
maxTokens: 20480,
think: false,
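Both new keys default to off. A per-API settings object that opts in would look like this (other keys elided). One caveat: contextSize feeds MsgHistory's maxSize, which caps individual messages — two per exchange — even though the labels speak of sessions:

const apiSetting = {
  // ...existing engine settings...
  useContext: true,
  contextSize: 6, // retains the last three user/model exchanges
};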

---- next file ----

@@ -248,9 +248,9 @@ export const I18N = {
zh_TW: `重新翻譯間隔時間 (100-5000ms)`,
},
http_timeout: {
- zh: `请求超时时间 (5000-30000ms)`,
- en: `Request Timeout Time (5000-30000ms)`,
- zh_TW: `請求逾時時間 (5000-30000ms)`,
+ zh: `请求超时时间 (5000-60000ms)`,
+ en: `Request Timeout Time (5000-60000ms)`,
+ zh_TW: `請求逾時時間 (5000-60000ms)`,
},
custom_header: {
zh: `自定义Header参数`,
@@ -274,9 +274,9 @@ export const I18N = {
zh_TW: `最小翻譯字元數 (1-100)`,
},
max_translate_length: {
- zh: `最大翻译字符数 (100-10000)`,
- en: `Maximum number Of Translated Characters (100-10000)`,
- zh_TW: `最大翻譯字元數 (100-10000)`,
+ zh: `最大翻译字符数 (100-100000)`,
+ en: `Maximum number Of Translated Characters (100-100000)`,
+ zh_TW: `最大翻譯字元數 (100-100000)`,
},
num_of_newline_characters: {
zh: `换行字符数 (1-1000)`,
@@ -1248,4 +1248,14 @@ export const I18N = {
en: `Maximum text length for aggregation requests (1000-100000)`,
zh_TW: `聚合請求最大文字長度(1000-100000)`,
},
use_context: {
zh: `是否启用智能上下文`,
en: `Whether to enable AI context`,
zh_TW: `是否啟用智慧上下文`,
},
context_size: {
zh: `上下文会话数量(1-20)`,
en: `Number of context sessions(1-20)`,
zh_TW: `上下文會話數量(1-20)`,
},
};

---- next file ----

@@ -19,7 +19,7 @@ export const DEFAULT_SHORTCUTS = {
};
export const TRANS_MIN_LENGTH = 5; // minimum translation length
- export const TRANS_MAX_LENGTH = 5000; // maximum translation length
+ export const TRANS_MAX_LENGTH = 10000; // maximum translation length
export const TRANS_NEWLINE_LENGTH = 20; // newline character count
export const DEFAULT_BLACKLIST = [
"https://fishjar.github.io/kiss-translator/options.html",

---- next file ----

@@ -2,7 +2,7 @@ import {
DEFAULT_BATCH_INTERVAL,
DEFAULT_BATCH_SIZE,
DEFAULT_BATCH_LENGTH,
- } from "../../config";
+ } from "../config";
/**
* Batch queue
@@ -10,7 +10,7 @@ import {
* @param {*} param1
* @returns
*/
- const batchQueue = (
+ const BatchQueue = (
{ taskFn, ...args },
{
batchInterval = DEFAULT_BATCH_INTERVAL,
@@ -141,7 +141,7 @@ export const getBatchQueue = (args, opts) => {
return queueMap.get(key);
}
- const queue = batchQueue(args, opts);
+ const queue = BatchQueue(args, opts);
queueMap.set(key, queue);
return queue;
};
@@ -150,7 +150,7 @@ export const getBatchQueue = (args, opts) => {
* Clear all tasks
*/
export const clearAllBatchQueue = () => {
- for (const queue of queueMap.entries()) {
+ for (const queue of queueMap.values()) {
queue.destroy();
}
};
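The clearAllBatchQueue change fixes a real crash: Map.prototype.entries() yields [key, value] pairs, so the loop variable was an array with no destroy method. A minimal reproduction of the difference:

const queueMap = new Map([["key", { destroy: () => console.log("destroyed") }]]);

for (const entry of queueMap.entries()) {
  // entry is ["key", { destroy: ... }]; entry.destroy is undefined
}
for (const queue of queueMap.values()) {
  queue.destroy(); // logs "destroyed"
}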

---- next file ----

@@ -8,7 +8,7 @@ import { kissLog } from "./log";
* @param {*} _retryInteral
* @returns
*/
- const taskPool = (_interval = 100, _limit = 100, _retryInteral = 1000) => {
+ const TaskPool = (_interval = 100, _limit = 100, _retryInteral = 1000) => {
const pool = [];
const maxRetry = 2; // max retry count
let maxCount = _limit; // max concurrent tasks
@@ -34,7 +34,7 @@ const taskPool = (_interval = 100, _limit = 100, _retryInteral = 1000) => {
if (retry < maxRetry) {
const retryTimer = setTimeout(() => {
clearTimeout(retryTimer);
- pool.push({ args, resolve, reject, retry: retry + 1 });
+ pool.push({ fn, args, resolve, reject, retry: retry + 1 });
}, _retryInteral);
} else {
reject(err);
@@ -85,7 +85,7 @@ let fetchPool;
*/
export const getFetchPool = (interval, limit) => {
if (!fetchPool) {
- fetchPool = taskPool(
+ fetchPool = TaskPool(
interval ?? DEFAULT_FETCH_INTERVAL,
limit ?? DEFAULT_FETCH_LIMIT
);
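The TaskPool change fixes a similar latent bug: the retry path re-queued the task without its fn, so the second attempt had nothing to invoke; re-enqueuing fn makes retries actually run. Incidentally, the clearTimeout inside the setTimeout callback is a no-op, since the timer has already fired by the time the callback executes.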

---- next file ----

@@ -35,9 +35,11 @@ import {
DEFAULT_FETCH_INTERVAL,
DEFAULT_HTTP_TIMEOUT,
OPT_TRANS_BATCH,
OPT_TRANS_CONTEXT,
DEFAULT_BATCH_INTERVAL,
DEFAULT_BATCH_SIZE,
DEFAULT_BATCH_LENGTH,
DEFAULT_CONTEXT_SIZE,
} from "../../config";
import { useState } from "react";
import { useI18n } from "../../hooks/I18n";
@@ -148,6 +150,8 @@ function ApiFields({ translator, api, updateApi, resetApi }) {
batchInterval = DEFAULT_BATCH_INTERVAL,
batchSize = DEFAULT_BATCH_SIZE,
batchLength = DEFAULT_BATCH_LENGTH,
useContext = false,
contextSize = DEFAULT_CONTEXT_SIZE,
} = api;
const handleChange = (e) => {
@@ -160,7 +164,7 @@ function ApiFields({ translator, api, updateApi, resetApi }) {
value = limitNumber(value, 0, 5000);
break;
case "httpTimeout":
- value = limitNumber(value, 5000, 30000);
+ value = limitNumber(value, 5000, 60000);
break;
case "temperature":
value = limitFloat(value, 0, 2);
@@ -177,6 +181,9 @@ function ApiFields({ translator, api, updateApi, resetApi }) {
case "batchLength":
value = limitNumber(value, 1000, 100000);
break;
case "contextSize":
value = limitNumber(value, 1, 20);
break;
default:
}
updateApi({
@@ -455,6 +462,32 @@ function ApiFields({ translator, api, updateApi, resetApi }) {
</>
)}
{OPT_TRANS_CONTEXT.has(translator) && (
<>
<TextField
select
size="small"
name="useContext"
value={useContext}
label={i18n("use_context")}
onChange={handleChange}
>
<MenuItem value={false}>{i18n("disable")}</MenuItem>
<MenuItem value={true}>{i18n("enable")}</MenuItem>
</TextField>
{useContext && (
<TextField
size="small"
label={i18n("context_size")}
type="number"
name="contextSize"
value={contextSize}
onChange={handleChange}
/>
)}
</>
)}
<TextField
size="small"
label={i18n("fetch_limit")}

---- next file ----

@@ -67,13 +67,13 @@ export default function Settings() {
value = limitNumber(value, 1, 100);
break;
case "maxLength":
- value = limitNumber(value, 100, 10000);
+ value = limitNumber(value, 100, 100000);
break;
case "newlineLength":
value = limitNumber(value, 1, 1000);
break;
case "httpTimeout":
- value = limitNumber(value, 5000, 30000);
+ value = limitNumber(value, 5000, 60000);
break;
case "touchTranslate":
value = limitNumber(value, 0, 4);