feat: Add AI API Custom Params
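Adds an `apiCustomParams` setting to every AI request builder (genOpenAI, genGemini, genGemini2, genClaude, genOllama). The value is entered as a brace-less JSON fragment (e.g. `"top_p": 0.7`), wrapped in braces, parsed with JSON.parse, and spread into the request body after the built-in fields, so a custom key overrides a built-in one of the same name.

A minimal sketch of the merge behavior (the sample values are illustrative, not shipped defaults):

    // Input from the new "API Custom Params" field: a JSON fragment without braces.
    const apiCustomParams = `"top_p": 0.7, "frequency_penalty": 0.5`;

    // Wrap in braces and parse, exactly as each gen* function below does.
    const parsed = JSON.parse("{" + apiCustomParams + "}");

    // Spread after the built-in fields, so custom keys win on conflict.
    const data = {
      model: "gpt-4",
      temperature: 0,
      ...parsed, // => top_p: 0.7 and frequency_penalty: 0.5 added to the body
    };

Note that an empty field is safe (`"{" + "" + "}"` parses to `{}`), but a malformed fragment makes JSON.parse throw before the request is sent.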
@@ -240,6 +240,7 @@ const genOpenAI = ({
   model,
   temperature,
   maxTokens,
+  apiCustomParams,
 }) => {
   // For backward compatibility with prompts historically used as the systemPrompt: if the prompt does not contain the text to be translated, append the text to the end of the prompt
   // if (!prompt.includes(INPUT_PLACE_TEXT)) {
@@ -254,6 +255,8 @@ const genOpenAI = ({
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
 
+  apiCustomParams = JSON.parse("{" + apiCustomParams + "}");
+
   const data = {
     model,
     messages: [
@@ -268,6 +271,7 @@ const genOpenAI = ({
     ],
     temperature,
     max_completion_tokens: maxTokens,
+    ...apiCustomParams,
   };
 
   const init = {
@@ -294,6 +298,7 @@ const genGemini = ({
   model,
   temperature,
   maxTokens,
+  apiCustomParams,
 }) => {
   url = url
     .replaceAll(INPUT_PLACE_MODEL, model)
@@ -307,6 +312,8 @@ const genGemini = ({
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
 
+  apiCustomParams = JSON.parse("{" + apiCustomParams + "}");
+
   const data = {
     system_instruction: {
       parts: {
@@ -325,6 +332,7 @@ const genGemini = ({
       // topP: 0.8,
       // topK: 10,
     },
+    ...apiCustomParams,
   };
 
   const init = {
@@ -349,6 +357,7 @@ const genGemini2 = ({
   model,
   temperature,
   maxTokens,
+  apiCustomParams,
 }) => {
   systemPrompt = systemPrompt
     .replaceAll(INPUT_PLACE_FROM, from)
@@ -359,6 +368,8 @@ const genGemini2 = ({
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
 
+  apiCustomParams = JSON.parse("{" + apiCustomParams + "}");
+
   const data = {
     model,
     messages: [
@@ -373,6 +384,7 @@ const genGemini2 = ({
     ],
     temperature,
     max_tokens: maxTokens,
+    ...apiCustomParams,
   };
 
   const init = {
@@ -398,6 +410,7 @@ const genClaude = ({
   model,
   temperature,
   maxTokens,
+  apiCustomParams,
 }) => {
   systemPrompt = systemPrompt
     .replaceAll(INPUT_PLACE_FROM, from)
@@ -408,6 +421,8 @@ const genClaude = ({
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
 
+  apiCustomParams = JSON.parse("{" + apiCustomParams + "}");
+
   const data = {
     model,
     system: systemPrompt,
@@ -419,6 +434,7 @@ const genClaude = ({
     ],
     temperature,
     max_tokens: maxTokens,
+    ...apiCustomParams,
   };
 
   const init = {
@@ -444,6 +460,7 @@ const genOllama = ({
   systemPrompt,
   userPrompt,
   model,
+  apiCustomParams,
 }) => {
   systemPrompt = systemPrompt
     .replaceAll(INPUT_PLACE_FROM, from)
@@ -454,12 +471,15 @@ const genOllama = ({
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
 
+  apiCustomParams = JSON.parse("{" + apiCustomParams + "}");
+
   const data = {
     model,
     system: systemPrompt,
     prompt: userPrompt,
     think: think,
     stream: false,
+    ...apiCustomParams,
   };
 
   const init = {
@@ -228,6 +228,14 @@ export const I18N = {
     zh: `请求超时时间 (5000-30000ms)`,
     en: `Request Timeout Time (5000-30000ms)`,
   },
+  api_custom_params: {
+    zh: `API自定义参数`,
+    en: `API Custom Params`,
+  },
+  api_custom_params_help: {
+    zh: `使用JSON格式,例如 "top_p": 0.7`,
+    en: `Use JSON format, for example "top_p": 0.7`,
+  },
   min_translate_length: {
     zh: `最小翻译字符数 (1-100)`,
     en: `Minimum number Of Translated Characters (1-100)`,
@@ -563,6 +563,7 @@ const defaultOpenaiApi = {
   model: "gpt-4",
   systemPrompt: `You are a professional, authentic machine translation engine.`,
   userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
+  apiCustomParams: "",
   temperature: 0,
   maxTokens: 256,
   fetchLimit: 1,
@@ -577,6 +578,7 @@ const defaultOllamaApi = {
   model: "llama3.1",
   systemPrompt: `You are a professional, authentic machine translation engine.`,
   userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
+  apiCustomParams: "",
   think: false,
   thinkIgnore: `qwen3,deepseek-r1`,
   fetchLimit: 1,
@@ -677,6 +679,7 @@ export const DEFAULT_TRANS_APIS = {
     model: "gemini-2.5-flash",
     systemPrompt: `You are a professional, authentic machine translation engine.`,
     userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
+    apiCustomParams: "",
     temperature: 0,
     maxTokens: 2048,
     fetchLimit: 1,
@@ -691,6 +694,7 @@ export const DEFAULT_TRANS_APIS = {
     model: "gemini-2.0-flash",
     systemPrompt: `You are a professional, authentic machine translation engine.`,
     userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
+    apiCustomParams: "",
     temperature: 0,
     maxTokens: 2048,
     fetchLimit: 1,
@@ -705,6 +709,7 @@ export const DEFAULT_TRANS_APIS = {
     model: "claude-3-haiku-20240307",
     systemPrompt: `You are a professional, authentic machine translation engine.`,
     userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
+    apiCustomParams: "",
     temperature: 0,
     maxTokens: 1024,
     fetchLimit: 1,
@@ -125,6 +125,7 @@ function ApiFields({ translator, api, updateApi, resetApi }) {
     model = "",
     systemPrompt = "",
     userPrompt = "",
+    apiCustomParams = "",
     think = false,
     thinkIgnore = "",
     fetchLimit = DEFAULT_FETCH_LIMIT,
@@ -274,6 +275,16 @@ function ApiFields({ translator, api, updateApi, resetApi }) {
             multiline
             maxRows={10}
           />
+          <TextField
+            size="small"
+            label={i18n("api_custom_params")}
+            name="apiCustomParams"
+            value={apiCustomParams}
+            onChange={handleChange}
+            multiline
+            maxRows={10}
+            helperText={i18n("api_custom_params_help")}
+          />
         </>
       )}
 