diff --git a/src/apis/trans.js b/src/apis/trans.js
index 0c73902..62b70b9 100644
--- a/src/apis/trans.js
+++ b/src/apis/trans.js
@@ -240,6 +240,7 @@ const genOpenAI = ({
   model,
   temperature,
   maxTokens,
+  apiCustomParams,
 }) => {
   // 兼容历史上作为systemPrompt的prompt,如果prompt中不包含带翻译文本,则添加文本到prompt末尾
   // if (!prompt.includes(INPUT_PLACE_TEXT)) {
@@ -254,6 +255,8 @@ const genOpenAI = ({
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
 
+  apiCustomParams = JSON.parse("{" + apiCustomParams + "}");
+
   const data = {
     model,
     messages: [
@@ -268,6 +271,7 @@ const genOpenAI = ({
     ],
     temperature,
     max_completion_tokens: maxTokens,
+    ...apiCustomParams,
   };
 
   const init = {
@@ -294,6 +298,7 @@ const genGemini = ({
   model,
   temperature,
   maxTokens,
+  apiCustomParams,
 }) => {
   url = url
     .replaceAll(INPUT_PLACE_MODEL, model)
@@ -307,6 +312,8 @@ const genGemini = ({
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
 
+  apiCustomParams = JSON.parse("{" + apiCustomParams + "}");
+
   const data = {
     system_instruction: {
       parts: {
@@ -325,6 +332,7 @@ const genGemini = ({
       // topP: 0.8,
      // topK: 10,
     },
+    ...apiCustomParams,
   };
 
   const init = {
@@ -349,6 +357,7 @@ const genGemini2 = ({
   model,
   temperature,
   maxTokens,
+  apiCustomParams,
 }) => {
   systemPrompt = systemPrompt
     .replaceAll(INPUT_PLACE_FROM, from)
@@ -359,6 +368,8 @@ const genGemini2 = ({
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
 
+  apiCustomParams = JSON.parse("{" + apiCustomParams + "}");
+
   const data = {
     model,
     messages: [
@@ -373,6 +384,7 @@ const genGemini2 = ({
     ],
     temperature,
     max_tokens: maxTokens,
+    ...apiCustomParams,
   };
 
   const init = {
@@ -398,6 +410,7 @@ const genClaude = ({
   model,
   temperature,
   maxTokens,
+  apiCustomParams,
 }) => {
   systemPrompt = systemPrompt
     .replaceAll(INPUT_PLACE_FROM, from)
@@ -408,6 +421,8 @@ const genClaude = ({
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
 
+  apiCustomParams = JSON.parse("{" + apiCustomParams + "}");
+
   const data = {
     model,
     system: systemPrompt,
@@ -419,6 +434,7 @@ const genClaude = ({
     ],
     temperature,
     max_tokens: maxTokens,
+    ...apiCustomParams,
   };
 
   const init = {
@@ -444,6 +460,7 @@ const genOllama = ({
   systemPrompt,
   userPrompt,
   model,
+  apiCustomParams,
 }) => {
   systemPrompt = systemPrompt
     .replaceAll(INPUT_PLACE_FROM, from)
@@ -454,12 +471,15 @@ const genOllama = ({
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
 
+  apiCustomParams = JSON.parse("{" + apiCustomParams + "}");
+
   const data = {
     model,
     system: systemPrompt,
     prompt: userPrompt,
     think: think,
     stream: false,
+    ...apiCustomParams,
   };
 
   const init = {
diff --git a/src/config/i18n.js b/src/config/i18n.js
index cb7de61..487e1c7 100644
--- a/src/config/i18n.js
+++ b/src/config/i18n.js
@@ -228,6 +228,14 @@ export const I18N = {
     zh: `请求超时时间 (5000-30000ms)`,
     en: `Request Timeout Time (5000-30000ms)`,
   },
+  api_custom_params: {
+    zh: `API自定义参数`,
+    en: `API Custom Params`,
+  },
+  api_custom_params_help: {
+    zh: `使用JSON格式,例如 "top_p": 0.7`,
+    en: `Use JSON format, for example "top_p": 0.7`,
+  },
   min_translate_length: {
     zh: `最小翻译字符数 (1-100)`,
     en: `Minimum number Of Translated Characters (1-100)`,
diff --git a/src/config/index.js b/src/config/index.js
index c232c2d..1273ebc 100644
--- a/src/config/index.js
+++ b/src/config/index.js
@@ -563,6 +563,7 @@ const defaultOpenaiApi = {
   model: "gpt-4",
   systemPrompt: `You are a professional, authentic machine translation engine.`,
   userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
+  apiCustomParams: "",
   temperature: 0,
   maxTokens: 256,
   fetchLimit: 1,
@@ -577,6 +578,7 @@ const defaultOllamaApi = {
   model: "llama3.1",
   systemPrompt: `You are a professional, authentic machine translation engine.`,
   userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
+  apiCustomParams: "",
   think: false,
   thinkIgnore: `qwen3,deepseek-r1`,
   fetchLimit: 1,
@@ -677,6 +679,7 @@ export const DEFAULT_TRANS_APIS = {
     model: "gemini-2.5-flash",
     systemPrompt: `You are a professional, authentic machine translation engine.`,
     userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
+    apiCustomParams: "",
     temperature: 0,
     maxTokens: 2048,
     fetchLimit: 1,
@@ -691,6 +694,7 @@ export const DEFAULT_TRANS_APIS = {
     model: "gemini-2.0-flash",
     systemPrompt: `You are a professional, authentic machine translation engine.`,
     userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
+    apiCustomParams: "",
     temperature: 0,
     maxTokens: 2048,
     fetchLimit: 1,
@@ -705,6 +709,7 @@ export const DEFAULT_TRANS_APIS = {
     model: "claude-3-haiku-20240307",
     systemPrompt: `You are a professional, authentic machine translation engine.`,
     userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
+    apiCustomParams: "",
     temperature: 0,
     maxTokens: 1024,
     fetchLimit: 1,
diff --git a/src/views/Options/Apis.js b/src/views/Options/Apis.js
index 1ca0e05..1bc3b88 100644
--- a/src/views/Options/Apis.js
+++ b/src/views/Options/Apis.js
@@ -125,6 +125,7 @@ function ApiFields({ translator, api, updateApi, resetApi }) {
     model = "",
     systemPrompt = "",
     userPrompt = "",
+    apiCustomParams = "",
     think = false,
     thinkIgnore = "",
     fetchLimit = DEFAULT_FETCH_LIMIT,
@@ -274,6 +275,16 @@ function ApiFields({ translator, api, updateApi, resetApi }) {
           multiline
           maxRows={10}
         />
+      )}
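
Note: each of the five generators repeats apiCustomParams = JSON.parse("{" + apiCustomParams + "}") inline. The empty default parses fine ("{}" yields {}), but any malformed fragment typed into the settings field throws and aborts the whole translation request. A shared defensive helper could factor this out; the sketch below is illustrative only, not part of this diff, and the name parseCustomParams is invented:

// Illustrative sketch (not part of this diff): wraps the user-supplied
// fragment in braces and falls back to an empty object instead of throwing.
const parseCustomParams = (fragment) => {
  if (!fragment || !fragment.trim()) {
    return {};
  }
  try {
    return JSON.parse("{" + fragment + "}");
  } catch (err) {
    console.warn("invalid apiCustomParams, ignoring:", err);
    return {};
  }
};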
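For reference, the api_custom_params_help text shows the intended usage: entering "top_p": 0.7 in the field is wrapped into {"top_p": 0.7} and spread into the request body. Because ...apiCustomParams is spread last, custom keys can also override values set earlier in data, such as temperature or max_tokens. A quick illustration (values are examples only):

// The wrap-and-spread behavior: later spread keys win over earlier ones.
const apiCustomParams = JSON.parse("{" + '"top_p": 0.7, "temperature": 1' + "}");
const data = {
  model: "gpt-4",
  temperature: 0,
  ...apiCustomParams, // temperature becomes 1, top_p is added
};
// data => { model: "gpt-4", temperature: 1, top_p: 0.7 }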