fix: modify systemPrompt & userPrompt

commit 6855332092 (parent 121d523e02)
Author: Gabe
Date: 2024-11-30 00:41:29 +08:00

3 changed files with 58 additions and 75 deletions


@@ -193,23 +193,23 @@ const genOpenAI = ({
   url,
   key,
   systemPrompt,
-  prompt,
+  userPrompt,
   model,
   temperature,
   maxTokens,
 }) => {
-  // Backward compatibility for a prompt historically used as the systemPrompt: if the prompt does not contain the text to be translated, append it to the end of the prompt
-  if (!prompt.includes(INPUT_PLACE_TEXT)) {
-    prompt += `\nSource Text: ${INPUT_PLACE_TEXT}`;
-  }
-  prompt = prompt
-    .replaceAll(INPUT_PLACE_FROM, from)
-    .replaceAll(INPUT_PLACE_TO, to)
-    .replaceAll(INPUT_PLACE_TEXT, text);
+  // if (!prompt.includes(INPUT_PLACE_TEXT)) {
+  //   prompt += `\nSource Text: ${INPUT_PLACE_TEXT}`;
+  // }
   systemPrompt = systemPrompt
     .replaceAll(INPUT_PLACE_FROM, from)
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
+  userPrompt = userPrompt
+    .replaceAll(INPUT_PLACE_FROM, from)
+    .replaceAll(INPUT_PLACE_TO, to)
+    .replaceAll(INPUT_PLACE_TEXT, text);
   const data = {
     model,
@@ -220,7 +220,7 @@ const genOpenAI = ({
       },
       {
         role: "user",
-        content: prompt,
+        content: userPrompt,
       },
     ],
     temperature,
@@ -240,26 +240,30 @@ const genOpenAI = ({
   return [url, init];
 };

-const genGemini = ({ text, from, to, url, key, prompt, model }) => {
+const genGemini = ({ text, from, to, url, key, systemPrompt, userPrompt, model }) => {
   url = url
     .replaceAll(INPUT_PLACE_MODEL, model)
     .replaceAll(INPUT_PLACE_KEY, key);
-  prompt = prompt
+  systemPrompt = systemPrompt
     .replaceAll(INPUT_PLACE_FROM, from)
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
+  userPrompt = userPrompt
+    .replaceAll(INPUT_PLACE_FROM, from)
+    .replaceAll(INPUT_PLACE_TO, to)
+    .replaceAll(INPUT_PLACE_TEXT, text);
   const data = {
-    contents: [
-      {
-        // role: "user",
-        parts: [
-          {
-            text: prompt,
-          },
-        ],
-      },
-    ],
+    system_instruction: {
+      parts: {
+        text: systemPrompt,
+      }
+    },
+    contents: {
+      parts: {
+        text: userPrompt,
+      }
+    }
   };

   const init = {
@@ -280,19 +284,19 @@ const genClaude = ({
   url,
   key,
   systemPrompt,
-  prompt,
+  userPrompt,
   model,
   temperature,
   maxTokens,
 }) => {
-  prompt = prompt
-    .replaceAll(INPUT_PLACE_FROM, from)
-    .replaceAll(INPUT_PLACE_TO, to)
-    .replaceAll(INPUT_PLACE_TEXT, text);
   systemPrompt = systemPrompt
     .replaceAll(INPUT_PLACE_FROM, from)
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
+  userPrompt = userPrompt
+    .replaceAll(INPUT_PLACE_FROM, from)
+    .replaceAll(INPUT_PLACE_TO, to)
+    .replaceAll(INPUT_PLACE_TEXT, text);
   const data = {
     model,
@@ -300,7 +304,7 @@ const genClaude = ({
     messages: [
       {
         role: "user",
-        content: prompt,
+        content: userPrompt,
       },
     ],
     temperature,
@@ -320,16 +324,20 @@ const genClaude = ({
   return [url, init];
 };

-const genOllama = ({ text, from, to, url, key, system, prompt, model }) => {
-  prompt = prompt
+const genOllama = ({ text, from, to, url, key, systemPrompt, userPrompt, model }) => {
+  systemPrompt = systemPrompt
     .replaceAll(INPUT_PLACE_FROM, from)
     .replaceAll(INPUT_PLACE_TO, to)
     .replaceAll(INPUT_PLACE_TEXT, text);
+  userPrompt = userPrompt
+    .replaceAll(INPUT_PLACE_FROM, from)
+    .replaceAll(INPUT_PLACE_TO, to)
+    .replaceAll(INPUT_PLACE_TEXT, text);
   const data = {
     model,
-    system,
-    prompt,
+    system: systemPrompt,
+    prompt: userPrompt,
    stream: false,
  };
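For reference, a minimal, self-contained sketch of the two-prompt flow these builders now share: one substitution pass per template, then a backend-specific body. The placeholder token values and the fillPrompt helper are illustrative assumptions, not code from this commit.

  // Assumed placeholder tokens; the real values live in the config module.
  const INPUT_PLACE_FROM = "{{from}}";
  const INPUT_PLACE_TO = "{{to}}";
  const INPUT_PLACE_TEXT = "{{text}}";

  // Hypothetical helper mirroring the replaceAll chains above.
  const fillPrompt = (tpl, { from, to, text }) =>
    tpl
      .replaceAll(INPUT_PLACE_FROM, from)
      .replaceAll(INPUT_PLACE_TO, to)
      .replaceAll(INPUT_PLACE_TEXT, text);

  const args = { from: "en", to: "zh-CN", text: "Hello, world." };
  const systemPrompt = "You are a professional, authentic machine translation engine.";
  const userPrompt =
    "Translate the following source text from {{from}} to {{to}}.\n\nSource Text: {{text}}";

  // An OpenAI-style chat body assembled from the two filled templates,
  // matching the messages array genOpenAI builds.
  const data = {
    model: "gpt-4",
    messages: [
      { role: "system", content: fillPrompt(systemPrompt, args) },
      { role: "user", content: fillPrompt(userPrompt, args) },
    ],
  };
  console.log(JSON.stringify(data, null, 2));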


@@ -527,8 +527,8 @@ const defaultOpenaiApi = {
   url: "https://api.openai.com/v1/chat/completions",
   key: "",
   model: "gpt-4",
-  prompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
-  systemPrompt: `You are a professional, authentic machine translation engine. You will be provided with a sentence in ${INPUT_PLACE_FROM}, and your task is to translate it into ${INPUT_PLACE_TO}.`,
+  systemPrompt: `You are a professional, authentic machine translation engine.`,
+  userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
   temperature: 0,
   maxTokens: 256,
   fetchLimit: 1,
@@ -538,8 +538,8 @@ const defaultOllamaApi = {
   url: "http://localhost:11434/api/generate",
   key: "",
   model: "llama3.1",
-  system: "You are a professional, authentic machine translation engine.",
-  prompt: `Translate the following text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO},output translation directly without any additional text:\n\n${INPUT_PLACE_TEXT}`,
+  systemPrompt: `You are a professional, authentic machine translation engine.`,
+  userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
   fetchLimit: 1,
   fetchInterval: 500,
 };
@@ -593,7 +593,8 @@ export const DEFAULT_TRANS_APIS = {
     url: `https://generativelanguage.googleapis.com/v1/models/${INPUT_PLACE_MODEL}:generateContent?key=${INPUT_PLACE_KEY}`,
     key: "",
     model: "gemini-pro",
-    prompt: `Translate the following text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}:\n\n${INPUT_PLACE_TEXT}`,
+    systemPrompt: `You are a professional, authentic machine translation engine.`,
+    userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
     fetchLimit: 1,
     fetchInterval: 500,
   },
@@ -601,8 +602,8 @@ export const DEFAULT_TRANS_APIS = {
     url: "https://api.anthropic.com/v1/messages",
     key: "",
     model: "claude-3-haiku-20240307",
-    prompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
     systemPrompt: `You are a professional, authentic machine translation engine.`,
+    userPrompt: `Translate the following source text from ${INPUT_PLACE_FROM} to ${INPUT_PLACE_TO}. Output translation directly without any additional text.\n\nSource Text: ${INPUT_PLACE_TEXT}\n\nTranslated Text:`,
     temperature: 0,
     maxTokens: 1024,
     fetchLimit: 1,
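Since the Ollama defaults above point at the local /api/generate endpoint, a hedged usage sketch of the request those values produce may help; the wrapper function and the sample source text are assumptions, not part of the commit.

  // Hypothetical end-to-end call exercising the new system/prompt split.
  async function ollamaTranslate() {
    const res = await fetch("http://localhost:11434/api/generate", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        model: "llama3.1",
        system: "You are a professional, authentic machine translation engine.",
        prompt:
          "Translate the following source text from English to French. Output translation directly without any additional text.\n\nSource Text: Hello, world.\n\nTranslated Text:",
        stream: false,
      }),
    });
    const json = await res.json();
    return json.response; // with stream: false, Ollama returns the full completion in `response`
  }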


@@ -115,9 +115,8 @@ function ApiFields({ translator }) {
     url = "",
     key = "",
     model = "",
-    system = "",
-    prompt = "",
     systemPrompt = "",
+    userPrompt = "",
     fetchLimit = DEFAULT_FETCH_LIMIT,
     fetchInterval = DEFAULT_FETCH_INTERVAL,
     dictNo = "",
@@ -215,6 +214,7 @@ function ApiFields({ translator }) {
       )}
       {(translator.startsWith(OPT_TRANS_OPENAI) ||
+        translator.startsWith(OPT_TRANS_OLLAMA) ||
         translator === OPT_TRANS_CLAUDE ||
         translator === OPT_TRANS_GEMINI) && (
         <>
@@ -227,37 +227,18 @@
           />
           <TextField
             size="small"
-            label={"PROMPT"}
-            name="prompt"
-            value={prompt}
+            label={"SYSTEM PROMPT"}
+            name="systemPrompt"
+            value={systemPrompt}
             onChange={handleChange}
             multiline
             maxRows={10}
           />
-        </>
-      )}
-      {(translator.startsWith(OPT_TRANS_OLLAMA)) && (
-        <>
-          <TextField
-            size="small"
-            label={"MODEL"}
-            name="model"
-            value={model}
-            onChange={handleChange}
-          />
-          <TextField
-            size="small"
-            label={"SYSTEM MESSAGE"}
-            name="system"
-            value={system}
-            onChange={handleChange}
-          />
           <TextField
             size="small"
-            label={"PROMPT"}
-            name="prompt"
-            value={prompt}
+            label={"USER PROMPT"}
+            name="userPrompt"
+            value={userPrompt}
             onChange={handleChange}
             multiline
             maxRows={10}
@@ -268,13 +249,6 @@
       {(translator.startsWith(OPT_TRANS_OPENAI) ||
         translator === OPT_TRANS_CLAUDE) && (
         <>
-          <TextField
-            size="small"
-            label={"SYSTEM PROMPT"}
-            name="systemPrompt"
-            value={systemPrompt}
-            onChange={handleChange}
-          />
           <TextField
             size="small"
             label={"Temperature"}
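Read together, the three UI hunks above replace the per-backend prompt fields with one shared pair. A condensed sketch of the resulting JSX, reassembled from the hunks with the attribute layout compressed; treat it as an approximation of the final file, not a verbatim excerpt.

  {(translator.startsWith(OPT_TRANS_OPENAI) ||
    translator.startsWith(OPT_TRANS_OLLAMA) ||
    translator === OPT_TRANS_CLAUDE ||
    translator === OPT_TRANS_GEMINI) && (
    <>
      <TextField size="small" label={"SYSTEM PROMPT"} name="systemPrompt"
        value={systemPrompt} onChange={handleChange} multiline maxRows={10} />
      <TextField size="small" label={"USER PROMPT"} name="userPrompt"
        value={userPrompt} onChange={handleChange} multiline maxRows={10} />
    </>
  )}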