from __future__ import annotations

from typing import Any

from typing_extensions import TypedDict

from neollm import MyL3M2, MyLLM
from neollm.types import LLMSettings, Messages, Response

# Aliases for MyLLM / MyL3M2 instances with unconstrained input/output types.
_MyLLM = MyLLM[Any, Any]
_MyL3M2 = MyL3M2[Any, Any]


class PromptCheckerInput(TypedDict):
    myllm: _MyLLM | _MyL3M2
    model: str
    platform: str
    llm_settings: LLMSettings | None


class APromptCheckerInput(TypedDict):
    myllm: _MyLLM


class APromptChecker(MyLLM[APromptCheckerInput, str]):
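    """Use an LLM to review a single MyLLM's prompt and suggest improvements."""
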
    def _preprocess(self, inputs: APromptCheckerInput) -> Messages:
        system_prompt = (
            "Your job is to improve instructions (prompts) given to an AI.\n"
            "You have exceptional language skills and work carefully, so you are a genius who notices even small mistakes.\n"
            "Point out typos, illogical points, and unclear instructions as bullet points, and propose a better prompt.\n"
            "# Output example: \n"
            "[Typos/grammatical errors in the instructions]\n"
            "- ...\n"
            "- ...\n"
            "[Illogical points in the instructions]\n"
            "- ...\n"
            "- ...\n"
            "[Unclear points in the instructions]\n"
            "- ...\n"
            "- ...\n"
            "[Other concerns]\n"
            "- ...\n"
            "- ...\n"
            "[Suggestions]\n"
            "- ...\n"
            "- ...\n"
        )
        # Nothing to check if the target MyLLM has not been called yet.
        if inputs["myllm"].messages is None:
            return []
        user_prompt = "# Prompt\n" + "\n".join(
            # Alternative role-tagged formatting, kept for reference:
            # [f"<{message['role']}>\n{message['content']}\n" for message in inputs["myllm"].messages]
            [str(message) for message in inputs["myllm"].messages]
        )
        messages: Messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]
        return messages
    def _postprocess(self, response: Response) -> str:
        if response.choices[0].message.content is None:
            return "The response contained no content."
        return response.choices[0].message.content

    def _ruleprocess(self, inputs: APromptCheckerInput) -> str | None:
        # Short-circuit: skip the API request when there are no messages to check.
        if inputs["myllm"].messages is None:
            return "_ruleprocess triggered: no request was sent!"
        return None

    def __call__(self, inputs: APromptCheckerInput) -> str:
        outputs: str = super().__call__(inputs)
        return outputs

class PromptsChecker(MyL3M2[PromptCheckerInput, None]):
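    """Recursively walk a MyL3M2 tree, running APromptChecker on every leaf MyLLM."""
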
    def _link(self, inputs: PromptCheckerInput) -> None:
        if isinstance(inputs["myllm"], MyL3M2):
            # Composite node: recurse into each child of the MyL3M2.
            for myllm in inputs["myllm"].myllm_list:
                prompts_checker = PromptsChecker(parent=self, verbose=True)
                prompts_checker(
                    inputs={
                        "myllm": myllm,
                        "model": inputs["model"],
                        "platform": inputs["platform"],
                        "llm_settings": inputs["llm_settings"],
                    }
                )
        elif isinstance(inputs["myllm"], MyLLM):
            # Leaf node: check this MyLLM's prompt directly.
            a_prompt_checker = APromptChecker(
                parent=self,
                llm_settings=inputs["llm_settings"],
                verbose=True,
                platform=inputs["platform"],
                model=inputs["model"],
            )
            a_prompt_checker(inputs={"myllm": inputs["myllm"]})

    def __call__(self, inputs: PromptCheckerInput) -> None:
        super().__call__(inputs)


def check_prompt(
    myllm: _MyLLM | _MyL3M2,
    llm_settings: LLMSettings | None = None,
    model: str = "gpt-4o-mini-2024-07-18",
    platform: str = "openai",
) -> MyL3M2[Any, Any]:
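    """Check the prompts of `myllm` (and any children) and return the checker instance."""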
    prompt_checker_2 = PromptsChecker(verbose=True)
    prompt_checker_2(inputs={"myllm": myllm, "llm_settings": llm_settings, "model": model, "platform": platform})
    return prompt_checker_2
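

# Minimal usage sketch, not part of the module itself. `SummarizerLLM` is a
# hypothetical MyLLM subclass used only for illustration; any MyLLM or MyL3M2
# works, provided it has already been called so that its `messages` attribute
# is populated before checking.
#
#     summarizer = SummarizerLLM(verbose=True)
#     summarizer(inputs={"text": "..."})
#     checker = check_prompt(summarizer, model="gpt-4o-mini-2024-07-18", platform="openai")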