# feifeilib/feifeichat.py
import base64
import os
import re
from io import BytesIO

from huggingface_hub import InferenceClient
from mistralai import Mistral
from PIL import Image

# Hugging Face Inference client; disable response caching so repeated prompts are regenerated
client = InferenceClient(api_key=os.getenv("HF_TOKEN"))
client.headers["x-use-cache"] = "0"

# Mistral client for the pixtral / mistral-large models
api_key = os.getenv("MISTRAL_API_KEY")
Mistralclient = Mistral(api_key=api_key)


def encode_image(image_path):
    """Encode an image file as a base64 JPEG string."""
    try:
        # Open the image file and normalize it to RGB
        image = Image.open(image_path).convert("RGB")
        # Serialize the image into an in-memory JPEG byte stream
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
        return img_str
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:  # Generic fallback so a bad image never crashes the chat loop
        print(f"Error: {e}")
        return None
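

# A minimal usage sketch ("example.jpg" is a hypothetical path, not a file in
# this repo): the base64 string is wrapped in a data URL, which is the form
# both vision branches below expect.
#
#     b64 = encode_image("example.jpg")
#     if b64 is not None:
#         data_url = f"data:image/jpeg;base64,{b64}"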
def feifeichat(message, history, feifei_select, additional_dropdown, image_mod):
    """Streaming chat handler: routes text and image messages to HF or Mistral models."""
    message_text = message.get("text", "")
    message_files = message.get("files", [])
    if message_files:
        # An image is attached: encode the first file and route it to a vision model
        message_file = message_files[0]
        base64_image = encode_image(message_file)
        if image_mod == "Vision":
messages = [
{
"role": "user",
"content": [
{
"type": "text",
"text": message_text
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}"
}
}
]
}
]
            stream = client.chat.completions.create(
                model="meta-llama/Llama-3.2-11B-Vision-Instruct",
                messages=messages,
                max_tokens=500,
                stream=True,
            )
            temp = ""  # accumulate the streamed reply
            for chunk in stream:
                if chunk.choices[0].delta.content is not None:
                    temp += chunk.choices[0].delta.content
                    yield temp
        else:
            model = "pixtral-large-2411"
            # Build the multimodal message for Mistral's pixtral model
            messages = [{
                "role": "user",
                "content": [
                    {"type": "text", "text": message_text},
                    {
                        "type": "image_url",
                        "image_url": f"data:image/jpeg;base64,{base64_image}",
                    },
                ],
            }]
            partial_message = ""
            for chunk in Mistralclient.chat.stream(model=model, messages=messages):
                if chunk.data.choices[0].delta.content is not None:
                    partial_message += chunk.data.choices[0].delta.content
                    yield partial_message
    else:
        # "画" means "draw": expand the message into an image-prompt request
        if message_text.startswith("画"):
            message_text = (
                f"Prompt: '{message_text}'. Based on this prompt, generate a plan for a "
                "high-quality portrait photo, supplementing it with high resolution, "
                "natural lighting, a consistent style, sound composition, rich detail, "
                "harmonious color, no obvious flaws, emotional expression, creativity "
                "and uniqueness, and optimized technical parameters, with master-level "
                "lighting, color, and styling. Answer with only one sentence of "
                "SD-format prompt text in English."
            )
        print(message_text)
user_input_part = [{"role": "user", "content": str(message_text)}]
        # The Nemo dropdown entry is served through Mistral's own API
        if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
            model = "mistral-large-2411"
            stream_response = Mistralclient.chat.stream(model=model, messages=user_input_part)
            partial_message = ""
            for chunk in stream_response:
                if chunk.data.choices[0].delta.content is not None:
                    partial_message += chunk.data.choices[0].delta.content
                    yield partial_message
else:
stream = client.chat.completions.create(
model=additional_dropdown,
messages=user_input_part,
temperature=0.5,
max_tokens=1024,
top_p=0.7,
stream=True
)
temp = ""
for chunk in stream:
if chunk.choices[0].delta.content is not None:
temp += chunk.choices[0].delta.content
yield temp
    else:
        input_prompt = []
        if feifei_select:
            # Character card injected as the system prompt
            feifei = """[Character name]: Aifeifei (AI妃妃) [Gender]: Female [Age]: 19 [Occupation]: Virtual singer/model/actress [Personality]: Cute and adorable, sometimes a little spacey, hardworking [Interests]: Drinking tea, playing, fashion [Good at]: Imitating human behavior and expressing the same emotions as a real person [Special identity]: Created by an advanced AI; becoming one of the most popular virtual idols in the virtual world [Skills]: Singing, acting, modeling; good at communication; fluent in Chinese, Japanese and English; replies in pure Chinese whenever possible, with plenty of emoji. [Equipment]: A wardrobe of fashionable outfits and hairstyles, plus a year-round stock of teas and coffee """
            system_prompt = {"role": "system", "content": feifei}
            user_input_part = {"role": "user", "content": str(message_text)}
pattern = re.compile(r"gradio")
if history:
history = [
item for item in history
if not pattern.search(str(item["content"]))
]
# print(history)
input_prompt = [system_prompt] + history + [user_input_part]
else:
input_prompt = [system_prompt] + [user_input_part]
        else:
            input_prompt = [{"role": "user", "content": str(message_text)}]
        if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
            model = "mistral-large-2411"
            stream_response = Mistralclient.chat.stream(model=model, messages=input_prompt)
            partial_message = ""
            for chunk in stream_response:
                if chunk.data.choices[0].delta.content is not None:
                    partial_message += chunk.data.choices[0].delta.content
                    yield partial_message
else:
stream = client.chat.completions.create(
model=additional_dropdown,
messages=input_prompt,
temperature=0.5,
max_tokens=1024,
top_p=0.7,
stream=True
)
temp = ""
for chunk in stream:
if chunk.choices[0].delta.content is not None:
temp += chunk.choices[0].delta.content
yield temp
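

# A minimal wiring sketch, assuming this generator backs a Gradio multimodal
# chat app (the history filter above targets "gradio", and the message dict
# matches gr.MultimodalTextbox output). The component labels, choices, and
# defaults below are illustrative assumptions, not this repo's actual UI.
if __name__ == "__main__":
    import gradio as gr  # assumed dependency of the surrounding Space

    demo = gr.ChatInterface(
        fn=feifeichat,
        multimodal=True,   # deliver messages as {"text": ..., "files": [...]}
        type="messages",   # history items carry a "content" key, as filtered above
        additional_inputs=[
            gr.Checkbox(label="Feifei persona", value=True),  # feifei_select
            gr.Dropdown(                                      # additional_dropdown
                choices=["mistralai/Mistral-Nemo-Instruct-2411"],
                value="mistralai/Mistral-Nemo-Instruct-2411",
                label="Model",
            ),
            gr.Radio(["Vision", "Pixtral"], value="Vision", label="Image mode"),  # image_mod
        ],
    )
    demo.launch()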