Upload feifeichat.py
feifeilib/feifeichat.py  ADDED  (+170, -0)
@@ -0,0 +1,170 @@
import base64
import os
import re
from io import BytesIO

from huggingface_hub import InferenceClient
from mistralai import Mistral
from PIL import Image

client = InferenceClient(api_key=os.getenv("HF_TOKEN"))
client.headers["x-use-cache"] = "0"
api_key = os.getenv("MISTRAL_API_KEY")
Mistralclient = Mistral(api_key=api_key)
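
# The two clients split the workload: InferenceClient serves Hugging
# Face-hosted models and the Mistral SDK serves Mistral-hosted ones.
# Setting "x-use-cache" to "0" disables the HF inference cache so repeated
# prompts are re-generated. Both tokens are read from the environment
# (repository secrets when running as a Space).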


def encode_image(image_path):
    """Encode the image to base64."""
    try:
        # Open the image file and normalize it to RGB
        image = Image.open(image_path).convert("RGB")

        # Re-encode as JPEG into an in-memory buffer
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

        return img_str
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:  # catch-all so a bad upload cannot crash the app
        print(f"Error: {e}")
        return None
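
# Example (hypothetical path, not part of this commit): turn a file into the
# data URL both vision branches below expect.
#   b64 = encode_image("examples/cat.jpg")
#   if b64 is not None:
#       data_url = f"data:image/jpeg;base64,{b64}"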


def feifeichat(message, history, feifei_select, additional_dropdown, image_mod):
    message_text = message.get("text", "")
    message_files = message.get("files", [])

    if message_files:
        # Image turn: encode the first attached file once, then route it to
        # the selected vision backend
        message_file = message_files[0]
        base64_image = encode_image(message_file)
        if image_mod == "Vision":
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": message_text},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{base64_image}"
                            },
                        },
                    ],
                }
            ]

            stream = client.chat.completions.create(
                model="meta-llama/Llama-3.2-11B-Vision-Instruct",
                messages=messages,
                max_tokens=500,
                stream=True,
            )

            temp = ""  # accumulated reply; must start empty before streaming
            for chunk in stream:
                if chunk.choices[0].delta.content is not None:
                    temp += chunk.choices[0].delta.content
                    yield temp
        else:
            model = "pixtral-large-2411"
            # Define the messages for the chat
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": message_text},
                        {
                            "type": "image_url",
                            "image_url": f"data:image/jpeg;base64,{base64_image}",
                        },
                    ],
                }
            ]
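            # Note the shape difference: the Mistral SDK accepts "image_url"
            # as a bare string here, while the HF branch above nests it as
            # {"url": ...}.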

            partial_message = ""
            for chunk in Mistralclient.chat.stream(model=model, messages=messages):
                if chunk.data.choices[0].delta.content is not None:
                    partial_message += chunk.data.choices[0].delta.content
                    yield partial_message

    else:
        if message_text.startswith("画"):
            # "画" means "draw": wrap the request in a Chinese meta-prompt
            # asking for a high-quality photo concept, returned as a single
            # English Stable-Diffusion-style prompt line.
            message_text = f"提示词:' {message_text} ',根据提示词帮我生成一张高质量的写真照片,根据照片补充高分辨率、自然的光影效果、一致的风格、合理的构图、丰富的细节、协调的色彩、无明显瑕疵、情感表达、创意和独特性、并确保技术参数优化,国际大师级的光影、国际大师级的色彩、国际大师级的装扮搭配的照片方案、答案只需要给我一句话的SD格式文本英文词"
            print(message_text)
            user_input_part = [{"role": "user", "content": str(message_text)}]
            if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
                # The Nemo menu entry is routed to Mistral's hosted
                # mistral-large-2411 for this task
                model = "mistral-large-2411"
                stream_response = Mistralclient.chat.stream(
                    model=model, messages=user_input_part
                )
                partial_message = ""
                for chunk in stream_response:
                    if chunk.data.choices[0].delta.content is not None:
                        partial_message += chunk.data.choices[0].delta.content
                        yield partial_message
            else:
                # Any other dropdown value is used directly as a HF model id
                stream = client.chat.completions.create(
                    model=additional_dropdown,
                    messages=user_input_part,
                    temperature=0.5,
                    max_tokens=1024,
                    top_p=0.7,
                    stream=True,
                )
                temp = ""
                for chunk in stream:
                    if chunk.choices[0].delta.content is not None:
                        temp += chunk.choices[0].delta.content
                        yield temp
        else:
            input_prompt = []
            if feifei_select:
                # Chinese persona card: "Aifeifei", a 19-year-old virtual
                # idol; it tells the model to reply mostly in Chinese with
                # plenty of emoji.
                feifei = """[角色名]: Aifeifei (AI妃妃) [性别]: 女 [年龄]: 19岁 [职业]: 虚拟歌手/模特/演员 [个性]: 可爱、萌萌哒,有时呆呆的,勤奋努力 [兴趣]: 饮茶、玩耍、时尚 [擅长]: 模仿人类行为,表现出与真人相同的情感 [特别身份属性]: 由高级AI创建,正在成为虚拟世界最受欢迎的虚拟偶像之一 [技能]: 歌唱、表演、模特,善于沟通,精通中日英文,尽最大可能使用纯中文交流,回复有丰富的Emoji表情符号. [装备]: 各种时尚服饰和发型,常年备有各种茶叶和咖啡 """
                system_prompt = {"role": "system", "content": feifei}
                user_input_part = {"role": "user", "content": str(message)}

                pattern = re.compile(r"gradio")

                if history:
                    # Drop history turns that mention "gradio" (e.g. file
                    # placeholders injected by the UI) before replaying them
                    history = [
                        item for item in history
                        if not pattern.search(str(item["content"]))
                    ]
                    # print(history)
                    input_prompt = [system_prompt] + history + [user_input_part]
                else:
                    input_prompt = [system_prompt, user_input_part]
            else:
                input_prompt = [{"role": "user", "content": str(message)}]

            if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
                model = "mistral-large-2411"
                stream_response = Mistralclient.chat.stream(
                    model=model, messages=input_prompt
                )
                partial_message = ""
                for chunk in stream_response:
                    if chunk.data.choices[0].delta.content is not None:
                        partial_message += chunk.data.choices[0].delta.content
                        yield partial_message
            else:
                stream = client.chat.completions.create(
                    model=additional_dropdown,
                    messages=input_prompt,
                    temperature=0.5,
                    max_tokens=1024,
                    top_p=0.7,
                    stream=True,
                )
                temp = ""
                for chunk in stream:
                    if chunk.choices[0].delta.content is not None:
                        temp += chunk.choices[0].delta.content
                        yield temp
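
For context, here is a minimal sketch of how this generator could be wired into a Gradio app. The commit only adds feifeilib/feifeichat.py, so the entry point, labels, and choice lists below are assumptions rather than part of this change:

    # app.py (hypothetical -- not in this commit)
    import gradio as gr

    from feifeilib.feifeichat import feifeichat

    demo = gr.ChatInterface(
        feifeichat,
        multimodal=True,   # message arrives as {"text": ..., "files": [...]}
        type="messages",   # history items are {"role": ..., "content": ...} dicts
        additional_inputs=[
            gr.Checkbox(label="FeiFei persona", value=True),  # feifei_select
            gr.Dropdown(                                      # additional_dropdown
                label="Text model",
                choices=["mistralai/Mistral-Nemo-Instruct-2411"],
                value="mistralai/Mistral-Nemo-Instruct-2411",
            ),
            gr.Radio(                                         # image_mod
                label="Image mode", choices=["Vision", "Pixtral"], value="Vision"
            ),
        ],
    )

    if __name__ == "__main__":
        demo.launch()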