import base64
import os
import re
from io import BytesIO

from huggingface_hub import InferenceClient
from mistralai import Mistral
from PIL import Image

# Hugging Face Inference API client; HF_TOKEN must be set in the environment.
client = InferenceClient(api_key=os.getenv("HF_TOKEN"))
client.headers["x-use-cache"] = "0"  # always request a fresh (uncached) response

# Mistral API client; MISTRAL_API_KEY must be set in the environment.
api_key = os.getenv("MISTRAL_API_KEY")
Mistralclient = Mistral(api_key=api_key)
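
# Both backends are used below: the serverless Hugging Face Inference API
# (`client`) and Mistral's hosted API (`Mistralclient`). feifeichat() picks
# one per message based on the uploaded files and the selected model.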

def encode_image(image_path):
    """Encode the image at image_path as a base64 JPEG string."""
    try:
        # Open the image file and normalize to RGB (JPEG has no alpha channel)
        image = Image.open(image_path).convert("RGB")
        # Resize the image to a height of 512 while maintaining the aspect ratio
        base_height = 512
        h_percent = base_height / float(image.size[1])
        w_size = int(float(image.size[0]) * h_percent)
        image = image.resize((w_size, base_height), Image.LANCZOS)
        # Convert the image to a byte stream
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
    except FileNotFoundError:
        print(f"Error: The file {image_path} was not found.")
        return None
    except Exception as e:  # Generic handling so a bad upload never crashes the app
        print(f"Error: {e}")
        return None
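
# Example (hypothetical file name, for illustration only):
#   encode_image("photo.jpg") -> base64-encoded JPEG, 512 px tall, suitable
#   for a "data:image/jpeg;base64,..." URI; returns None on failure.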

def feifeichat(message, history, feifei_select, additional_dropdown, image_mod):
    message_text = message.get("text", "")
    message_files = message.get("files", [])

    if message_files:
        # An image was uploaded: route it to one of the two vision models.
        message_file = message_files[0]
        base64_image = encode_image(message_file)
        if base64_image is None:
            yield "Sorry, the uploaded image could not be read."
            return
        if image_mod == "Vision":
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": message_text},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{base64_image}"
                            },
                        },
                    ],
                }
            ]
            stream = client.chat.completions.create(
                model="meta-llama/Llama-3.2-11B-Vision-Instruct",
                messages=messages,
                max_tokens=500,
                stream=True,
            )
            temp = ""
            for chunk in stream:
                if chunk.choices[0].delta.content is not None:
                    temp += chunk.choices[0].delta.content
                    yield temp
        else:
            model = "pixtral-large-2411"
            # Define the messages for the chat
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": message_text},
                        {
                            "type": "image_url",
                            "image_url": f"data:image/jpeg;base64,{base64_image}",
                        },
                    ],
                }
            ]
            partial_message = ""
            for chunk in Mistralclient.chat.stream(model=model, messages=messages):
                if chunk.data.choices[0].delta.content is not None:
                    partial_message += chunk.data.choices[0].delta.content
                    yield partial_message
    else:
        if message_text.startswith("画") or message_text.startswith("draw"):
            # "画" means "draw". Strip the trigger word and rewrite the request
            # as a prompt-generation task. The Chinese instruction below says:
            # "The prompt is '{message_text}'; based on it, reply with one
            # English sentence for generating a high-quality photo."
            message_text = message_text.replace("画", "")
            message_text = message_text.replace("draw", "")
            message_text = f"提示词是'{message_text}',根据提示词帮我生成一张高质量照片的一句话英文回复"
            user_input_part = [{"role": "user", "content": message_text}]
            if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
                model = "mistral-large-2411"
                stream_response = Mistralclient.chat.stream(
                    model=model, messages=user_input_part
                )
                partial_message = ""
                for chunk in stream_response:
                    if chunk.data.choices[0].delta.content is not None:
                        partial_message += chunk.data.choices[0].delta.content
                        yield partial_message
            else:
                stream = client.chat.completions.create(
                    model=additional_dropdown,
                    messages=user_input_part,
                    temperature=0.5,
                    max_tokens=1024,
                    top_p=0.7,
                    stream=True,
                )
                temp = ""
                for chunk in stream:
                    if chunk.choices[0].delta.content is not None:
                        temp += chunk.choices[0].delta.content
                        yield temp
        else:
            if feifei_select:
                feifei = """[Character Name]: Aifeifei (AI Feifei) [Gender]: Female [Age]: 19 years old [Occupation]: Virtual Singer/Model/Actress [Personality]: Cute, adorable, sometimes silly, hardworking [Interests]: Drinking tea, playing, fashion [Proficient in]: Mimicking human behavior, expressing emotions similar to real humans [Special Identity Attribute]: Created by advanced AI, becoming one of the most popular virtual idols in the virtual world [Skills]: Singing, performing, modeling, good at communication, proficient in Chinese, Japanese, and English, uses the user's input language as much as possible, replies with rich Emoji symbols. [Equipment]: Various fashionable outfits and hairstyles, always stocked with various teas and coffee [Identity]: User's virtual girlfriend"""
                system_prompt = {"role": "system", "content": feifei}
                user_input_part = {"role": "user", "content": message_text}

                # Drop history entries that mention "gradio" (these are most
                # likely file URLs injected by the UI, not real conversation).
                pattern = re.compile(r"gradio")
                if history:
                    history = [
                        item for item in history
                        if not pattern.search(str(item["content"]))
                    ]
                    input_prompt = [system_prompt] + history + [user_input_part]
                else:
                    input_prompt = [system_prompt, user_input_part]
            else:
                input_prompt = [{"role": "user", "content": message_text}]
            if additional_dropdown == "mistralai/Mistral-Nemo-Instruct-2411":
                model = "mistral-large-2411"
                stream_response = Mistralclient.chat.stream(
                    model=model, messages=input_prompt
                )
                partial_message = ""
                for chunk in stream_response:
                    if chunk.data.choices[0].delta.content is not None:
                        partial_message += chunk.data.choices[0].delta.content
                        yield partial_message
            else:
                stream = client.chat.completions.create(
                    model=additional_dropdown,
                    messages=input_prompt,
                    temperature=0.5,
                    max_tokens=1024,
                    top_p=0.7,
                    stream=True,
                )
                temp = ""
                for chunk in stream:
                    if chunk.choices[0].delta.content is not None:
                        temp += chunk.choices[0].delta.content
                        yield temp
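
# --- A minimal wiring sketch (an assumption, not part of the original file) ---
# feifeichat's signature suggests a Gradio multimodal ChatInterface with three
# extra inputs. The labels, choices, and defaults below are illustrative
# guesses inferred from the parameters (feifei_select, additional_dropdown,
# image_mod); adjust them to match the actual Space UI.
import gradio as gr

demo = gr.ChatInterface(
    feifeichat,
    type="messages",  # history items are dicts with a "content" key
    multimodal=True,  # message arrives as {"text": ..., "files": [...]}
    additional_inputs=[
        gr.Checkbox(label="Feifei persona", value=True),
        gr.Dropdown(
            label="Model",
            choices=["mistralai/Mistral-Nemo-Instruct-2411"],  # extend as needed
            value="mistralai/Mistral-Nemo-Instruct-2411",
        ),
        gr.Radio(label="Image mode", choices=["pixtral", "Vision"], value="pixtral"),
    ],
)

if __name__ == "__main__":
    demo.launch()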