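"""Xylaria Chat: a Hugging Face Space that streams chat completions from
Qwen/QwQ-32B-Preview and generates images with SG161222/RealVisXL_V4.0,
served through a Gradio Blocks interface."""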
import os

import gradio as gr
from huggingface_hub import InferenceClient
from PIL import Image


class XylariaChat:
    def __init__(self):
        # Load the Hugging Face token from the environment; never hardcode it
        self.hf_token = os.getenv("HF_TOKEN")
        if not self.hf_token:
            raise ValueError("HuggingFace token not found in environment variables")

        # Initialize the inference clients: one for chat, one for image generation
        self.chat_client = InferenceClient(
            model="Qwen/QwQ-32B-Preview",
            api_key=self.hf_token
        )
        self.image_client = InferenceClient(
            model="SG161222/RealVisXL_V4.0",
            api_key=self.hf_token
        )

        # Conversation history (trimmed to recent turns) and persistent memory
        self.conversation_history = []
        self.persistent_memory = {}

    def get_response(self, user_input):
        """Stream a text response from the chat model."""
        messages = [
            {"role": "system", "content": "Your name is Xylaria 1.4 Senoa, an advanced AI model developed by sk md saad amin"},
            *self.conversation_history,
            {"role": "user", "content": user_input}
        ]
        try:
            response_stream = self.chat_client.chat.completions.create(
                messages=messages,
                temperature=0.5,
                max_tokens=10240,
                top_p=0.7,
                stream=True
            )
            return response_stream
        except Exception as e:
            return f"Error generating response: {str(e)}"

    def generate_image(self, prompt):
        """Generate an image from a text prompt."""
        try:
            # text_to_image returns a PIL.Image.Image on success
            image = self.image_client.text_to_image(prompt)
            return image
        except Exception as e:
            return f"Error generating image: {str(e)}"

    def create_interface(self):
        def streaming_response(message, chat_history):
            """Stream the assistant's reply into the chat window."""
            response_stream = self.get_response(message)
            # get_response returns a plain string on error; yield it and stop.
            # (A bare `return value` inside a generator never reaches Gradio.)
            if isinstance(response_stream, str):
                yield "", chat_history + [[message, response_stream]]
                return

            full_response = ""
            updated_history = chat_history + [[message, ""]]
            for chunk in response_stream:
                if chunk.choices[0].delta.content:
                    full_response += chunk.choices[0].delta.content
                    updated_history[-1][1] = full_response
                    yield "", updated_history

            # Persist the exchange, keeping only the last 10 messages (5 turns)
            self.conversation_history.append({"role": "user", "content": message})
            self.conversation_history.append({"role": "assistant", "content": full_response})
            if len(self.conversation_history) > 10:
                self.conversation_history = self.conversation_history[-10:]

        def generate_image_response(prompt, chat_history):
            """Generate an image and append it to the chat."""
            if not prompt.strip():
                return "", chat_history
            result = self.generate_image(prompt)
            if isinstance(result, str):  # error string from generate_image
                return "", chat_history + [[prompt, result]]
            # gr.Chatbot renders a (filepath,) tuple as an inline image
            result.save("generated_image.png")
            return "", chat_history + [[prompt, ("generated_image.png",)]]

        with gr.Blocks() as demo:
            chatbot = gr.Chatbot(label="Xylaria 1.4 Senoa", height=500)
            with gr.Row():
                txt = gr.Textbox(show_label=False, placeholder="Type your message...", scale=8)
                send_btn = gr.Button("💬", scale=1)
                img_btn = gr.Button("🖼️", scale=1)
            clear_btn = gr.Button("Clear Conversation")
            clear_memory_btn = gr.Button("Clear Memory")
            send_btn.click(fn=streaming_response, inputs=[txt, chatbot], outputs=[txt, chatbot])
            txt.submit(fn=streaming_response, inputs=[txt, chatbot], outputs=[txt, chatbot])
            img_btn.click(fn=generate_image_response, inputs=[txt, chatbot], outputs=[txt, chatbot])

            def clear_memory():
                # "Clear Memory" wipes both the rolling history and the persistent store
                self.conversation_history = []
                self.persistent_memory = {}

            # Clearing the conversation also resets the model-side history
            clear_btn.click(fn=lambda: self.conversation_history.clear(), inputs=None, outputs=chatbot)
            clear_memory_btn.click(fn=clear_memory, inputs=None, outputs=[])

        return demo


# Launch the interface
def main():
    chat = XylariaChat()
    interface = chat.create_interface()
    interface.launch(share=True, debug=True)


if __name__ == "__main__":
    main()
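
# To run locally (a sketch; exact package versions are an assumption, not pinned
# by this Space):
#   pip install gradio huggingface_hub pillow
#   HF_TOKEN=<your token> python app.py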