import os
import gradio as gr
from huggingface_hub import InferenceClient
from PIL import Image

class XylariaChat:
    def __init__(self):
        # Securely load the HuggingFace token from the environment
        self.hf_token = os.getenv("HF_TOKEN")
        if not self.hf_token:
            raise ValueError("HuggingFace token not found in environment variables")

        # Initialize the inference clients: one for chat, one for images
        self.chat_client = InferenceClient(
            model="Qwen/QwQ-32B-Preview",
            api_key=self.hf_token
        )
        self.image_client = InferenceClient(
            model="SG161222/RealVisXL_V4.0",
            api_key=self.hf_token
        )

        # Initialize conversation history and persistent memory
        self.conversation_history = []
        self.persistent_memory = {}
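        # NOTE: nothing in this file writes to persistent_memory yet; it is a
        # placeholder for durable user facts. A minimal (hypothetical) use
        # would be storing entries here and folding them into the system
        # prompt inside get_response.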

    def get_response(self, user_input):
        """Get a streaming text response from the chat model."""
        messages = [
            {"role": "system", "content": "Your name is Xylaria 1.4 Senoa, an advanced ai model developed by sk md saad amin"},
            *self.conversation_history,
            {"role": "user", "content": user_input}
        ]
        try:
            response_stream = self.chat_client.chat.completions.create(
                messages=messages,
                temperature=0.5,
                max_tokens=10240,
                top_p=0.7,
                stream=True
            )
            return response_stream
        except Exception as e:
            return f"Error generating response: {str(e)}"
    def generate_image(self, prompt):
        """Generate an image based on the prompt."""
        try:
            # Create an image from the prompt
            image = self.image_client.text_to_image(prompt)
            return image
        except Exception as e:
            return f"Error generating image: {str(e)}"

    def create_interface(self):
        def streaming_response(message, chat_history):
            """Stream the assistant's reply into the chat window."""
            response_stream = self.get_response(message)
            # get_response returns a plain string only on error; because this
            # function is a generator, the error must be yielded, not returned
            if isinstance(response_stream, str):
                yield "", chat_history + [[message, response_stream]]
                return
            full_response = ""
            updated_history = chat_history + [[message, ""]]
            for chunk in response_stream:
                if chunk.choices and chunk.choices[0].delta.content:
                    full_response += chunk.choices[0].delta.content
                    updated_history[-1][1] = full_response
                    yield "", updated_history
            self.conversation_history.append({"role": "user", "content": message})
            self.conversation_history.append({"role": "assistant", "content": full_response})
            # Keep only the 10 most recent messages (5 user/assistant exchanges)
            if len(self.conversation_history) > 10:
                self.conversation_history = self.conversation_history[-10:]

        def generate_image_response(prompt):
            """Handle image generation from the prompt textbox."""
            if not prompt.strip():
                return None
            result = self.generate_image(prompt)
            # generate_image returns an error string on failure; an Image
            # component cannot render a string, so show nothing instead
            return None if isinstance(result, str) else result

        with gr.Blocks() as demo:
            chatbot = gr.Chatbot(label="Xylaria 1.4 Senoa", height=500)
            # Dedicated output for generated images; a Chatbot output cannot
            # render a raw PIL image directly
            image_output = gr.Image(label="Generated Image")
            with gr.Row():
                txt = gr.Textbox(show_label=False, placeholder="Type your message...", scale=8)
                send_btn = gr.Button("💬", scale=1)
                img_btn = gr.Button("🖼️", scale=1)
            clear_btn = gr.Button("Clear Conversation")
            clear_memory_btn = gr.Button("Clear Memory")

            def clear_conversation():
                # Reset the stored history as well as the visible chat
                self.conversation_history = []
                return None

            def clear_memory():
                self.persistent_memory = {}

            send_btn.click(fn=streaming_response, inputs=[txt, chatbot], outputs=[txt, chatbot])
            txt.submit(fn=streaming_response, inputs=[txt, chatbot], outputs=[txt, chatbot])
            img_btn.click(fn=generate_image_response, inputs=txt, outputs=image_output)
            clear_btn.click(fn=clear_conversation, inputs=None, outputs=chatbot)
            clear_memory_btn.click(fn=clear_memory, inputs=None, outputs=[])
        return demo

# Launch the interface
def main():
    chat = XylariaChat()
    interface = chat.create_interface()
    interface.launch(share=True, debug=True)

if __name__ == "__main__":
    main()
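
# --- Usage sketch (assumptions: this script is saved as app.py, and the
# gradio, huggingface_hub, and Pillow packages are installed) ---
#
#   export HF_TOKEN=<your HuggingFace access token>
#   python app.py
#
# launch(share=True) prints a temporary public URL in addition to the local
# one; set share=False to keep the app local-only.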