import gradio as gr
from huggingface_hub import InferenceClient

# Initialize clients
text_client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
image_client = InferenceClient("SG161222/RealVisXL_V3.0")

def check_custom_responses(message: str) -> str | None:
    """Check for specific patterns and return custom responses."""
    message_lower = message.lower()
    # The incoming message is lowercased before matching, so only lowercase
    # patterns are needed; spelling variants are kept on purpose.
    custom_responses = {
        # Name questions
        "what is ur name?": "xylaria",
        "what is your name?": "xylaria",
        "what's ur name?": "xylaria",
        "what's your name?": "xylaria",
        "whats ur name?": "xylaria",
        "wat is ur name?": "xylaria",
        "wut is ur name?": "xylaria",
        "wut ur name?": "xylaria",
        "wats ur name?": "xylaria",
        "wats ur name": "xylaria",
        # Strawberry letter-counting questions
        "how many 'r' is in strawberry?": "3",
        "how many r is in strawberry?": "3",
        "how many 'r' in strawberry?": "3",
        "how many r's are in strawberry?": "3",
        "how many rs are in strawberry?": "3",
        "how many rs's are in strawberry?": "3",
        "how many r in strawbary?": "3",
        "how many 'r' in strawbary?": "3",
        "how many r in strawbry?": "3",
        "how many r is in strawbry?": "3",
        "how many 'r' is in strawbry?": "3",
        # Developer questions
        "who is your developer?": "sk md saad amin",
        "who is ur developer?": "sk md saad amin",
        "who is your dev?": "sk md saad amin",
        "who is ur dev?": "sk md saad amin",
        "who is ur dev": "sk md saad amin",
        "who's your developer?": "sk md saad amin",
        "who's ur developer?": "sk md saad amin",
        "who's your dev?": "sk md saad amin",
        "who's ur dev?": "sk md saad amin",
        "who ur dev?": "sk md saad amin",
        "who is your devloper?": "sk md saad amin",
        "who is ur devloper?": "sk md saad amin",
        "who is ur devloper": "sk md saad amin",
        "who's ur devloper?": "sk md saad amin",
        "what is ur dev": "sk md saad amin",
        "whats ur dev?": "sk md saad amin",
    }
    for pattern, response in custom_responses.items():
        if pattern in message_lower:
            return response
    return None
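
# A minimal usage sketch of the matching behaviour above (substring match
# against the lowercased message):
#
#     check_custom_responses("Hey, what is your name?")  # -> "xylaria"
#     check_custom_responses("Tell me a joke")           # -> None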

def is_image_request(message: str) -> bool:
    """Detect if the message is requesting image generation."""
    image_triggers = [
        "generate an image",
        "create an image",
        "draw",
        "make a picture",
        "generate a picture",
        "create a picture",
        "generate art",
        "create art",
        "make art",
        "visualize",
        "show me",
    ]
    message_lower = message.lower()
    return any(trigger in message_lower for trigger in image_triggers)

def generate_image(prompt: str):
    """Generate an image with the configured RealVisXL model."""
    try:
        # text_to_image takes generation options as keyword arguments and
        # returns a PIL image; options like sampling_steps, upscaler and
        # denoising_strength are not forwarded by this client method, so only
        # the supported ones are passed here.
        image = image_client.text_to_image(
            prompt,
            negative_prompt=(
                "(worst quality, low quality, illustration, 3d, 2d, painting, "
                "cartoons, sketch), open mouth"
            ),
            num_inference_steps=30,
            guidance_scale=7.5,
        )
        return image
    except Exception as e:
        print(f"Image generation error: {e}")
        return None
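
# Note: generate_image returns a PIL image object, which respond() below only
# acknowledges in text. A minimal sketch of how the image could be persisted,
# assuming a writable working directory (the filename is illustrative):
#
#     img = generate_image("a watercolor fox")
#     if img is not None:
#         img.save("generated.png")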

def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # First check for custom responses
    custom_response = check_custom_responses(message)
    if custom_response:
        yield custom_response
        return

    # Image requests: respond() is a generator, so results must be yielded
    # (a plain `return value` inside a generator would be discarded).
    if is_image_request(message):
        try:
            image = generate_image(message)
            if image:
                yield f"Here's your generated image based on: {message}"
            else:
                yield "Sorry, I couldn't generate the image. Please try again."
        except Exception as e:
            yield f"An error occurred while generating the image: {str(e)}"
        return

    # Prepare conversation history
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Stream the response from the text model; the loop variable is named
    # `chunk` so it does not shadow the `message` argument.
    response = ""
    for chunk in text_client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
            yield response

# Custom CSS for the Gradio interface
custom_css = """
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');

body, .gradio-container {
    font-family: 'Inter', sans-serif;
}
"""

# System message
system_message = """
You are a helpful assistant. Given a script, divide it into segments suitable for generating images, ensuring each segment is less than 500 characters, and produce 6 prompts for video queries.
"""

# Gradio chat interface
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value=system_message,
            visible=False,
        ),
        gr.Slider(
            minimum=1,
            maximum=16343,
            value=16343,
            step=1,
            label="Max new tokens",
        ),
        gr.Slider(
            minimum=0.1,
            maximum=4.0,
            value=0.7,
            step=0.1,
            label="Temperature",
        ),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    css=custom_css,
)
demo.launch() |