# Phone_C / app.py
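"""Gradio app that generates phone back-cover designs.

The user supplies a color, a phone model, and a design description; the text is
turned into an image through the Hugging Face Inference API, and the result is
shown alongside a short assistant message that the browser can read aloud via
the Web Speech API.
"""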
import gradio as gr
import requests
from PIL import Image
from io import BytesIO
from tqdm import tqdm
import time
repo = "artificialguybr/TshirtDesignRedmond-V2"
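# Text-to-image model repo on the Hugging Face Hub, called below through the
# hosted Inference API (https://api-inference.huggingface.co).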
# Generate a design image from the three prompt fields.
def infer(color_prompt, phone_type_prompt, design_prompt):
    prompt = (
        f"A single vertical {color_prompt} colored {phone_type_prompt} back cover "
        f"featuring a bold {design_prompt} design on the front, hanging on a plain wall, "
        f"with soft light and shadows creating a striking contrast against the minimal "
        f"background and evoking modern sophistication."
    )
    print("Generating image with prompt:", prompt)
    api_url = f"https://api-inference.huggingface.co/models/{repo}"
    # Anonymous request; the Inference API may require an
    # "Authorization: Bearer <HF token>" header for this model.
    headers = {}
    payload = {
        "inputs": prompt,
        "parameters": {
            "negative_prompt": "(worst quality, low quality, lowres, oversaturated, grayscale, bad photo:1.4)",
            "num_inference_steps": 30,
            "scheduler": "DPMSolverMultistepScheduler",
        },
    }
    error_count = 0
    pbar = tqdm(total=None, desc="Loading model")
    while True:
        response = requests.post(api_url, headers=headers, json=payload)
        if response.status_code == 200:
            pbar.close()
            speech_text = (
                f"Your image is generated with the color {color_prompt}, "
                f"mobile type {phone_type_prompt}, and design {design_prompt}."
            )
            return Image.open(BytesIO(response.content)), speech_text
        elif response.status_code == 503:
            # Model is still loading on the Inference API; wait and retry.
            time.sleep(1)
            pbar.update(1)
        elif response.status_code == 500 and error_count < 5:
            # Transient server error; retry a few times before giving up.
            time.sleep(1)
            error_count += 1
        else:
            pbar.close()
            raise Exception(f"API Error: {response.status_code}")
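# A minimal sketch (assumption: an HF_TOKEN environment variable holds a valid
# Hugging Face access token) of how the request could be authenticated, plus a
# quick standalone test of infer():
#
#   import os
#   headers = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}
#
#   image, message = infer("red", "iPhone", "geometric waves")
#   image.save("preview.png")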
# Custom CSS for Apple-like design
custom_css = """
body {
    font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
    margin: 0;
    padding: 0;
    background: linear-gradient(135deg, #f7f8fa, #dfe2e6);
    color: #333;
}
.navbar {
    background-color: #f8f9fa;
    padding: 10px 20px;
    display: flex;
    justify-content: space-between;
    align-items: center;
    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.navbar a {
    color: #0071e3;
    text-decoration: none;
    font-weight: 500;
    margin: 0 15px;
    transition: color 0.3s;
}
.navbar a:hover {
    color: #0056b3;
}
.avatar-container {
    text-align: center;
    margin-bottom: 20px;
    position: relative;
    animation: head-move 3s infinite;
}
.avatar-img {
    width: 150px;
    height: 150px;
    border-radius: 50%;
    animation: blink 3s infinite, scale 5s infinite;
}
@keyframes blink {
    0%, 100% { opacity: 1; }
    50% { opacity: 0.7; }
}
@keyframes scale {
    0%, 100% { transform: scale(1); }
    50% { transform: scale(1.05); }
}
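/* head-move is referenced by .avatar-container but was not defined in the
   original stylesheet; a gentle vertical bob is assumed here as a placeholder. */
@keyframes head-move {
    0%, 100% { transform: translateY(0); }
    50% { transform: translateY(-4px); }
}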
"""
# JavaScript for text-to-speech and animations
custom_js = """
<script>
document.addEventListener('DOMContentLoaded', function () {
    // Add navigation bar
    const navbar = document.createElement('div');
    navbar.classList.add('navbar');
    navbar.innerHTML = `
        <a href="#">Home</a>
        <a href="#">Design</a>
        <a href="#">About</a>
    `;
    document.body.prepend(navbar);

    // Add AI assistant avatar and greeting
    const avatarContainer = document.createElement('div');
    avatarContainer.classList.add('avatar-container');
    const avatarImg = document.createElement('img');
    avatarImg.src = 'https://th.bing.com/th/id/OIP.zeeoSeLcH19kuQ1ABNOGCwHaHU?rs=1&pid=ImgDetMain';
    avatarImg.alt = "AI Assistant Avatar";
    avatarImg.classList.add('avatar-img');
    avatarContainer.appendChild(avatarImg);

    const greeting = document.createElement('h2');
    const currentHour = new Date().getHours();
    greeting.textContent = currentHour < 12 ? "Good Morning!" : currentHour < 18 ? "Good Afternoon!" : "Good Evening!";
    avatarContainer.appendChild(greeting);
    document.body.prepend(avatarContainer);

    // Text-to-speech via the Web Speech API
    function speak(text) {
        const synth = window.speechSynthesis;
        const utterance = new SpeechSynthesisUtterance(text);
        synth.speak(utterance);
    }

    // NOTE: 'gradio_event:output_update' is not a standard DOM event; this
    // listener assumes such an event is dispatched when the output updates.
    document.addEventListener('gradio_event:output_update', (event) => {
        const outputText = event.detail?.text || '';
        if (outputText) {
            speak(outputText);
        }
    });
});
</script>
"""
# Gradio interface
with gr.Blocks(css=custom_css) as interface:
    gr.HTML(custom_js)
    gr.Markdown("# **AI Phone Cover Designer**")
    with gr.Row():
        with gr.Column(scale=1):
            color_prompt = gr.Textbox(label="Color", placeholder="E.g., Red")
            phone_type_prompt = gr.Textbox(label="Mobile type", placeholder="E.g., iPhone, Samsung")
            design_prompt = gr.Textbox(label="Design Details", placeholder="E.g., Bold stripes with geometric patterns")
            chatbot = gr.Chatbot()  # Displayed in the UI but not wired to any event.
            generate_button = gr.Button("Generate Design")
        with gr.Column(scale=1):
            output_image = gr.Image(label="Generated Design")
            output_message = gr.Textbox(label="AI Assistant Message", interactive=False)
    generate_button.click(
        infer,
        inputs=[color_prompt, phone_type_prompt, design_prompt],
        outputs=[output_image, output_message],
    )
# Launch the app
interface.launch(debug=True)
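# Run locally with `python app.py`; Gradio serves the UI at
# http://127.0.0.1:7860 by default.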