import gradio as gr
import requests
from PIL import Image
from io import BytesIO
from tqdm import tqdm
import time
import os

# Hugging Face model repo queried through the serverless Inference API
repo = "artificialguybr/TshirtDesignRedmond-V2"

# Generate a phone-cover design image from the user's prompts
def infer(color_prompt, phone_type_prompt, design_prompt):
    full_prompt = (
        f"A single vertical {color_prompt} colored {phone_type_prompt} back cover "
        f"featuring a bold {design_prompt} design on the front, hanging on a plain wall. "
        "Soft light and shadows create a striking contrast against the minimal "
        "background, evoking modern sophistication."
    )

    print("Generating image with prompt:", full_prompt)
    api_url = f"https://api-inference.huggingface.co/models/{repo}"
    # The Inference API generally requires an access token; this assumes one is
    # supplied via an HF_API_TOKEN environment variable (the variable name is an
    # assumption, not part of the original app). Unauthenticated requests may be
    # rejected or heavily rate-limited.
    hf_token = os.getenv("HF_API_TOKEN")
    headers = {"Authorization": f"Bearer {hf_token}"} if hf_token else {}
    payload = {
        "inputs": full_prompt,
        "parameters": {
            "negative_prompt": "(worst quality, low quality, lowres, oversaturated, grayscale, bad photo:1.4)",
            "num_inference_steps": 30,
            "scheduler": "DPMSolverMultistepScheduler",
        },
    }

    # Poll the Inference API: 503 means the model is still loading; 500 is retried a few times.
    error_count = 0
    pbar = tqdm(total=None, desc="Loading model")
    while True:
        response = requests.post(api_url, headers=headers, json=payload)
        if response.status_code == 200:
            pbar.close()
            speech_text = (
                f"Your image has been generated with the color {color_prompt}, "
                f"mobile type {phone_type_prompt}, and design {design_prompt}."
            )
            return Image.open(BytesIO(response.content)), speech_text
        elif response.status_code == 503:
            # Model is still loading on the backend; wait and keep polling.
            time.sleep(1)
            pbar.update(1)
        elif response.status_code == 500 and error_count < 5:
            # Transient server error; retry up to five times.
            time.sleep(1)
            error_count += 1
        else:
            pbar.close()
            raise Exception(f"API Error: {response.status_code}")


# Custom CSS for Apple-like design
custom_css = """
body {
    font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
    margin: 0;
    padding: 0;
    background: linear-gradient(135deg, #f7f8fa, #dfe2e6);
    color: #333;
}
.navbar {
    background-color: #f8f9fa;
    padding: 10px 20px;
    display: flex;
    justify-content: space-between;
    align-items: center;
    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.navbar a {
    color: #0071e3;
    text-decoration: none;
    font-weight: 500;
    margin: 0 15px;
    transition: color 0.3s;
}
.navbar a:hover {
    color: #0056b3;
}
.avatar-container {
    text-align: center;
    margin-bottom: 20px;
    position: relative;
    animation: head-move 3s infinite;
}
.avatar-img {
    width: 150px;
    height: 150px;
    border-radius: 50%;
    animation: blink 3s infinite, scale 5s infinite;
}
@keyframes blink {
    0%, 100% { opacity: 1; }
    50% { opacity: 0.7; }
}
@keyframes scale {
    0%, 100% { transform: scale(1); }
    50% { transform: scale(1.05); }
}
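
/* The .avatar-container rule above references a 'head-move' animation that is
   never defined; this is a guessed implementation (a gentle vertical bob). */
@keyframes head-move {
    0%, 100% { transform: translateY(0); }
    50% { transform: translateY(-4px); }
}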
"""

# JavaScript for text-to-speech and animations
custom_js = """
<script>
document.addEventListener('DOMContentLoaded', function () {
    // Add navigation bar
    const navbar = document.createElement('div');
    navbar.classList.add('navbar');
    navbar.innerHTML = `
        <a href="#">Home</a>
        <a href="#">Design</a>
        <a href="#">About</a>
    `;
    document.body.prepend(navbar);

    // Add AI assistant avatar and greeting
    const avatarContainer = document.createElement('div');
    avatarContainer.classList.add('avatar-container');
    const avatarImg = document.createElement('img');
    avatarImg.src = 'https://th.bing.com/th/id/OIP.zeeoSeLcH19kuQ1ABNOGCwHaHU?rs=1&pid=ImgDetMain';
    avatarImg.alt = "AI Assistant Avatar";
    avatarImg.classList.add('avatar-img');
    avatarContainer.appendChild(avatarImg);
    const greeting = document.createElement('h2');
    const currentHour = new Date().getHours();
    greeting.textContent = currentHour < 12 ? "Good Morning!" : currentHour < 18 ? "Good Afternoon!" : "Good Evening!";
    avatarContainer.appendChild(greeting);
    document.body.prepend(avatarContainer);

    // Text-to-speech functionality
    function speak(text) {
        const synth = window.speechSynthesis;
        const utterance = new SpeechSynthesisUtterance(text);
        synth.speak(utterance);
    }
    document.addEventListener('gradio_event:output_update', (event) => {
        const outputText = event.detail?.text || '';
        if (outputText) {
            speak(outputText);
        }
    });
});
</script>
"""

# Gradio interface
with gr.Blocks(css=custom_css) as interface:
    gr.HTML(custom_js)
    gr.Markdown("# **AI Phone Cover Designer**")
    with gr.Row():
        with gr.Column(scale=1):
            color_prompt = gr.Textbox(label="Color", placeholder="E.g., Red")
            phone_type_prompt = gr.Textbox(label="Mobile type", placeholder="E.g., iPhone, Samsung")
            design_prompt = gr.Textbox(label="Design Details", placeholder="E.g., Bold stripes with geometric patterns")
            chatbot = gr.Chatbot()
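            # Note: this Chatbot component is displayed but not wired to any event handler.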
            generate_button = gr.Button("Generate Design")
        with gr.Column(scale=1):
            output_image = gr.Image(label="Generated Design")
            output_message = gr.Textbox(label="AI Assistant Message", interactive=False)

    generate_button.click(
        infer,
        inputs=[color_prompt, phone_type_prompt, design_prompt],
        outputs=[output_image, output_message],
    )

# Launch the app
interface.launch(debug=True)