File size: 5,939 Bytes
de6cf94
 
 
 
 
 
 
 
 
 
 
89b2b66
de6cf94
 
 
 
 
 
 
 
 
 
 
 
89b2b66
 
de6cf94
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89b2b66
 
 
 
 
 
 
 
de6cf94
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89b2b66
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
de6cf94
 
 
89b2b66
de6cf94
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89b2b66
de6cf94
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89b2b66
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
import gradio as gr
import os
from groq import Groq

# Initialize Groq client from the GROQ_API_KEY environment variable.
# NOTE(review): if the variable is unset, api_key is None and the Groq
# client is still constructed — failures surface later at request time;
# confirm whether an early, explicit check is wanted.
api_key = os.getenv("GROQ_API_KEY")
client = Groq(api_key=api_key)

# Global, process-wide chat transcript shared by all users of this app.
# Each entry is a {"role": ..., "content": ...} dict in OpenAI-style format.
# NOTE(review): being module-global, history is shared across concurrent
# sessions — confirm this is intended for a multi-user deployment.
conversation_history = []

def chat_with_bot_stream(user_input, temperature, top_p):
    """Send *user_input* to the Groq chat model and return the full transcript.

    Appends the user turn (and, on the very first turn, a system prompt) to
    the module-level ``conversation_history``, requests a streamed completion,
    accumulates it into one assistant message, and returns the transcript as
    a list of ``(user, assistant)`` tuples suitable for ``gr.Chatbot``.

    Args:
        user_input: The user's message text.
        temperature: Sampling temperature forwarded to the model.
        top_p: Nucleus-sampling parameter forwarded to the model.

    Returns:
        list[tuple[str | None, str | None]]: paired chat rows for gr.Chatbot.
    """
    global conversation_history
    conversation_history.append({"role": "user", "content": user_input})

    # Prepend the system prompt exactly once, before the first user turn.
    if len(conversation_history) == 1:
        conversation_history.insert(0, {
            "role": "system",
            "content": "You are an expert in storyboarding. Provide structured and insightful responses to queries about creating and refining storyboards."
        })

    completion = client.chat.completions.create(
        model="llama3-70b-8192",
        messages=conversation_history,
        temperature=temperature,
        top_p=top_p,
        max_tokens=1024,
        stream=True,
        stop=None,
    )

    # Accumulate the streamed chunks into a single assistant reply.
    # (The full text is returned at once; nothing is yielded incrementally.)
    response_content = ""
    for chunk in completion:
        response_content += chunk.choices[0].delta.content or ""

    conversation_history.append({"role": "assistant", "content": response_content})

    # Build (user, assistant) rows for gr.Chatbot. The system message is
    # skipped — previously it produced a spurious (None, None) row — and
    # each assistant reply is paired with the preceding user message instead
    # of occupying its own half-empty row.
    pairs = []
    for msg in conversation_history:
        if msg["role"] == "user":
            pairs.append((msg["content"], None))
        elif msg["role"] == "assistant":
            if pairs and pairs[-1][1] is None:
                pairs[-1] = (pairs[-1][0], msg["content"])
            else:
                pairs.append((None, msg["content"]))
    return pairs

def generate_storyboard(scenario, temperature, top_p):
    """Produce a six-scene storyboard for *scenario* via the Groq chat API.

    Args:
        scenario: Free-text premise for the storyboard.
        temperature: Sampling temperature forwarded to the model.
        top_p: Nucleus-sampling parameter forwarded to the model.

    Returns:
        str: the model's storyboard text, or a prompt asking for input
        when *scenario* is empty or whitespace-only.
    """
    # Guard clause: nothing to send if the scenario is blank.
    if not scenario.strip():
        return "Please provide a scenario to generate the storyboard."

    system_msg = {
        "role": "system",
        "content": "You are an AI storyteller. Generate a storyboard in a structured table with six scenes.",
    }
    user_msg = {
        "role": "user",
        "content": f"Generate a 6-scene storyboard for: {scenario}",
    }

    # Non-streaming call: the whole storyboard arrives in one response.
    completion = client.chat.completions.create(
        model="llama3-70b-8192",
        messages=[system_msg, user_msg],
        temperature=temperature,
        top_p=top_p,
        max_tokens=1024,
        stream=False,
        stop=None,
    )
    return completion.choices[0].message.content

# HTML banner rendered at the top of the Chat tab (inline <style> centers it).
TITLE = """
<style>
h1 { text-align: center; font-size: 24px; margin-bottom: 10px; }
</style>
<h1>๐Ÿ“– Storyboard Assistant</h1>
"""

# Preset scenarios offered in the "Generate Storyboard" tab's radio picker.
example_scenarios = [
    "A futuristic cityscape under AI governance.",
    "A detective solving a mystery in a cyberpunk world.",
    "A young explorer discovering an ancient civilization.",
    "A spaceship crew encountering an unknown planet.",
    "A medieval knight navigating political intrigue."
]

# Preset questions offered in the Chat tab's radio picker.
example_questions = [
    "How do I create a compelling storyboard?",
    "What are the key elements of a good visual story?",
    "How can AI help with storyboarding?",
    "What are common mistakes in storyboarding?",
    "How do I structure a scene effectively?"
]

# Shared sampling controls, created outside the Blocks context and placed
# into the layout later via .render().
# NOTE(review): the SAME instances are .render()-ed in both tabs (see the two
# "Customize" accordions below). Re-rendering one component instance twice is
# rejected by some Gradio versions — confirm against the pinned Gradio
# version, or create a separate slider pair per tab.
temperature_component = gr.Slider(
    minimum=0,
    maximum=1,
    value=1,
    step=0.01,
    label="Temperature",
    info="Controls randomness. Lower values make responses more deterministic."
)

top_p_component = gr.Slider(
    minimum=0,
    maximum=1,
    value=1,
    step=0.01,
    label="Top-P Sampling",
    info="Limits token selection to tokens with a cumulative probability up to P."
)

# Two-tab Gradio app: a storyboarding chat assistant and a one-shot
# storyboard generator, both driven by the shared sampling sliders.
with gr.Blocks(theme=gr.themes.Glass(primary_hue="violet", secondary_hue="emerald", neutral_hue="stone")) as demo:
    with gr.Tabs():
        with gr.TabItem("๐Ÿ’ฌChat"):
            gr.HTML(TITLE)
            chatbot = gr.Chatbot(label="Storyboard Chatbot")
            with gr.Row():
                user_input = gr.Textbox(
                    label="Your Message",
                    placeholder="Type your question here...",
                    lines=1
                )
                send_button = gr.Button("โœ‹Ask Question")
            
            with gr.Accordion("๐Ÿ› ๏ธ Customize Chatbot", open=False):
                # Place the module-level sliders into this tab's layout.
                temperature_component.render()
                top_p_component.render()
            
            with gr.Accordion("๐Ÿงช Example Questions", open=False):
                # NOTE(review): the name `example_radio` is reassigned in the
                # second tab below. This works only because the .change handler
                # is attached here, before the rebinding — rename one of the
                # two to avoid the fragile shadowing.
                example_radio = gr.Radio(
                    choices=example_questions,
                    label="Example Queries",
                    info="Select an example question or enter your own."
                )
                # Copy the selected example question into the message box.
                example_radio.change(
                    fn=lambda q: q if q else "No question selected.",
                    inputs=[example_radio],
                    outputs=[user_input]
                )
            
            # Chatbot functionality
            # Send the message, refresh the transcript, then clear the input
            # box via the chained .then() callback.
            send_button.click(
                fn=chat_with_bot_stream,
                inputs=[user_input, temperature_component, top_p_component],
                outputs=chatbot,
                queue=True
            ).then(
                fn=lambda: "",
                inputs=None,
                outputs=user_input
            )
        
        with gr.TabItem("๐Ÿ“– Generate Storyboard"):
            gr.Markdown("## Generate a Storyboard")
            scenario_input = gr.Textbox(label="Enter your scenario")
            # Rebinds `example_radio` (see the note in the Chat tab).
            example_radio = gr.Radio(
                choices=example_scenarios,
                label="Example Scenarios",
                info="Select an example scenario or enter your own."
            )
            generate_btn = gr.Button("Generate Storyboard")
            storyboard_output = gr.Textbox(label="Generated Storyboard", interactive=False)
            
            with gr.Accordion("๐Ÿ› ๏ธ Customize Storyboard", open=False):
                # NOTE(review): second .render() of the same slider instances —
                # confirm the pinned Gradio version allows re-rendering.
                temperature_component.render()
                top_p_component.render()
            
            generate_btn.click(
                generate_storyboard, 
                inputs=[scenario_input, temperature_component, top_p_component], 
                outputs=storyboard_output
            )
            
            # Copy the selected example scenario into the scenario box.
            example_radio.change(
                fn=lambda scenario: scenario if scenario else "No scenario selected.",
                inputs=[example_radio],
                outputs=[scenario_input]
            )

demo.launch()