File size: 2,232 Bytes
edf2a04
 
 
831bb79
ebe55ae
dc647b3
 
651516a
ff5d575
fb4f3f2
 
5becc54
fb4f3f2
b768e87
fb4f3f2
 
b768e87
 
831bb79
fb4f3f2
 
831bb79
 
b0e1b1f
b768e87
 
 
 
b0e1b1f
fb4f3f2
 
b768e87
 
 
fb4f3f2
 
831bb79
fb4f3f2
 
831bb79
fb4f3f2
831bb79
b768e87
831bb79
f1e4e34
534795e
b0e1b1f
fb4f3f2
 
 
 
534795e
 
 
b768e87
 
831bb79
b768e87
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import json
import os

import gradio as gr
import requests
from gradio import Error

# Instruction prepended to every user message before it is sent to the model
# (see build_input_prompt below).
SYSTEM_PROMPT = "As an LLM, your job is to generate detailed prompts that start with generate the image, for image generation models based on user input. Be descriptive and specific, but also make sure your prompts are clear and concise."
# App title shown in the Gradio interface and the welcome message.
TITLE = "Image Prompter"
# Placeholder/sample input pre-filled in the textbox and quoted in the welcome text.
EXAMPLE_INPUT = "A Man Riding A Horse in Space"

# Hugging Face API token, read from the environment; None if unset, which
# yields "Bearer None" in the header and will fail auth on protected endpoints.
HF_TOKEN = os.getenv("HF_TOKEN")
# Authorization header sent with every request in post_request.
HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}

def build_input_prompt(message, chatbot, system_prompt):
    """Join the system prompt and the user message into one model input.

    The ``chatbot`` history argument is accepted for interface compatibility
    but is currently unused.
    """
    return f"{system_prompt}\n\n{message}"

def post_request(model_url, payload, *, timeout=30):
    """POST ``payload`` as JSON to ``model_url`` and return the decoded response.

    Args:
        model_url: Endpoint URL to POST to.
        payload: JSON-serializable request body.
        timeout: Seconds before the request is aborted (keyword-only; default 30).
            Without a timeout, requests.post can hang indefinitely on a stalled
            server.

    Returns:
        The response body decoded from JSON.

    Raises:
        requests.HTTPError: If the server responds with a 4xx/5xx status.
        requests.Timeout: If the server does not respond within ``timeout``.
    """
    response = requests.post(model_url, headers=HEADERS, json=payload, timeout=timeout)
    response.raise_for_status()
    return response.json()

def predict(model_url, message, system_prompt):
    """Send a single-turn prompt to the model endpoint and return its reply.

    Builds the full input via build_input_prompt (no chat history is passed),
    POSTs it with fixed sampling parameters, and extracts the generated text.

    Args:
        model_url: Inference endpoint URL, forwarded to post_request.
        message: The user's message.
        system_prompt: Instruction text prepended to the message.

    Returns:
        The model's generated text (the "generated_text" field of the response).

    Raises:
        gradio.Error: If the HTTP request fails or the response body is not
            valid JSON.
    """
    input_prompt = build_input_prompt(message, [], system_prompt)
    data = {
        "prompt": input_prompt,
        "max_new_tokens": 256,
        "temperature": 0.7,
        "top_p": 0.95
    }

    try:
        response_data = post_request(model_url, data)
        # NOTE(review): assumes the endpoint returns a JSON object with a
        # top-level "generated_text" key — confirm against the actual API
        # (some HF inference endpoints return a list of such objects).
        return response_data["generated_text"]
    except requests.HTTPError as e:
        # Surface HTTP failures to the Gradio UI, preserving the cause chain.
        raise Error(f"Request failed with status code {e.response.status_code}") from e
    except json.JSONDecodeError as e:
        # requests' JSONDecodeError subclasses json.JSONDecodeError, so this
        # catches a non-JSON response body. Requires `import json` at file top.
        raise Error(f"Failed to decode response as JSON: {str(e)}") from e

def test_preview_chatbot(message):
    """Run predict against the hard-coded preview model endpoint.

    Uses the module-level SYSTEM_PROMPT; the endpoint URL is fixed here.
    """
    endpoint = "https://huggingface.co/chat/models/llama/llama-3b"
    return predict(endpoint, message, SYSTEM_PROMPT)

# Greeting shown as the bot's first message in the preview chatbot below.
welcome_preview_message = f"""
Expand your imagination and broaden your horizons with LLM. Welcome to **{TITLE}**!:\nThis is a chatbot that can generate detailed prompts for image generation models based on simple and short user input.\nSay something like: 
"{EXAMPLE_INPUT}"
"""

# NOTE(review): these two widgets are created but never passed to gr.Interface
# below (it uses plain "text" components) — they appear to be dead code or an
# unfinished chat layout; confirm before removing.
chatbot_preview = gr.Chatbot(layout="panel", value=[(None, welcome_preview_message)])
textbox_preview = gr.Textbox(scale=7, container=False, value=EXAMPLE_INPUT)

# Simple text-in/text-out interface wrapping test_preview_chatbot.
demo = gr.Interface(
    fn=test_preview_chatbot, 
    inputs="text", 
    outputs="text", 
    title=TITLE, 
    description="Image Prompter"
)

# Start the Gradio server (blocks until shut down).
demo.launch()