# NOTE(review): removed web-scrape artifacts that preceded the real source
# (file-size header, commit hashes, and a copied line-number gutter) — they
# were not part of the program and made the file unparseable as Python.
import spaces
import gradio as gr
from joycaption import stream_chat_mod, get_text_model, change_text_model, get_repo_gguf

# HTML heading rendered at the top of the app via gr.HTML().
JC_TITLE_MD = "<h1><center>JoyCaption Pre-Alpha Mod</center></h1>"
# Attribution blurb rendered below the UI as Markdown (elem_classes="info").
JC_DESC_MD = """This space is mod of [fancyfeast/joy-caption-pre-alpha](https://huggingface.co/spaces/fancyfeast/joy-caption-pre-alpha),

 [Wi-zz/joy-caption-pre-alpha](https://huggingface.co/Wi-zz/joy-caption-pre-alpha)"""

# Custom CSS: the ".info" class centers the description Markdown.
# NOTE(review): for this to take effect it must be passed as
# gr.Blocks(css=css) — verify that it is actually wired in below.
css = """

.info {text-align:center; display:inline-flex; align-items:center !important}

"""

# Build the Gradio UI. `css` was previously defined but never passed in, so
# the ".info" class used by the description Markdown had no effect — fixed here.
with gr.Blocks(css=css) as demo:
    gr.HTML(JC_TITLE_MD)
    # Hoist the model listing: get_text_model() was called twice for the same
    # dropdown (choices and default value); call it once instead.
    text_models = get_text_model()
    with gr.Row():
        with gr.Column():
            with gr.Group():
                jc_input_image = gr.Image(type="pil", label="Input Image", sources=["upload", "clipboard"], height=384)
                with gr.Accordion("Advanced", open=False):
                    with gr.Row():
                        jc_text_model = gr.Dropdown(label="LLM Model",
                                                    info="Enter the Hugging Face repo_id of the model you want to use.",
                                                    choices=text_models, value=text_models[0],
                                                    allow_custom_value=True, interactive=True, min_width=320)
                        # Hidden until a GGUF repo is selected (see the commented-out
                        # jc_text_model.change wiring below).
                        jc_gguf = gr.Dropdown(label="GGUF Filename", choices=[], value="",
                                              allow_custom_value=True, min_width=320, visible=False)
                        jc_nf4 = gr.Checkbox(label="Use NF4 quantization", value=True)
                        jc_text_model_button = gr.Button("Load Model", variant="secondary")
                    jc_use_inference_client = gr.Checkbox(label="Use Inference Client", value=False, visible=False)
                    with gr.Row():
                        jc_tokens = gr.Slider(minimum=1, maximum=4096, value=300, step=1, label="Max tokens")
                        jc_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.5, step=0.1, label="Temperature")
                        jc_topk = gr.Slider(minimum=0, maximum=100, value=40, step=10, label="Top-k")
            jc_run_button = gr.Button("Caption", variant="primary")

        with gr.Column():
            jc_output_caption = gr.Textbox(label="Caption", show_copy_button=True)
    gr.Markdown(JC_DESC_MD, elem_classes="info")

    # Event wiring.
    jc_run_button.click(fn=stream_chat_mod, inputs=[jc_input_image, jc_tokens, jc_topk, jc_temperature], outputs=[jc_output_caption])
    jc_text_model_button.click(change_text_model, [jc_text_model, jc_use_inference_client, jc_gguf, jc_nf4], [jc_text_model], show_api=False)
    #jc_text_model.change(get_repo_gguf, [jc_text_model], [jc_gguf], show_api=False)
    # NOTE(review): this previously passed only 2 of the 4 inputs that the same
    # handler receives from the button click above; made consistent — confirm
    # change_text_model's signature tolerates/expects all four.
    jc_use_inference_client.change(change_text_model, [jc_text_model, jc_use_inference_client, jc_gguf, jc_nf4], [jc_text_model], show_api=False)

if __name__ == "__main__":
    # Enable request queuing (required for streaming/long-running handlers),
    # then start the Gradio server.
    demo.queue()
    demo.launch()