MegaTronX committed on
Commit d6f7262 · verified · 1 Parent(s): c4511bd

Update app.py

Files changed (1)
  1. app.py +98 -94
app.py CHANGED
@@ -1,94 +1,98 @@
- import os
- if os.environ.get("SPACES_ZERO_GPU") is not None: import spaces
- import gradio as gr
- from joycaption import stream_chat_mod, get_text_model, change_text_model, get_repo_gguf
-
- JC_TITLE_MD = "<h1><center>JoyCaption Alpha Two Mod</center></h1>"
- JC_DESC_MD = """This space is mod of [fancyfeast/joy-caption-alpha-two](https://huggingface.co/spaces/fancyfeast/joy-caption-alpha-two),
- [Wi-zz/joy-caption-pre-alpha](https://huggingface.co/Wi-zz/joy-caption-pre-alpha).
- Thanks to [dominic1021](https://huggingface.co/dominic1021), [IceHibiki](https://huggingface.co/IceHibiki)."""
-
- css = """
- .info {text-align:center; !important}
- """
-
- with gr.Blocks(fill_width=True, css=css, delete_cache=(60, 3600)) as demo:
- gr.HTML(JC_TITLE_MD)
- with gr.Row():
- with gr.Column():
- with gr.Group():
- jc_input_image = gr.Image(type="pil", label="Input Image", sources=["upload", "clipboard"], height=384)
- with gr.Accordion("Options", open=False):
- with gr.Row():
- jc_caption_type = gr.Dropdown(
- choices=["Descriptive", "Descriptive (Informal)", "Training Prompt", "MidJourney", "Booru tag list", "Booru-like tag list", "Art Critic", "Product Listing", "Social Media Post"],
- label="Caption Type",
- value="Descriptive",
- )
- jc_caption_length = gr.Dropdown(
- choices=["any", "very short", "short", "medium-length", "long", "very long"] +
- [str(i) for i in range(20, 261, 10)],
- label="Caption Length",
- value="long",
- )
- jc_extra_options = gr.CheckboxGroup(
- choices=[
- "If there is a person/character in the image you must refer to them as {name}.",
- "Do NOT include information about people/characters that cannot be changed (like ethnicity, gender, etc), but do still include changeable attributes (like hair style).",
- "Include information about lighting.",
- "Include information about camera angle.",
- "Include information about whether there is a watermark or not.",
- "Include information about whether there are JPEG artifacts or not.",
- "If it is a photo you MUST include information about what camera was likely used and details such as aperture, shutter speed, ISO, etc.",
- "Do NOT include anything sexual; keep it PG.",
- "Do NOT mention the image's resolution.",
- "You MUST include information about the subjective aesthetic quality of the image from low to very high.",
- "Include information on the image's composition style, such as leading lines, rule of thirds, or symmetry.",
- "Do NOT mention any text that is in the image.",
- "Specify the depth of field and whether the background is in focus or blurred.",
- "If applicable, mention the likely use of artificial or natural lighting sources.",
- "Do NOT use any ambiguous language.",
- "Include whether the image is sfw, suggestive, or nsfw.",
- "ONLY describe the most important elements of the image."
- ],
- label="Extra Options"
- )
- with gr.Row():
- jc_name_input = gr.Textbox(label="Person/Character Name (if applicable)")
- gr.Markdown("**Note:** Name input is only used if an Extra Option is selected that requires it.")
- jc_custom_prompt = gr.Textbox(label="Custom Prompt (optional, will override all other settings)")
- gr.Markdown("**Note:** Alpha Two is not a general instruction follower and will not follow prompts outside its training data well. Use this feature with caution.")
-
- with gr.Accordion("Advanced", open=False):
- with gr.Row():
- jc_text_model = gr.Dropdown(label="LLM Model", info="You can enter a huggingface model repo_id to want to use.",
- choices=get_text_model(), value=get_text_model()[0],
- allow_custom_value=True, interactive=True, min_width=320)
- jc_gguf = gr.Dropdown(label=f"GGUF Filename", choices=[], value="",
- allow_custom_value=True, min_width=320, visible=False)
- jc_nf4 = gr.Checkbox(label="Use NF4 quantization", value=True)
- jc_lora = gr.Checkbox(label="Use Custom VLM", info="Llama 3 BF16 only", value=True)
- jc_text_model_button = gr.Button("Load Model", variant="secondary", visible=False)
- jc_use_inference_client = gr.Checkbox(label="Use Inference Client", value=False, visible=False)
- with gr.Row():
- jc_tokens = gr.Slider(minimum=1, maximum=4096, value=300, step=1, label="Max tokens")
- jc_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.6, step=0.1, label="Temperature")
- jc_topp = gr.Slider(minimum=0, maximum=2.0, value=0.9, step=0.01, label="Top-P")
- jc_run_button = gr.Button("Caption", variant="primary")
- with gr.Column():
- jc_output_prompt = gr.Textbox(label="Prompt that was used")
- jc_output_caption = gr.Textbox(label="Caption", show_copy_button=True)
- gr.Markdown(JC_DESC_MD, elem_classes="info")
- gr.LoginButton()
- gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
-
- jc_run_button.click(fn=stream_chat_mod, inputs=[jc_input_image, jc_caption_type, jc_caption_length, jc_extra_options, jc_name_input, jc_custom_prompt,
- jc_tokens, jc_topp, jc_temperature, jc_text_model], outputs=[jc_output_prompt, jc_output_caption])
- jc_text_model.change(change_text_model, [jc_text_model, jc_use_inference_client, jc_gguf, jc_nf4, jc_lora], [jc_text_model], show_api=False)
- #jc_text_model_button.click(change_text_model, [jc_text_model, jc_use_inference_client, jc_gguf, jc_nf4], [jc_text_model], show_api=False)
- #jc_text_model.change(get_repo_gguf, [jc_text_model], [jc_gguf], show_api=False)
- #jc_use_inference_client.change(change_text_model, [jc_text_model, jc_use_inference_client], [jc_text_model], show_api=False)
-
- if __name__ == "__main__":
- #demo.queue()
- demo.launch()
 
 
 
 
 
+ import os
+ if os.environ.get("SPACES_ZERO_GPU") is not None: import spaces
+ import gradio as gr
+ from joycaption import stream_chat_mod, get_text_model, change_text_model, get_repo_gguf
+
+ JC_TITLE_MD = "<h1><center>JoyCaption Alpha Two Mod</center></h1>"
+ JC_DESC_MD = """This space is mod of [fancyfeast/joy-caption-alpha-two](https://huggingface.co/spaces/fancyfeast/joy-caption-alpha-two),
+ [Wi-zz/joy-caption-pre-alpha](https://huggingface.co/Wi-zz/joy-caption-pre-alpha).
+ Thanks to [dominic1021](https://huggingface.co/dominic1021), [IceHibiki](https://huggingface.co/IceHibiki)."""
+
+ css = """
+ .info {text-align:center; !important}
+ """
+
+ with gr.Blocks(fill_width=True, css=css, delete_cache=(60, 3600)) as demo:
+ gr.HTML(JC_TITLE_MD)
+ with gr.Row():
+ with gr.Column():
+ with gr.Group():
+ jc_input_image = gr.Image(type="pil", label="Input Image", sources=["upload", "clipboard"], height=384)
+ with gr.Accordion("Options", open=False):
+ with gr.Row():
+ jc_caption_type = gr.Dropdown(
+ choices=["Descriptive", "Descriptive (Informal)", "Training Prompt", "MidJourney", "Booru tag list", "Booru-like tag list", "Art Critic", "Product Listing", "Social Media Post"],
+ label="Caption Type",
+ value="Descriptive",
+ )
+ jc_caption_length = gr.Dropdown(
+ choices=["any", "very short", "short", "medium-length", "long", "very long"] +
+ [str(i) for i in range(20, 261, 10)],
+ label="Caption Length",
+ value="long",
+ )
+ jc_extra_options = gr.CheckboxGroup(
+ choices=[
+ "Include sexual references. Use terms like pussy and clit instead of genitals or vulva.",
+ "Do NOT mention any logos that are in the image."
+ '''
+ "If there is a person/character in the image you must refer to them as {name}.",
+ "Do NOT include information about people/characters that cannot be changed (like ethnicity, gender, etc), but do still include changeable attributes (like hair style).",
+ "Include information about lighting.",
+ "Include information about camera angle.",
+ "Include information about whether there is a watermark or not.",
+ "Include information about whether there are JPEG artifacts or not.",
+ "If it is a photo you MUST include information about what camera was likely used and details such as aperture, shutter speed, ISO, etc.",
+ "Do NOT include anything sexual; keep it PG.",
+ "Do NOT mention the image's resolution.",
+ "You MUST include information about the subjective aesthetic quality of the image from low to very high.",
+ "Include information on the image's composition style, such as leading lines, rule of thirds, or symmetry.",
+ "Do NOT mention any text that is in the image.",
+ "Specify the depth of field and whether the background is in focus or blurred.",
+ "If applicable, mention the likely use of artificial or natural lighting sources.",
+ "Do NOT use any ambiguous language.",
+ "Include whether the image is sfw, suggestive, or nsfw.",
+ "ONLY describe the most important elements of the image."
+ '''
+ ],
+ label="Extra Options"
+ )
+ with gr.Row():
+ jc_name_input = gr.Textbox(label="Person/Character Name (if applicable)")
+ gr.Markdown("**Note:** Name input is only used if an Extra Option is selected that requires it.")
+ jc_custom_prompt = gr.Textbox(label="Custom Prompt (optional, will override all other settings)")
+ gr.Markdown("**Note:** Alpha Two is not a general instruction follower and will not follow prompts outside its training data well. Use this feature with caution.")
+
+ with gr.Accordion("Advanced", open=False):
+ with gr.Row():
+ jc_text_model = gr.Dropdown(label="LLM Model", info="You can enter a huggingface model repo_id to want to use.",
+ choices=get_text_model(), value=get_text_model()[0],
+ allow_custom_value=True, interactive=True, min_width=320)
+ jc_gguf = gr.Dropdown(label=f"GGUF Filename", choices=[], value="",
+ allow_custom_value=True, min_width=320, visible=False)
+ jc_nf4 = gr.Checkbox(label="Use NF4 quantization", value=True)
+ jc_lora = gr.Checkbox(label="Use Custom VLM", info="Llama 3 BF16 only", value=True)
+ jc_text_model_button = gr.Button("Load Model", variant="secondary", visible=False)
+ jc_use_inference_client = gr.Checkbox(label="Use Inference Client", value=False, visible=False)
+ with gr.Row():
+ jc_tokens = gr.Slider(minimum=1, maximum=4096, value=300, step=1, label="Max tokens")
+ jc_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.6, step=0.1, label="Temperature")
+ jc_topp = gr.Slider(minimum=0, maximum=2.0, value=0.9, step=0.01, label="Top-P")
+ jc_run_button = gr.Button("Caption", variant="primary")
+ with gr.Column():
+ jc_output_prompt = gr.Textbox(label="Prompt that was used")
+ jc_output_caption = gr.Textbox(label="Caption", show_copy_button=True)
+ gr.Markdown(JC_DESC_MD, elem_classes="info")
+ gr.LoginButton()
+ gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
+
+ jc_run_button.click(fn=stream_chat_mod, inputs=[jc_input_image, jc_caption_type, jc_caption_length, jc_extra_options, jc_name_input, jc_custom_prompt,
+ jc_tokens, jc_topp, jc_temperature, jc_text_model], outputs=[jc_output_prompt, jc_output_caption])
+ jc_text_model.change(change_text_model, [jc_text_model, jc_use_inference_client, jc_gguf, jc_nf4, jc_lora], [jc_text_model], show_api=False)
+ #jc_text_model_button.click(change_text_model, [jc_text_model, jc_use_inference_client, jc_gguf, jc_nf4], [jc_text_model], show_api=False)
+ #jc_text_model.change(get_repo_gguf, [jc_text_model], [jc_gguf], show_api=False)
+ #jc_use_inference_client.change(change_text_model, [jc_text_model, jc_use_inference_client], [jc_text_model], show_api=False)
+
+ if __name__ == "__main__":
+ #demo.queue()
+ demo.launch()
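
One note on the new `choices` list: the triple-quoted `'''` block is not a comment in Python. Because there is no comma after "Do NOT mention any logos that are in the image.", implicit adjacent-string-literal concatenation glues the entire quoted block onto that entry, so the CheckboxGroup ends up with only two choices, the second of which contains all of the old option text. A minimal, self-contained sketch of that behavior (illustrative only, not part of app.py), followed by a plain `#`-comment alternative:

# Sketch (standalone example, not the commit itself): adjacent string literals
# are concatenated, so this list has 2 items, not 2 plus the old options.
choices = [
    "Include sexual references. Use terms like pussy and clit instead of genitals or vulva.",
    "Do NOT mention any logos that are in the image."
    '''
    "If there is a person/character in the image you must refer to them as {name}.",
    "Include information about lighting.",
    '''
]
assert len(choices) == 2
assert choices[1].startswith("Do NOT mention any logos")  # old options are fused into this entry

# If the intent was to hide the old options while keeping them in the file,
# ordinary comments avoid the concatenation:
choices = [
    "Include sexual references. Use terms like pussy and clit instead of genitals or vulva.",
    "Do NOT mention any logos that are in the image.",
    # "If there is a person/character in the image you must refer to them as {name}.",
    # "Include information about lighting.",
]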