mrbeliever committed on
Commit
1f684a1
·
verified ·
1 Parent(s): 27d875e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -42
app.py CHANGED
@@ -14,12 +14,17 @@ processors = {
14
  "microsoft/Phi-3.5-vision-instruct": AutoProcessor.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True)
15
  }
16
 
17
- default_question = "You are an image to prompt converter. Your work is to observe each and every detail of the image and craft a detailed prompt under 100 words in this format: [image content/subject, description of action, state, and mood], [art form, style], [artist/photographer reference if needed], [additional settings such as camera and lens settings, lighting, colors, effects, texture, background, rendering]."
 
 
 
18
 
19
  user_prompt = '<|user|>\n'
20
  assistant_prompt = '<|assistant|>\n'
21
  prompt_suffix = "<|end|>\n"
22
 
 
 
23
  @spaces.GPU
24
  def run_example(image, text_input=default_question, model_id="microsoft/Phi-3.5-vision-instruct"):
25
  model = models[model_id]
@@ -40,53 +45,29 @@ def run_example(image, text_input=default_question, model_id="microsoft/Phi-3.5-
40
  return response
41
 
42
  css = """
43
- #container {
44
- border: 2px solid #333;
45
- padding: 20px;
46
- max-width: 400px;
47
- margin: auto;
48
  }
49
- #input_img, #output_text {
50
- border: 1px solid #444;
51
- border-radius: 5px;
52
- }
53
- #input_img {
54
- height: 200px;
55
- overflow: hidden;
56
- }
57
- #output_text {
58
- height: 150px;
59
- overflow-y: auto;
60
- }
61
- .copy-btn {
62
- display: inline-block;
63
- padding: 5px 10px;
64
- font-size: 14px;
65
- background-color: #333;
66
- color: #fff;
67
- border: none;
68
- border-radius: 3px;
69
- cursor: pointer;
70
- margin-top: 10px;
71
  }
72
  """
73
 
74
  with gr.Blocks(css=css) as demo:
75
- with gr.Box(elem_id="container"):
76
- input_img = gr.Image(label="Input Picture", elem_id="input_img")
77
- text_input = gr.Textbox(value=default_question, visible=False)
78
- submit_btn = gr.Button(value="Generate")
79
- output_text = gr.Textbox(label="Output Text", elem_id="output_text")
80
-
81
- submit_btn.click(run_example, [input_img, text_input], [output_text])
82
-
83
- def copy_to_clipboard(content):
84
- import pyperclip
85
- pyperclip.copy(content)
86
- return "Text copied!"
87
 
88
- copy_button = gr.Button("Copy Text", elem_id="copy-btn")
89
- copy_button.click(copy_to_clipboard, inputs=output_text, outputs=None)
90
 
91
  demo.queue(api_open=False)
92
  demo.launch(debug=True, show_api=False)
 
14
  "microsoft/Phi-3.5-vision-instruct": AutoProcessor.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True)
15
  }
16
 
17
+ DESCRIPTION = "[Phi-3.5-vision Demo](https://huggingface.co/microsoft/Phi-3.5-vision-instruct)"
18
+
19
+ kwargs = {}
20
+ kwargs['torch_dtype'] = torch.bfloat16
21
 
22
  user_prompt = '<|user|>\n'
23
  assistant_prompt = '<|assistant|>\n'
24
  prompt_suffix = "<|end|>\n"
25
 
26
+ default_question = "You are an image to prompt converter. Your work is to observe each and every detail of the image and craft a detailed prompt under 100 words in this format: [image content/subject, description of action, state, and mood], [art form, style], [artist/photographer reference if needed], [additional settings such as camera and lens settings, lighting, colors, effects, texture, background, rendering]."
27
+
28
  @spaces.GPU
29
  def run_example(image, text_input=default_question, model_id="microsoft/Phi-3.5-vision-instruct"):
30
  model = models[model_id]
 
45
  return response
46
 
47
  css = """
48
+ #output {
49
+ height: 500px;
50
+ overflow: auto;
51
+ border: 1px solid #ccc;
 
52
  }
53
+ #model_selector, #text_input {
54
+ display: none !important;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  }
56
  """
57
 
58
  with gr.Blocks(css=css) as demo:
59
+ gr.Markdown(DESCRIPTION)
60
+ with gr.Tab(label="Phi-3.5 Input"):
61
+ with gr.Row():
62
+ with gr.Column():
63
+ input_img = gr.Image(label="Input Picture")
64
+ model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="microsoft/Phi-3.5-vision-instruct", visible=False)
65
+ text_input = gr.Textbox(label="Question", value=default_question, visible=False)
66
+ submit_btn = gr.Button(value="Submit")
67
+ with gr.Column():
68
+ output_text = gr.Textbox(label="Output Text")
 
 
69
 
70
+ submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text])
 
71
 
72
  demo.queue(api_open=False)
73
  demo.launch(debug=True, show_api=False)