mrbeliever committed on
Commit
3901da8
·
verified ·
1 Parent(s): 352c3f8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -26
app.py CHANGED
@@ -14,15 +14,6 @@ processors = {
14
  "microsoft/Phi-3.5-vision-instruct": AutoProcessor.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True)
15
  }
16
 
17
- DESCRIPTION = "[Phi-3.5-vision Demo](https://huggingface.co/microsoft/Phi-3.5-vision-instruct)"
18
-
19
- kwargs = {}
20
- kwargs['torch_dtype'] = torch.bfloat16
21
-
22
- user_prompt = '<|user|>\n'
23
- assistant_prompt = '<|assistant|>\n'
24
- prompt_suffix = "<|end|>\n"
25
-
26
  default_question = "You are an image to prompt converter. Your work is to observe each and every detail of the image and craft a detailed prompt under 100 words in this format: [image content/subject, description of action, state, and mood], [art form, style], [artist/photographer reference if needed], [additional settings such as camera and lens settings, lighting, colors, effects, texture, background, rendering]."
27
 
28
  @spaces.GPU
@@ -30,7 +21,7 @@ def run_example(image, text_input=default_question, model_id="microsoft/Phi-3.5-
30
  model = models[model_id]
31
  processor = processors[model_id]
32
 
33
- prompt = f"{user_prompt}<|image_1|>\n{text_input}{prompt_suffix}{assistant_prompt}"
34
  image = Image.fromarray(image).convert("RGB")
35
 
36
  inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
@@ -45,29 +36,42 @@ def run_example(image, text_input=default_question, model_id="microsoft/Phi-3.5-
45
  return response
46
 
47
  css = """
48
- #output {
49
- height: 500px;
50
- overflow: auto;
51
- border: 1px solid #ccc;
 
52
  }
53
- #model_selector, #text_input {
54
- display: none !important;
 
 
 
 
 
 
 
 
 
 
 
 
55
  }
56
  """
57
 
58
  with gr.Blocks(css=css) as demo:
59
- gr.Markdown(DESCRIPTION)
60
- with gr.Tab(label="Phi-3.5 Input"):
 
61
  with gr.Row():
62
- with gr.Column():
63
- input_img = gr.Image(label="Input Picture")
64
- model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="microsoft/Phi-3.5-vision-instruct", visible=False)
65
- text_input = gr.Textbox(label="Question", value=default_question, visible=False)
66
- submit_btn = gr.Button(value="Submit")
67
- with gr.Column():
68
- output_text = gr.Textbox(label="Output Text")
69
 
70
- submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text])
 
71
 
72
  demo.queue(api_open=False)
73
  demo.launch(debug=True, show_api=False)
 
14
  "microsoft/Phi-3.5-vision-instruct": AutoProcessor.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True)
15
  }
16
 
 
 
 
 
 
 
 
 
 
17
  default_question = "You are an image to prompt converter. Your work is to observe each and every detail of the image and craft a detailed prompt under 100 words in this format: [image content/subject, description of action, state, and mood], [art form, style], [artist/photographer reference if needed], [additional settings such as camera and lens settings, lighting, colors, effects, texture, background, rendering]."
18
 
19
  @spaces.GPU
 
21
  model = models[model_id]
22
  processor = processors[model_id]
23
 
24
+ prompt = f"<|user|>\n<|image_1|>\n{text_input}<|end|>\n<|assistant|>\n"
25
  image = Image.fromarray(image).convert("RGB")
26
 
27
  inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
 
36
  return response
37
 
38
  css = """
39
+ .container {
40
+ border: 2px solid #333;
41
+ padding: 20px;
42
+ max-width: 400px;
43
+ margin: auto;
44
  }
45
+ #input_img, #output_text {
46
+ border: 2px solid #333;
47
+ width: 100%;
48
+ height: 300px;
49
+ object-fit: cover;
50
+ }
51
+ .gr-button {
52
+ width: 100%;
53
+ margin-top: 10px;
54
+ }
55
+ #copy_button {
56
+ float: right;
57
+ margin-top: -30px;
58
+ cursor: pointer;
59
  }
60
  """
61
 
62
  with gr.Blocks(css=css) as demo:
63
+ with gr.Box(elem_id="container"):
64
+ input_img = gr.Image(label="Input Picture", elem_id="input_img", type="pil")
65
+ generate_button = gr.Button("Generate Prompt", elem_id="generate_button")
66
  with gr.Row():
67
+ output_text = gr.Textbox(label="Output Text", elem_id="output_text", interactive=False)
68
+ copy_button = gr.Button("Copy", elem_id="copy_button")
69
+
70
+ # Copy functionality
71
+ copy_button.click(fn=lambda text: text, inputs=output_text, outputs=None)
 
 
72
 
73
+ # Generate functionality
74
+ generate_button.click(run_example, [input_img, default_question], [output_text])
75
 
76
  demo.queue(api_open=False)
77
  demo.launch(debug=True, show_api=False)