mrbeliever committed
Commit 2c8fc65 · verified · 1 Parent(s): 190ad42

Update app.py

Files changed (1)
  1. app.py +40 -20
app.py CHANGED
@@ -1,36 +1,51 @@
 import gradio as gr
-import spaces
 from transformers import AutoModelForCausalLM, AutoProcessor
 import torch
 from PIL import Image

-# Load model and processor
+# Model and Processor Initialization
 models = {
-    "microsoft/Phi-3.5-vision-instruct": AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
+    "microsoft/Phi-3.5-vision-instruct": AutoModelForCausalLM.from_pretrained(
+        "microsoft/Phi-3.5-vision-instruct",
+        trust_remote_code=True,
+        torch_dtype="auto",
+        _attn_implementation="flash_attention_2"
+    ).cuda().eval()
 }

 processors = {
     "microsoft/Phi-3.5-vision-instruct": AutoProcessor.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True)
 }

-# Set the default query
-DEFAULT_QUERY = "You are an image to prompt converter. Your work is to observe each and every detail of the image and craft a detailed prompt under 100 words in this format: [image content/subject, description of action, state, and mood], [art form, style], [artist/photographer reference if needed], [additional settings such as camera and lens settings, lighting, colors, effects, texture, background, rendering]."
+# Default question
+default_question = (
+    "You are an image-to-prompt converter. Your work is to observe each and every detail of the image and "
+    "craft a detailed prompt under 100 words in this format: [image content/subject, description of action, state, "
+    "and mood], [art form, style], [artist/photographer reference if needed], [additional settings such as camera "
+    "and lens settings, lighting, colors, effects, texture, background, rendering]."
+)

-@spaces.GPU
-def run_example(image, model_id="microsoft/Phi-3.5-vision-instruct"):
-    model = models[model_id]
-    processor = processors[model_id]
+# Function to generate prompt
+def generate_caption(image):
+    model = models["microsoft/Phi-3.5-vision-instruct"]
+    processor = processors["microsoft/Phi-3.5-vision-instruct"]

-    # Use the default query directly without a user input text field
-    prompt = f"<|user|>\n<|image_1|>\n{DEFAULT_QUERY}<|end|>\n<|assistant|>"
+    prompt = f"<|user|>\n<|image_1|>\n{default_question}<|end|>\n<|assistant|>\n"
     image = Image.fromarray(image).convert("RGB")

     inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
-    generate_ids = model.generate(**inputs, max_new_tokens=1000, eos_token_id=processor.tokenizer.eos_token_id)
+    generate_ids = model.generate(
+        **inputs,
+        max_new_tokens=1000,
+        eos_token_id=processor.tokenizer.eos_token_id,
+    )
     generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
-    response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+    response = processor.batch_decode(
+        generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
+    )[0]
     return response

+# Enhanced CSS for streamlined UI
 css = """
 #container {
     background-color: #f9f9f9;
@@ -48,14 +63,14 @@ css = """
     height: 180px;
     object-fit: contain;
 }
-#output_text {
+#output_caption {
     margin-top: 15px;
     border: 2px solid #333;
     border-radius: 8px;
     height: 180px;
     overflow-y: auto;
 }
-#submit_btn {
+#run_button {
     background-color: #fff;
     color: black;
     border-radius: 10px;
@@ -64,17 +79,22 @@ css = """
     transition: background-color 0.3s ease;
     margin-top: 15px;
 }
-#submit_btn:hover {
+#run_button:hover {
     background-color: #333;
 }
 """

+# Gradio Interface with Adjustments
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="container"):
-        input_image = gr.Image(type="pil", label="Upload Image", elem_id="input_image")
-        submit_btn = gr.Button(value="Generate Prompt", elem_id="submit_btn")
-        output_text = gr.Textbox(label="Prompt Output", elem_id="output_text", show_copy_button=True, lines=6)
+        input_image = gr.Image(type="pil", elem_id="input_image", label="Upload Image")
+        run_button = gr.Button(value="Generate Prompt", elem_id="run_button")
+        output_caption = gr.Textbox(label="Generated Prompt", show_copy_button=True, elem_id="output_caption", lines=6)

-    submit_btn.click(run_example, [input_image], output_text)
+    run_button.click(
+        fn=generate_caption,
+        inputs=[input_image],
+        outputs=output_caption,
+    )

 demo.launch(share=False)
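
Note on the committed code: two details may be worth a follow-up. First, _attn_implementation="flash_attention_2" assumes the flash-attn package is installed in the Space's environment, and model loading will fail where it is not. Second, with gr.Image(type="pil") Gradio already delivers a PIL image, so Image.fromarray(image) only works because PIL images happen to expose the numpy array interface; an explicit type guard is clearer. The sketch below is a minimal illustration of both points, reusing the commit's model id; it is not part of commit 2c8fc65, and to_rgb_pil is a hypothetical helper name.

# Sketch: pick an attention implementation based on what is installed,
# and normalize the Gradio input whether it arrives as a PIL image or array.
from PIL import Image
from transformers import AutoModelForCausalLM

try:
    import flash_attn  # noqa: F401 -- presence check only
    ATTN_IMPL = "flash_attention_2"
except ImportError:
    ATTN_IMPL = "eager"  # slower, but needs no extra dependency

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3.5-vision-instruct",
    trust_remote_code=True,
    torch_dtype="auto",
    _attn_implementation=ATTN_IMPL,
).cuda().eval()

def to_rgb_pil(image):
    # Accept either a PIL image (gr.Image(type="pil")) or a numpy array
    # (gr.Image(type="numpy")) and return an RGB PIL image.
    if not isinstance(image, Image.Image):
        image = Image.fromarray(image)
    return image.convert("RGB")

With this guard, generate_caption would accept either Gradio image type unchanged.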