mrbeliever committed
Commit 15923f1 · verified
1 Parent(s): 8f19cf6

Update app.py

Files changed (1):
  app.py +36 -44
app.py CHANGED
@@ -4,80 +4,72 @@ from transformers import AutoModelForCausalLM, AutoProcessor
 import torch
 from PIL import Image
 import subprocess
+
+# Install flash-attn with no CUDA build isolation
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
+# Load model and processor
 models = {
     "microsoft/Phi-3.5-vision-instruct": AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
 }
-
 processors = {
     "microsoft/Phi-3.5-vision-instruct": AutoProcessor.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True)
 }
 
-DESCRIPTION = " "
-
-kwargs = {}
-kwargs['torch_dtype'] = torch.bfloat16
-
-user_prompt = '<|user|>\n'
-assistant_prompt = '<|assistant|>\n'
-prompt_suffix = "<|end|>\n"
-
-default_question = "You are an image to prompt converter. Your work is to observe each and every detail of the image and craft a detailed prompt under 100 words in this format: [image content/subject, description of action, state, and mood], [art form, style], [artist/photographer reference if needed], [additional settings such as camera and lens settings, lighting, colors, effects, texture, background, rendering]."
+# Default description and prompt
+DESCRIPTION = ""
+default_question = "You are an image to prompt converter. Your work is to observe each and every detail of the image and craft a detailed prompt under 100 words."
 
+# Gradio function for generating output from image input
 @spaces.GPU
 def run_example(image, text_input=default_question, model_id="microsoft/Phi-3.5-vision-instruct"):
     model = models[model_id]
     processor = processors[model_id]
-
+    user_prompt = '<|user|>\n'
+    assistant_prompt = '<|assistant|>\n'
+    prompt_suffix = "<|end|>\n"
+
     prompt = f"{user_prompt}<|image_1|>\n{text_input}{prompt_suffix}{assistant_prompt}"
     image = Image.fromarray(image).convert("RGB")
 
     inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
-    generate_ids = model.generate(**inputs,
-                                  max_new_tokens=1000,
-                                  eos_token_id=processor.tokenizer.eos_token_id,
-                                  )
+    generate_ids = model.generate(**inputs, max_new_tokens=1000, eos_token_id=processor.tokenizer.eos_token_id)
     generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
-    response = processor.batch_decode(generate_ids,
-                                      skip_special_tokens=True,
-                                      clean_up_tokenization_spaces=False)[0]
+    response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
     return response
 
+# Custom CSS for styling
 css = """
-#output {
-    margin-top: 15px;
-    border: 2px solid #333; /* Darker outline */
-    border-radius: 8px;
-    height: 180px; /* Fixed height */
-    object-fit: contain; /* Ensure image fits within the fixed height */
+#output_text {
+    height: 500px;
+    overflow: auto;
+    border: 1px solid #333;
 }
-
-#input_img {
-    margin-top: 15px;
-    border: 2px solid #333; /* Darker outline */
-    border-radius: 8px;
-    height: 180px; /* Fixed height */
-    object-fit: contain; /* Ensure image fits within the fixed height */
+#model_selector, #text_input {
+    display: none !important;
 }
-#model_selector, #text_input {
-    display: none !important;
+#main_container {
+    border: 2px solid black;
+    padding: 20px;
+    border-radius: 10px;
 }
 """
 
+# Gradio interface with styling and layout improvements
 with gr.Blocks(css=css) as demo:
     gr.Markdown(DESCRIPTION)
-    with gr.Tab(label="Phi-3.5 Input"):
-        with gr.Row():
-            with gr.Column():
-                input_img = gr.Image(label="Input Picture")
-                model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="microsoft/Phi-3.5-vision-instruct", visible=False)
-                text_input = gr.Textbox(label="Question", value=default_question, visible=False)
-                submit_btn = gr.Button(value="Submit")
-            with gr.Column():
-                output_text = gr.Textbox(label="Output Text")
+    with gr.Row(elem_id="main_container"):
+        with gr.Column():
+            input_img = gr.Image(label="Input Image", interactive=True)
+            model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="microsoft/Phi-3.5-vision-instruct", visible=False)
+            text_input = gr.Textbox(label="Question", value=default_question, visible=False)
+            submit_btn = gr.Button(value="Generate Prompt")
+
+        output_text = gr.Textbox(label="Output", elem_id="output_text", interactive=False)
 
-    submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text])
+    # Link button action to function
+    submit_btn.click(run_example, [input_img, text_input, model_selector], output_text)
 
+# Launch Gradio interface
 demo.queue(api_open=False)
 demo.launch(debug=True, show_api=False)
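
For reference, here is a minimal standalone sketch of the inference flow that the new run_example implements, outside Gradio. The model id, prompt markers, and generation arguments are taken from the diff above; the local image path is hypothetical, and a CUDA device with flash-attn installed is assumed.

import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

model_id = "microsoft/Phi-3.5-vision-instruct"
model = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, torch_dtype="auto",
    _attn_implementation="flash_attention_2",
).cuda().eval()
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

# Phi-3.5-vision chat markers, as defined inside run_example after this commit
prompt = "<|user|>\n<|image_1|>\nDescribe this image.<|end|>\n<|assistant|>\n"
image = Image.open("example.jpg").convert("RGB")  # hypothetical local file
inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")

with torch.no_grad():
    generate_ids = model.generate(**inputs, max_new_tokens=1000,
                                  eos_token_id=processor.tokenizer.eos_token_id)
# Drop the prompt tokens before decoding, as run_example does
generate_ids = generate_ids[:, inputs["input_ids"].shape[1]:]
response = processor.batch_decode(generate_ids, skip_special_tokens=True,
                                  clean_up_tokenization_spaces=False)[0]
print(response)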
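One detail of the new layout code: Gradio components and layout blocks take an elem_id parameter (not a raw HTML id) to expose a CSS hook, which is what the #output_text and #main_container selectors in the custom CSS match against. A minimal sketch of that wiring, with illustrative ids:

import gradio as gr

# The #box selector matches the elem_id given to the Textbox below
css = "#box { border: 1px solid #333; height: 200px; overflow: auto; }"

with gr.Blocks(css=css) as demo:
    with gr.Row(elem_id="wrapper"):  # "wrapper" is an illustrative id
        out = gr.Textbox(label="Output", elem_id="box", interactive=False)

demo.launch()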