mrbeliever committed
Commit b178c1a · verified · 1 Parent(s): ebb3eb4

Update app.py

Files changed (1)
  1. app.py +39 -63
app.py CHANGED
@@ -1,97 +1,73 @@
 import gradio as gr
 import spaces
-from transformers import AutoModelForCausalLM, AutoProcessor, GPT2LMHeadModel, GPT2Tokenizer
 import torch
 from PIL import Image
 import subprocess
-
-# Install flash-attn with no CUDA build isolation
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

-# Define models and processors with pinning to a stable revision
 models = {
-    "microsoft/Phi-3.5-vision-instruct": AutoModelForCausalLM.from_pretrained(
-        "microsoft/Phi-3.5-vision-instruct",
-        revision="specific-revision-hash",  # Pinning to a specific revision for stability
-        trust_remote_code=True,
-        torch_dtype="auto",
-        _attn_implementation="flash_attention_2"
-    ).cuda().eval()
 }

 processors = {
-    "microsoft/Phi-3.5-vision-instruct": AutoProcessor.from_pretrained(
-        "microsoft/Phi-3.5-vision-instruct",
-        revision="specific-revision-hash",  # Pinning to a specific revision for stability
-        trust_remote_code=True
-    )
 }

-# Fallback to GPT-2 for testing
-def load_fallback_model():
-    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
-    model = GPT2LMHeadModel.from_pretrained("gpt2").cuda().eval()
-    return model, tokenizer
-
-# Default description and prompt
 DESCRIPTION = "[Phi-3.5-vision Demo](https://huggingface.co/microsoft/Phi-3.5-vision-instruct)"
-default_question = "You are an image to prompt converter. Your work is to observe each and every detail of the image and craft a detailed prompt under 100 words."

-# Gradio function for generating output from image input with error handling
 @spaces.GPU
 def run_example(image, text_input=default_question, model_id="microsoft/Phi-3.5-vision-instruct"):
-    try:
-        model = models[model_id]
-        processor = processors[model_id]
-    except KeyError as e:
-        print(f"Error loading model: {e}. Falling back to GPT-2.")
-        model, processor = load_fallback_model()
-
-    user_prompt = '<|user|>\n'
-    assistant_prompt = '<|assistant|>\n'
-    prompt_suffix = "<|end|>\n"
-
     prompt = f"{user_prompt}<|image_1|>\n{text_input}{prompt_suffix}{assistant_prompt}"
     image = Image.fromarray(image).convert("RGB")

     inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
-    generate_ids = model.generate(**inputs, max_new_tokens=1000, eos_token_id=processor.tokenizer.eos_token_id)
     generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
-    response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
     return response

-# Custom CSS for styling
 css = """
-#output_text {
-    height: 500px;
-    overflow: auto;
-    border: 1px solid #333;
-}
-#model_selector, #text_input {
-    display: none !important;
 }
-#main_container {
-    border: 2px solid black;
-    padding: 20px;
-    border-radius: 10px;
 }
 """

-# Gradio interface with styling and layout improvements
 with gr.Blocks(css=css) as demo:
     gr.Markdown(DESCRIPTION)
-    with gr.Row(id="main_container"):
-        with gr.Column():
-            input_img = gr.Image(label="Input Image", interactive=True)
-            model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="microsoft/Phi-3.5-vision-instruct", visible=False)
-            text_input = gr.Textbox(label="Question", value=default_question, visible=False)
-            submit_btn = gr.Button(value="Generate Prompt")
-
-        output_text = gr.Textbox(label="Output", id="output_text", interactive=False)

-    # Link button action to function
-    submit_btn.click(run_example, [input_img, text_input, model_selector], output_text)

-# Launch Gradio interface
 demo.queue(api_open=False)
-demo.launch(debug=True, show_api=False)
 
 import gradio as gr
 import spaces
+from transformers import AutoModelForCausalLM, AutoProcessor
 import torch
 from PIL import Image
 import subprocess
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

 models = {
+    "microsoft/Phi-3.5-vision-instruct": AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
 }

 processors = {
+    "microsoft/Phi-3.5-vision-instruct": AutoProcessor.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True)
 }

 DESCRIPTION = "[Phi-3.5-vision Demo](https://huggingface.co/microsoft/Phi-3.5-vision-instruct)"

+kwargs = {}
+kwargs['torch_dtype'] = torch.bfloat16
+
+user_prompt = '<|user|>\n'
+assistant_prompt = '<|assistant|>\n'
+prompt_suffix = "<|end|>\n"
+
+default_question = "You are an image to prompt converter. Your work is to observe each and every detail of the image and craft a detailed prompt under 100 words in this format: [image content/subject, description of action, state, and mood], [art form, style], [artist/photographer reference if needed], [additional settings such as camera and lens settings, lighting, colors, effects, texture, background, rendering]."
+
 @spaces.GPU
 def run_example(image, text_input=default_question, model_id="microsoft/Phi-3.5-vision-instruct"):
+    model = models[model_id]
+    processor = processors[model_id]
+
     prompt = f"{user_prompt}<|image_1|>\n{text_input}{prompt_suffix}{assistant_prompt}"
     image = Image.fromarray(image).convert("RGB")

     inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
+    generate_ids = model.generate(**inputs,
+                                  max_new_tokens=1000,
+                                  eos_token_id=processor.tokenizer.eos_token_id,
+                                  )
     generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
+    response = processor.batch_decode(generate_ids,
+                                      skip_special_tokens=True,
+                                      clean_up_tokenization_spaces=False)[0]
     return response

 css = """
+#output {
+    height: 500px;
+    overflow: auto;
+    border: 1px solid #ccc;
 }
+#model_selector, #text_input {
+    display: none !important;
 }
 """

 with gr.Blocks(css=css) as demo:
     gr.Markdown(DESCRIPTION)
+    with gr.Tab(label="Phi-3.5 Input"):
+        with gr.Row():
+            with gr.Column():
+                input_img = gr.Image(label="Input Picture")
+                model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="microsoft/Phi-3.5-vision-instruct", visible=False)
+                text_input = gr.Textbox(label="Question", value=default_question, visible=False)
+                submit_btn = gr.Button(value="Submit")
+            with gr.Column():
+                output_text = gr.Textbox(label="Output Text")

+        submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text])

 demo.queue(api_open=False)
+demo.launch(debug=True, show_api=False)
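
For reference, a minimal standalone sketch of the inference path the updated app.py wires into Gradio, assuming a local CUDA GPU. The model id, chat-format tokens, and generation settings come from the new code above; the attention implementation, image path, and question are illustrative placeholders, not part of the commit.

import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

model_id = "microsoft/Phi-3.5-vision-instruct"

# Load model and processor the same way the new app.py does; "eager" attention is
# assumed here so the sketch runs without flash-attn installed (the Space uses
# "flash_attention_2").
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype="auto",
    _attn_implementation="eager",
).cuda().eval()
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

# Build the single-image chat prompt in the same <|user|> / <|image_1|> / <|assistant|> format.
question = "Describe every detail of this image."  # placeholder question
prompt = f"<|user|>\n<|image_1|>\n{question}<|end|>\n<|assistant|>\n"
image = Image.open("example.jpg").convert("RGB")  # hypothetical local image file

inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
with torch.no_grad():
    generate_ids = model.generate(
        **inputs,
        max_new_tokens=1000,
        eos_token_id=processor.tokenizer.eos_token_id,
    )
# Strip the prompt tokens and decode only the newly generated text.
generate_ids = generate_ids[:, inputs["input_ids"].shape[1]:]
print(processor.batch_decode(
    generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
)[0])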