bokesyo committed
Commit 1048188 · verified · 1 Parent(s): b7fd459

Update app.py

Files changed (1):
  app.py  +13 -13
app.py CHANGED
@@ -179,38 +179,38 @@ def downvote(knowledge_base, query):
     return
 
 
+
 device = 'cuda'
+
+print("emb model load begin...")
 model_path = 'RhapsodyAI/minicpm-visual-embedding-v0' # replace with your local model path
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
 model.eval()
 model.to(device)
+print("emb model load success!")
+
+print("gen model load begin...")
+gen_model_path = 'openbmb/MiniCPM-V-2_6'
+gen_tokenizer = AutoTokenizer.from_pretrained(gen_model_path, trust_remote_code=True)
+gen_model = AutoModel.from_pretrained(gen_model_path, trust_remote_code=True, attn_implementation='sdpa', torch_dtype=torch.bfloat16)
+gen_model.eval()
+gen_model.to(device)
+print("gen model load success!")
 
 
 @spaces.GPU(duration=50)
 def answer_question(images, question):
-    print("model load begin...")
-    gen_model_path = 'openbmb/MiniCPM-V-2_6'
-    gen_tokenizer = AutoTokenizer.from_pretrained(gen_model_path, trust_remote_code=True)
-    gen_model = AutoModel.from_pretrained(gen_model_path, trust_remote_code=True, attn_implementation='sdpa', torch_dtype=torch.bfloat16)
-    gen_model.eval()
-    gen_model.to(device)
-    print("model load success!")
-
+    global gen_model, gen_tokenizer
     # here each element of images is a tuple of (image_path, None).
-
     images_ = [Image.open(image[0]).convert('RGB') for image in images]
-
     msgs = [{'role': 'user', 'content': [*images_, question]}]
-
     answer = gen_model.chat(
         image=None,
         msgs=msgs,
         tokenizer=gen_tokenizer
     )
-
     print(answer)
-
     return answer
 
 
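With this commit, both the embedding model and the MiniCPM-V-2_6 generation model are created once at module import time, and the ZeroGPU-decorated handler only runs inference against the already-loaded objects. A minimal sketch of what this part of app.py looks like after the change, assuming the imports implied by the names used above (torch, spaces, PIL.Image, and transformers' AutoModel/AutoTokenizer) and omitting the startup prints and the surrounding retrieval code:

import torch
import spaces
from PIL import Image
from transformers import AutoModel, AutoTokenizer

device = 'cuda'

# Embedding model for retrieval, loaded once at startup.
model_path = 'RhapsodyAI/minicpm-visual-embedding-v0'
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
model.eval()
model.to(device)

# Generation model, now also loaded once at startup instead of per request.
gen_model_path = 'openbmb/MiniCPM-V-2_6'
gen_tokenizer = AutoTokenizer.from_pretrained(gen_model_path, trust_remote_code=True)
gen_model = AutoModel.from_pretrained(
    gen_model_path,
    trust_remote_code=True,
    attn_implementation='sdpa',
    torch_dtype=torch.bfloat16,
)
gen_model.eval()
gen_model.to(device)

@spaces.GPU(duration=50)  # GPU is attached only while this handler runs
def answer_question(images, question):
    global gen_model, gen_tokenizer
    # each element of images is a tuple of (image_path, None)
    images_ = [Image.open(image[0]).convert('RGB') for image in images]
    msgs = [{'role': 'user', 'content': [*images_, question]}]
    answer = gen_model.chat(image=None, msgs=msgs, tokenizer=gen_tokenizer)
    print(answer)
    return answer

Loading at module scope means the weights are fetched and moved to the device once per Space startup rather than on every call, so the 50-second spaces.GPU window is spent on inference instead of model loading.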