BenK0y committed on
Commit
9dbce78
·
verified ·
1 Parent(s): 2e7b4c8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -45
app.py CHANGED
@@ -1,47 +1,10 @@
1
- import torch
2
- from PIL import Image
3
- from transformers import AutoModel, AutoTokenizer
4
 
5
- model = AutoModel.from_pretrained('openbmb/MiniCPM-V-2_6', trust_remote_code=True,
6
- attn_implementation='sdpa', torch_dtype=torch.bfloat16) # sdpa or flash_attention_2, no eager
7
- model = model.eval().cuda()
8
- tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-2_6', trust_remote_code=True)
9
 
10
- image = Image.open('car.jpg').convert('RGB')
11
- question = 'What is in the image?'
12
- msgs = [{'role': 'user', 'content': [image, question]}]
13
-
14
- res = model.chat(
15
- image=None,
16
- msgs=msgs,
17
- tokenizer=tokenizer
18
- )
19
- print(res)
20
-
21
- ## if you want to use streaming, please make sure sampling=True and stream=True
22
- ## the model.chat will return a generator
23
- res = model.chat(
24
- image=None,
25
- msgs=msgs,
26
- tokenizer=tokenizer,
27
- sampling=True,
28
- stream=True
29
- )
30
-
31
- generated_text = ""
32
- for new_text in res:
33
- generated_text += new_text
34
- print(new_text, flush=True, end='')
35
-
36
-
37
- #import google.generativeai as genai
38
- #import os
39
-
40
- #genai.configure(api_key=os.environ["AIzaSyB5WiEJf_yLMD1dMQf305EAbaPTzF_QD-I"])
41
-
42
- #model = genai.GenerativeModel('gemini-1.5-flash')
43
- #response = model.generate_content(
44
- # text_input="the color of the car is ?",
45
- # image_input="car.jpg"
46
- #)
47
- #print(response)
 
1
+ import google.generativeai as genai
2
+ import PIL.Image
3
+ import os
4
 
5
+ genai.configure(api_key=os.environ["AIzaSyB5WiEJf_yLMD1dMQf305EAbaPTzF_QD-I"])
6
+ img = PIL.Image.open('car.png')
 
 
7
 
8
+ model = genai.GenerativeModel(model_name="gemini-1.5-flash")
9
+ response = model.generate_content(["What is in this photo?", img])
10
+ print(response)