RADARPICKv3 / app.py
# --- Previous approach, kept commented out for reference: GOT-OCR2.0 via transformers ---
# from transformers import AutoModel, AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
# model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, low_cpu_mem_usage=True, device_map='cuda', use_safetensors=True, pad_token_id=tokenizer.eos_token_id)
# model = model.eval().cuda()
# Input test image:
# image_file = 'car.jpg'
# Plain-text OCR:
# res = model.chat(tokenizer, image_file, ocr_type='ocr')
# Formatted-text OCR:
# res = model.chat(tokenizer, image_file, ocr_type='format')
# Fine-grained OCR (restricted by box or color):
# res = model.chat(tokenizer, image_file, ocr_type='ocr', ocr_box='')
# res = model.chat(tokenizer, image_file, ocr_type='format', ocr_box='')
# res = model.chat(tokenizer, image_file, ocr_type='ocr', ocr_color='')
# res = model.chat(tokenizer, image_file, ocr_type='format', ocr_color='')
# Multi-crop OCR:
# res = model.chat_crop(tokenizer, image_file, ocr_type='ocr')
# res = model.chat_crop(tokenizer, image_file, ocr_type='format')
# Render the formatted OCR results to HTML:
# res = model.chat(tokenizer, image_file, ocr_type='format', render=True, save_render_file='./demo.html')
# print(res)
import os

import google.generativeai as genai
import PIL.Image

# Read the API key from an environment variable (GOOGLE_API_KEY); never hardcode the key itself.
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

model = genai.GenerativeModel('gemini-1.5-flash')

# generate_content takes a list of parts: a text prompt followed by a PIL image.
response = model.generate_content(
    ["the color of the car is ?", PIL.Image.open("car.jpg")]
)
print(response.text)
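
# Hypothetical helper (not in the original script): a minimal sketch, kept commented out like
# the reference block above, showing how the same Gemini call could be reused for arbitrary
# prompt/image pairs. The name `describe_image` and its parameters are assumptions for
# illustration only.
# def describe_image(prompt: str, image_path: str) -> str:
#     image = PIL.Image.open(image_path)
#     result = model.generate_content([prompt, image])
#     return result.text
#
# Example usage:
# print(describe_image("the color of the car is ?", "car.jpg"))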