import gradio as gr
import torch
import pytesseract
from transformers import AutoTokenizer, AutoModel
# Set Tesseract executable path
# Hard-coded Homebrew-on-Apple-Silicon location — NOTE(review): adjust for
# other platforms, or read from the environment/PATH instead.
pytesseract.pytesseract.tesseract_cmd = r'/opt/homebrew/bin/tesseract'
# Load the tokenizer and model
# GOT-OCR2 ships custom model code on the Hub, hence trust_remote_code=True;
# .eval() switches the model to inference mode. Runs at import time, so the
# (large) download/load happens as soon as this module is imported.
tokenizer_eng = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
model_eng = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True).eval()
def perform_ocr(image, language):
    """Run OCR on *image* with the engine matching *language*.

    English text is routed through the GOT-OCR2 model; Hindi is routed
    through Tesseract. Any other language yields an error-message string.

    Args:
        image: Input image (presumably a numpy array from Gradio —
            TODO confirm both engines accept it as-is).
        language: Either "English" or "Hindi".

    Returns:
        The extracted text, or an "unsupported language" message.
    """
    if language == "English":
        # GOT-OCR2 exposes OCR through its chat-style interface.
        return model_eng.chat(tokenizer_eng, image, ocr_type='ocr')
    if language == "Hindi":
        # Tesseract with the Hindi language pack; --psm 6 treats the
        # input as a single uniform block of text.
        return pytesseract.image_to_string(image, lang='hin', config='--psm 6')
    return "Unsupported language selected."
def ocr_and_search(image, language):
    """Extract text from *image* in the selected *language*.

    Thin wrapper around perform_ocr; a search step over the extracted
    text could be layered in here later, but none is applied yet.
    """
    return perform_ocr(image, language)
# Create Gradio interface
# Single-function UI: an image plus a language dropdown in, extracted text out.
iface = gr.Interface(
    fn=ocr_and_search,
    inputs=[
        # type="numpy" delivers the upload as a numpy array to ocr_and_search.
        gr.Image(type="numpy", label="Upload Image"),
        gr.Dropdown(choices=["English", "Hindi"], label="Select Language")
    ],
    outputs=gr.Textbox(label="Extracted Text"),
    title="OCR Application",
    description="Upload an image to extract text using OCR."
)
# Run the app
# Launch the web server only when executed as a script, not on import.
if __name__ == "__main__":
    iface.launch()