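"""Gradio demo: run OCR on an uploaded image with GOT-OCR2.0 and search the extracted text."""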
import os
import re
import tempfile

import gradio as gr
import torch
from PIL import Image
from transformers import AutoTokenizer, AutoModel

# Pick a device and load the GOT-OCR2.0 model and tokenizer
device = 'cuda' if torch.cuda.is_available() else 'cpu'

tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, low_cpu_mem_usage=True, device_map=device, use_safetensors=True)
model = model.eval()  # inference only; device_map already placed the weights, so no extra .to() call is needed

def extract_text(image):
    """Run GOT-OCR2.0 on the uploaded image and return the text twice:
    once for display and once for the hidden search state."""
    if image is None:
        return "No image uploaded", ""

    # model.chat expects a file path, so write the PIL image to a unique temporary file
    with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as tmp:
        image_path = tmp.name

    try:
        image.save(image_path)
        res = model.chat(tokenizer, image_path, ocr_type='ocr')
        return res, res
    except Exception as e:
        return f"Error: {e}", ""
    finally:
        if os.path.exists(image_path):
            os.remove(image_path)


def keyword_search(extracted_text, keyword):
    """Highlight case-insensitive matches of the keyword in the extracted text as HTML."""
    if not extracted_text:
        return "No text extracted yet."
    # Escape HTML special characters so the OCR output renders as plain text
    escaped_text = extracted_text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    if not keyword:
        return escaped_text
    # Escape the keyword the same way so it matches the escaped text,
    # then wrap case-insensitive matches in <mark> tags
    escaped_keyword = keyword.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    pattern = re.compile(re.escape(escaped_keyword), re.IGNORECASE)
    highlighted_text = pattern.sub(lambda m: f"<mark>{m.group()}</mark>", escaped_text)
    return highlighted_text

with gr.Blocks() as demo:
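    # Layout: upload an image, extract its text with GOT-OCR2.0, then search and highlight keywords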
    gr.Markdown("# OCR and Document Search Web Application")

    # Hidden state that keeps the raw OCR output available for keyword search
    extracted_text_state = gr.State()

    with gr.Column():
        image_input = gr.Image(type="pil", label="Upload an image")
        extract_button = gr.Button("Extract Text")

    extracted_text_output = gr.Textbox(label="Extracted Text", lines=10)
    keyword_input = gr.Textbox(label="Enter keyword to search")
    search_button = gr.Button("Search")
    search_results_output = gr.HTML(label="Search Results")

    # Wire the buttons: extraction fills both the visible textbox and the hidden state,
    # and the search reads back from that state
    extract_button.click(fn=extract_text, inputs=image_input, outputs=[extracted_text_output, extracted_text_state])
    search_button.click(fn=keyword_search, inputs=[extracted_text_state, keyword_input], outputs=search_results_output)

if __name__ == "__main__":
    demo.launch()