Update app.py
app.py
CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoProcessor, VisionEncoderDecoderModel
+from transformers import AutoTokenizer, AutoProcessor, VisionEncoderDecoderModel, TrOCRProcessor
 from vllm import LLM, SamplingParams
 from PIL import Image

@@ -13,7 +13,8 @@ vllm_model = LLM(model=model_name, tensor_parallel_size=1, device="cpu")
 # Load the OCR model and processor
 ocr_model_name = "microsoft/trocr-small-handwritten"
 ocr_model = VisionEncoderDecoderModel.from_pretrained(ocr_model_name)
-ocr_processor =
+ocr_processor = TrOCRProcessor.from_pretrained(ocr_model_name)
+#ocr_processor = AutoProcessor.from_pretrained(ocr_model_name)

 def generate_response(prompt, max_tokens, temperature, top_p):
     # Define sampling parameters