virendravaishnav committed
Commit c282e28 · 1 Parent(s): b966683

Updated with OCR model and Gradio integration

Files changed (2):
  1. app.py +3 -4
  2. requirements.txt +1 -3
app.py CHANGED
@@ -1,13 +1,12 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoProcessor, AutoConfig, AutoModelForSeq2SeqLM
+from transformers import AutoTokenizer, AutoProcessor, AutoModel

 repo_id = "OpenGVLab/InternVL2-1B"

-# Load the tokenizer, processor, and model directly from the Hugging Face Hub
+# Load the tokenizer, processor, and model directly from the Hub
 tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
 processor = AutoProcessor.from_pretrained(repo_id, trust_remote_code=True)
-config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
-model = AutoModelForSeq2SeqLM.from_pretrained(repo_id, config=config, trust_remote_code=True)
+model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)

 def analyze_image(image):
     img = image.convert("RGB")
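For context, a minimal sketch of how the rest of app.py might use the objects loaded above; this is not part of the commit. The 448x448 torchvision preprocessing and the model.chat() call follow the public InternVL2 model card, while the OCR prompt, the Gradio wiring, and preprocessing manually instead of through the AutoProcessor are illustrative assumptions.

# Sketch (illustrative, not from the commit): a possible complete app.py after this change.
import gradio as gr
import torchvision.transforms as T
from transformers import AutoTokenizer, AutoModel

repo_id = "OpenGVLab/InternVL2-1B"

# Load the tokenizer and model from the Hub; trust_remote_code pulls in the InternVL2 classes.
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True).eval()

# 448x448 resize and ImageNet normalization, as on the InternVL2 model card.
transform = T.Compose([
    T.Resize((448, 448)),
    T.ToTensor(),
    T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
])

def analyze_image(image):
    img = image.convert("RGB")
    pixel_values = transform(img).unsqueeze(0).to(model.dtype)  # shape [1, 3, 448, 448]
    # Hypothetical OCR prompt; "<image>" marks where the image tokens are inserted.
    question = "<image>\nExtract all of the text visible in this image."
    # chat() is provided by the repo's trust_remote_code modeling file (see the model card).
    response = model.chat(tokenizer, pixel_values, question,
                          generation_config=dict(max_new_tokens=512, do_sample=False))
    return response

demo = gr.Interface(
    fn=analyze_image,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="InternVL2 OCR demo",
)

if __name__ == "__main__":
    demo.launch()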
requirements.txt CHANGED
@@ -1,6 +1,4 @@
 transformers>=4.31.0
 gradio>=3.35.2
 torch>=1.9.0
-huggingface_hub>=0.14.1
-pillow
-accelerate
+pillow
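The trimmed list still appears to cover what app.py needs (transformers and torch for the model, gradio for the UI, pillow for the uploaded PIL image) and installs the usual way:

pip install -r requirements.txt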