bmas10 committed (verified)
Commit 86cabcc · 1 Parent(s): 48a8a85

Update app.py

Files changed (1)
  1. app.py +2 -6
app.py CHANGED
@@ -7,12 +7,8 @@ from huggingface_hub import InferenceClient
 from transformers import pipeline,AutoModelForCausalLM, AutoTokenizer
 import torch
 
-model_name = "Qwen/Qwen2.5-VL-3B-Instruct"
-#access_token = base64.b64decode('aGZfekJxa0pXZEh1bm90UEJXek1mdkdOc096WXdIVVZvYkRwcg==') # Replace with your token
-
-tokenizer = AutoTokenizer.from_pretrained(model_name) #, use_auth_token=access_token)
-model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")#, use_auth_token=access_token)
-# Load model and tokenizer
+processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-3B-Instruct")
+model = AutoModelForImageTextToText.from_pretrained("Qwen/Qwen2.5-VL-3B-Instruct")
 
 
 def chat(input_text, history=[]):
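Note that the new loading code references AutoProcessor and AutoModelForImageTextToText, which the unchanged import line does not bring in. The sketch below shows one way a text-only chat turn might use the newly loaded processor and model; the extra import, message formatting, and generation settings are illustrative assumptions, not part of this commit.

from transformers import AutoProcessor, AutoModelForImageTextToText

processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-3B-Instruct")
model = AutoModelForImageTextToText.from_pretrained("Qwen/Qwen2.5-VL-3B-Instruct")

def chat(input_text, history=[]):
    # Text-only turn (history is ignored in this sketch): wrap the user input
    # in the chat template the Qwen2.5-VL processor expects.
    messages = [{"role": "user", "content": [{"type": "text", "text": input_text}]}]
    prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(text=[prompt], return_tensors="pt").to(model.device)
    output_ids = model.generate(**inputs, max_new_tokens=256)
    # Drop the prompt tokens so only the newly generated reply is decoded.
    reply_ids = output_ids[:, inputs["input_ids"].shape[1]:]
    return processor.batch_decode(reply_ids, skip_special_tokens=True)[0]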