Lahiru Menikdiwela committed on
Commit
72b02de
·
1 Parent(s): f8155b1

fix automodel for llama

Browse files
Files changed (1) hide show
  1. model.py +2 -2
model.py CHANGED
@@ -1,5 +1,5 @@
1
  import os
2
- from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
3
  from transformers import LEDForConditionalGeneration, LEDTokenizer
4
  from langchain_openai import OpenAI
5
  # from huggingface_hub import login
@@ -23,7 +23,7 @@ def get_local_model(model_name_or_path:str)->pipeline:
23
  model_name_or_path,
24
  token = hf_token
25
  )
26
- model = AutoModelForSeq2SeqLM.from_pretrained(
27
  model_name_or_path,
28
  torch_dtype=torch.float32,
29
  token = hf_token
 
1
  import os
2
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, AutoModelForCausalLM
3
  from transformers import LEDForConditionalGeneration, LEDTokenizer
4
  from langchain_openai import OpenAI
5
  # from huggingface_hub import login
 
23
  model_name_or_path,
24
  token = hf_token
25
  )
26
+ model = AutoModelForCausalLM.from_pretrained(
27
  model_name_or_path,
28
  torch_dtype=torch.float32,
29
  token = hf_token