moamen270 committed
Commit c21ad85 · 1 Parent(s): d759b0c

Update endpoints.py

Files changed (1):
  endpoints.py  +2 -5
endpoints.py CHANGED
@@ -16,10 +16,7 @@ import requests
 def LLM(llm_name, length):
     print(llm_name)
     tokenizer = AutoTokenizer.from_pretrained(llm_name)
-    model = AutoModelForCausalLM.from_pretrained(llm_name,
-                                                 trust_remote_code=True,
-                                                 device_map="auto",
-                                                 load_in_8bit=True)
+    model = AutoModelForCausalLM.from_pretrained(llm_name)
     pipe = pipeline("text-generation",
                     model=model,
                     tokenizer=tokenizer,
@@ -31,7 +28,7 @@ def LLM(llm_name, length):
     return pipe
 
 
-pipe = LLM("WizardLM/WizardCoder-Python-7B-V1.0",4000)
+pipe = LLM("Salesforce/codegen-16B-nl",4000)
 # tokenizer = AutoTokenizer.from_pretrained("WizardLM/WizardCoder-1B-V1.0")
 # base_model = AutoModelForCausalLM.from_pretrained("WizardLM/WizardCoder-1B-V1.0")
 # Mistral 7B
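
For context, a minimal self-contained sketch of what the LLM() helper in endpoints.py looks like after this commit. The import line and the max_length keyword are assumptions (they fall outside the diff context shown above); everything else follows the changed and context lines directly.

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

def LLM(llm_name, length):
    print(llm_name)
    tokenizer = AutoTokenizer.from_pretrained(llm_name)
    # This commit drops trust_remote_code=True, device_map="auto", and load_in_8bit=True,
    # so the model now loads with default (full-precision) settings.
    model = AutoModelForCausalLM.from_pretrained(llm_name)
    pipe = pipeline("text-generation",
                    model=model,
                    tokenizer=tokenizer,
                    max_length=length)  # assumption: "length" is forwarded as max_length
    return pipe

pipe = LLM("Salesforce/codegen-16B-nl", 4000)

Note that without load_in_8bit the checkpoint is loaded in full precision, so the 16B-parameter Salesforce/codegen-16B-nl model will need substantially more memory than the previous 8-bit WizardCoder-7B setup.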