moamen270 committed on
Commit
960bed2
·
1 Parent(s): 0909581

Update endpoints.py

Browse files
Files changed (1) hide show
  1. endpoints.py +27 -19
endpoints.py CHANGED
@@ -13,25 +13,33 @@ import requests
13
  # response = requests.post(API_URL, headers=headers, json=payload)
14
  # return response.json()
15
 
16
- def LLM(llm_name, length):
17
- print(llm_name)
18
- tokenizer = AutoTokenizer.from_pretrained(llm_name)
19
- model = AutoModelForCausalLM.from_pretrained(llm_name,
20
- trust_remote_code=True,
21
- device_map="auto",
22
- load_in_8bit=True)
23
- pipe = pipeline("text-generation",
24
- model=model,
25
- tokenizer=tokenizer,
26
- max_length=length,
27
- do_sample=True,
28
- top_p=0.95,
29
- repetition_penalty=1.2,
30
- )
31
- return pipe
32
-
33
-
34
- pipe = LLM("replit/replit-code-v1-3b",4000)
 
 
 
 
 
 
 
 
35
  # tokenizer = AutoTokenizer.from_pretrained("WizardLM/WizardCoder-1B-V1.0")
36
  # base_model = AutoModelForCausalLM.from_pretrained("WizardLM/WizardCoder-1B-V1.0")
37
  # Mistral 7B
 
13
  # response = requests.post(API_URL, headers=headers, json=payload)
14
  # return response.json()
15
 
16
+ # def LLM(llm_name, length):
17
+ # print(llm_name)
18
+ # tokenizer = AutoTokenizer.from_pretrained(llm_name)
19
+ # model = AutoModelForCausalLM.from_pretrained(llm_name,
20
+ # trust_remote_code=True,
21
+ # device_map="auto",
22
+ # load_in_8bit=True)
23
+ # pipe = pipeline("text-generation",
24
+ # model=model,
25
+ # tokenizer=tokenizer,
26
+ # max_length=length,
27
+ # do_sample=True,
28
+ # top_p=0.95,
29
+ # repetition_penalty=1.2,
30
+ # )
31
+ # return pipe
32
+ tokenizer = AutoTokenizer.from_pretrained('replit/replit-code-v1_5-3b', trust_remote_code=True)
33
+ model = AutoModelForCausalLM.from_pretrained('replit/replit-code-v1_5-3b', trust_remote_code=True)
34
+
35
+ x = tokenizer.encode('def fibonacci(n): ', return_tensors='pt')
36
+ y = model.generate(x, max_length=100, do_sample=True, top_p=0.95, top_k=4, temperature=0.2, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
37
+
38
+ # decoding
39
+ generated_code = tokenizer.decode(y[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
40
+ print(generated_code)
41
+
42
+ pipe = generated_code
43
  # tokenizer = AutoTokenizer.from_pretrained("WizardLM/WizardCoder-1B-V1.0")
44
  # base_model = AutoModelForCausalLM.from_pretrained("WizardLM/WizardCoder-1B-V1.0")
45
  # Mistral 7B