curiouscurrent committed on
Commit
a7322e4
·
verified ·
1 Parent(s): 8a7fda2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -23
app.py CHANGED
@@ -1,26 +1,30 @@
1
- from transformers import AutoTokenizer
2
- import transformers
3
- import torch
4
 
5
- model = "codellama/CodeLlama-7b-hf"
 
 
 
6
 
7
- tokenizer = AutoTokenizer.from_pretrained(model)
8
- pipeline = transformers.pipeline(
9
- "text-generation",
10
- model=model,
11
- torch_dtype=torch.float16,
12
- device_map="auto",
13
- )
14
 
15
- sequences = pipeline(
16
- 'import socket\n\ndef ping_exponential_backoff(host: str):',
17
- do_sample=True,
18
- top_k=10,
19
- temperature=0.1,
20
- top_p=0.95,
21
- num_return_sequences=1,
22
- eos_token_id=tokenizer.eos_token_id,
23
- max_length=200,
24
- )
25
- for seq in sequences:
26
- print(f"Result: {seq['generated_text']}")
 
 
 
 
 
 
 
 
 
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load pre-trained model and tokenizer.
# BUG FIX: "codellama/CodeLlama-7b-hf" is a Llama-architecture checkpoint;
# GPT2Tokenizer / GPT2LMHeadModel cannot load it and would raise at startup.
# The Auto* classes resolve the correct architecture from the checkpoint's
# config, so they work for CodeLlama (and any other causal-LM checkpoint).
model_name = "codellama/CodeLlama-7b-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# System message prepended to every user prompt before generation.
system_message = "You are a code teaching assistant named OmniCode created by Anusha K. Answer all the code related questions being asked."
 
 
 
 
 
10
 
11
def generate_response(prompt, max_length=150, temperature=1.0):
    """Generate a model reply to *prompt*, prefixed by the system message.

    Args:
        prompt: The user's question (plain text).
        max_length: Total token budget for prompt + generated continuation.
        temperature: Sampling temperature; higher values give more varied output.

    Returns:
        The newly generated text only (system message and prompt stripped).
    """
    input_text = system_message + "\n" + prompt
    # Tokenize via __call__ so we also get an attention mask; without it,
    # generate() warns and may attend to padding incorrectly.
    inputs = tokenizer(input_text, return_tensors='pt')

    # Generate response.
    # BUG FIX: the original call omitted do_sample=True, so `temperature`
    # was silently ignored (generation defaults to greedy decoding).
    output = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_length=max_length,
        temperature=temperature,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
        num_return_sequences=1,
    )

    # BUG FIX: decode only the newly generated tokens — previously the
    # system message and the user's own prompt were echoed back in the reply.
    new_tokens = output[0][inputs.input_ids.shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
25
+
26
if __name__ == "__main__":
    # Simple interactive REPL.
    # ROBUSTNESS FIX: exit cleanly on Ctrl-D (EOFError) or Ctrl-C
    # (KeyboardInterrupt) instead of dumping a traceback.
    while True:
        try:
            user_input = input("You: ")
        except (EOFError, KeyboardInterrupt):
            print()  # move past the prompt line before exiting
            break
        response = generate_response(user_input)
        print("OmniCode:", response)