Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -7,23 +7,28 @@ model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
|
|
7 |
# System message
|
8 |
system_message = "You are a code teaching assistant named OmniCode created by Anusha K. Answer all the code related questions being asked."
|
9 |
|
|
|
10 |
def generate_response(prompt, max_length=150, temperature=1.0):
|
11 |
input_text = system_message + "\n" + prompt
|
12 |
input_ids = tokenizer.encode(input_text, return_tensors='pt')
|
13 |
|
14 |
# Generate response
|
15 |
-
output = model.generate(input_ids,
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
|
21 |
# Decode and return the response
|
22 |
response = tokenizer.decode(output[0], skip_special_tokens=True)
|
23 |
return response
|
24 |
|
|
|
25 |
if __name__ == "__main__":
|
26 |
while True:
|
27 |
user_input = input("You: ")
|
|
|
|
|
|
|
28 |
response = generate_response(user_input)
|
29 |
print("OmniCode:", response)
|
|
|
# System prompt prepended to every user question before generation.
system_message = "You are a code teaching assistant named OmniCode created by Anusha K. Answer all the code related questions being asked."
def generate_response(prompt, max_length=150, temperature=1.0):
    """Generate a reply to *prompt* with the OmniCode system message prepended.

    Args:
        prompt: The user's question (plain text).
        max_length: Maximum total token length (prompt + continuation) of the
            generated sequence.
        temperature: Softmax temperature. Values other than 1.0 now enable
            sampling (see bug-fix note below); at the default 1.0 decoding
            stays greedy and deterministic, matching the original behavior.

    Returns:
        The full decoded model output as a string.

    NOTE(review): the returned text includes the system message and the
    prompt echoed back, because GPT-2's `generate` output contains the
    input ids — slicing them off may be desirable but would change the
    returned value, so it is left as-is.
    """
    input_text = system_message + "\n" + prompt
    input_ids = tokenizer.encode(input_text, return_tensors='pt')

    # BUG FIX: `temperature` was silently ignored — Hugging Face `generate`
    # defaults to greedy decoding (do_sample=False), under which the
    # temperature argument has no effect. Enable sampling only when a
    # non-default temperature is requested, so the default call remains
    # greedy and deterministic exactly as before.
    output = model.generate(input_ids,
                            max_length=max_length,
                            temperature=temperature,
                            do_sample=temperature != 1.0,
                            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; use EOS
                            num_return_sequences=1)

    # Decode and return the response
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response
if __name__ == "__main__":
    # Interactive chat loop: an empty input line ends the session.
    while True:
        user_input = input("You: ")
        if user_input:
            print("OmniCode:", generate_response(user_input))
        else:
            print("Exiting OmniCode. Thank you for using me!")
            break