# OmniCode: a minimal GPT-2-based code-assistant chat script.
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the small distilled GPT-2 checkpoint used as OmniCode's backbone.
tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")

# Persona preamble prepended to every user prompt before generation.
system_message = "You are a code teaching assistant named OmniCode created by Anusha K. Answer all the code related questions being asked."
def generate_response(prompt, max_length=150, temperature=1.0):
    """Generate an OmniCode reply for *prompt*.

    Args:
        prompt: The user's question (plain text).
        max_length: Maximum total sequence length in tokens
            (system message + prompt + generated reply).
        temperature: Sampling temperature. Values other than 1.0 enable
            sampling; at the default 1.0 decoding stays greedy, matching
            the original behavior.

    Returns:
        The generated reply text only — the system message and prompt are
        not echoed back to the caller.
    """
    input_text = system_message + "\n" + prompt
    input_ids = tokenizer.encode(input_text, return_tensors='pt')
    output = model.generate(
        input_ids,
        max_length=max_length,
        temperature=temperature,
        # `temperature` is ignored under greedy decoding; enable sampling
        # only when the caller asks for a non-default temperature, so the
        # default call stays deterministic.
        do_sample=temperature != 1.0,
        pad_token_id=tokenizer.eos_token_id,
        num_return_sequences=1,
    )
    # Decode only the newly generated tokens: decoding output[0] in full
    # would return the system message and prompt as part of the "response".
    new_tokens = output[0][input_ids.shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
if __name__ == "__main__":
    # Simple REPL: keep answering until the user submits an empty line.
    while True:
        question = input("You: ")
        if question:
            print("OmniCode:", generate_response(question))
        else:
            print("Exiting OmniCode. Thank you for using me!")
            break