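"""Quiz a fine-tuned causal LM on incrementing addition questions ("What is 1 + N?"),
generating each answer with Hugging Face transformers."""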
from transformers import AutoModelForCausalLM, AutoTokenizer
addend_counter = 1  # Global counter for the second addend


def generate_addition_question():
    """Build the next 'What is 1 + N?' question and advance the counter."""
    global addend_counter
    question = f"What is 1 + {addend_counter}?"
    addend_counter += 1
    return question
def get_valid_number(prompt, min_val=1, max_val=100):
    """Prompt until the user enters an integer in [min_val, max_val]."""
    while True:
        try:
            value = int(input(prompt))
            if min_val <= value <= max_val:
                return value
            print(f"Please enter a number between {min_val} and {max_val}.")
        except ValueError:
            print("Invalid input. Please enter a valid integer.")
def main():
    """Load the model, then answer as many addition questions as the user requests."""
    global addend_counter

    # Load the fine-tuned model and tokenizer
    model_output_dir = "/Users/migueldeguzman/Desktop/papercliptodd/phi-2b/v3/"  # Replace with your model directory
    tokenizer = AutoTokenizer.from_pretrained(model_output_dir)
    model = AutoModelForCausalLM.from_pretrained(model_output_dir)

    num_questions = get_valid_number("How many questions would you like to answer? ")
    for _ in range(num_questions):
        if addend_counter > 100:
            user_choice = input("Reached the limit of 100. Type 'reset' to continue or 'exit' to stop: ").lower()
            if user_choice == 'reset':
                addend_counter = 1
            else:
                break

        prompt = generate_addition_question()
        print(prompt)  # Display the question
        inputs = tokenizer(prompt, return_tensors="pt")
        output = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=50,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,  # Required for top_k/top_p/temperature to take effect
            top_k=50,
            top_p=0.95,
            temperature=0.001,  # Near-zero temperature makes sampling almost deterministic
            pad_token_id=tokenizer.eos_token_id,  # Avoids a warning for models without a pad token
        )
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
        print("Generated Text:")
        print(generated_text)

        # Optional: User feedback mechanism
        # print("Was the answer correct? (yes/no): ")
        # feedback = input().lower()

    print("Session completed.")


if __name__ == "__main__":
    main()