from transformers import AutoModelForCausalLM, AutoTokenizer

# Counter for the second addend in the "What is 1 + N?" questions.
addend_counter = 1


def generate_addition_question():
    """Build the next "What is 1 + N?" question and advance the counter."""
    global addend_counter
    question = f"What is 1 + {addend_counter}?"
    addend_counter += 1
    return question


def get_valid_number(prompt, min_val=1, max_val=100):
    """Ask for an integer between min_val and max_val, retrying until valid."""
    while True:
        try:
            value = int(input(prompt))
            if min_val <= value <= max_val:
                return value
            else:
                print(f"Please enter a number between {min_val} and {max_val}.")
        except ValueError:
            print("Invalid input. Please enter a valid integer.")
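

# Interactive driver: load the fine-tuned model saved under model_output_dir,
# then answer a user-chosen number of incrementing addition questions.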
def main():
    global addend_counter

    # Load the fine-tuned tokenizer and model from the local output directory.
    model_output_dir = "/Users/migueldeguzman/Desktop/papercliptodd/phi-2b/v3/"
    tokenizer = AutoTokenizer.from_pretrained(model_output_dir)
    model = AutoModelForCausalLM.from_pretrained(model_output_dir)

    num_questions = get_valid_number("How many questions would you like to answer? ")

    for _ in range(num_questions):
        # After 100 questions, offer to reset the counter; any other answer ends the session.
        if addend_counter > 100:
            user_choice = input("Reached the limit of 100. Type 'reset' to continue or 'exit' to stop: ").lower()
            if user_choice == 'reset':
                addend_counter = 1
            else:
                break
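
        # Ask the next question and let the model answer it.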
        prompt = generate_addition_question()
        print(prompt)

        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        output = model.generate(
            input_ids,
            max_length=50,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,  # top_k/top_p/temperature only take effect when sampling is enabled
            top_k=50,
            top_p=0.95,
            temperature=0.001,  # near-zero temperature keeps the output effectively deterministic
            pad_token_id=tokenizer.eos_token_id,  # reuse EOS as pad to silence the missing-pad-token warning
        )
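
        # Decode the single returned sequence and print it.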
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
        print("Generated Text:")
        print(generated_text)

    print("Session completed.")


if __name__ == "__main__":
    main()
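
# Usage note: run this file directly with Python. It assumes a fine-tuned
# checkpoint (tokenizer + model) has already been saved at model_output_dir above.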