rodrisouza committed
Commit a421387 · verified · 1 Parent(s): d5d1f4c

Update app.py

Files changed (1)
  1. app.py +3 -6
app.py CHANGED
@@ -13,8 +13,8 @@ torch.jit.script = lambda f: f
  # Initialize Google Sheets client
  client = init_google_sheets_client()
  sheet = client.open(google_sheets_name)
- stories_sheet = sheet.worksheet("Stories")  # Assuming stories are in the second sheet (index 1)
- system_prompts_sheet = sheet.worksheet("System Prompts")  # Assuming system prompts are in a separate sheet
+ stories_sheet = sheet.worksheet("Stories")
+ system_prompts_sheet = sheet.worksheet("System Prompts")

  # Load stories from Google Sheets
  def load_stories():
@@ -77,7 +77,6 @@ def interact(user_input, history, interaction_count):
          raise ValueError("Tokenizer or model is not initialized.")

      interaction_count += 1
-     print(f"Interaction count: {interaction_count}")

      messages = history + [{"role": "user", "content": user_input}]

@@ -93,7 +92,7 @@ def interact(user_input, history, interaction_count):

      # Generate response using selected model
      input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to("cuda")
-     chat_history_ids = model.generate(input_ids, max_new_tokens=100, pad_token_id=tokenizer.eos_token_id)
+     chat_history_ids = model.generate(input_ids, max_new_tokens=100, pad_token_id=tokenizer.eos_token_id, temperature=0.1)
      response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)

      # Update chat history with generated response
@@ -131,8 +130,6 @@ Here is the story:
      if combined_message:
          chat_history = []  # Reset chat history
          chat_history.append({"role": "system", "content": combined_message})
-
-         # Generate the first question based on the story
          question_prompt = "Please ask a simple question about the story to encourage interaction."
          _, formatted_history, chat_history, interaction_count = interact(question_prompt, chat_history, interaction_count)
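A note on the main change in this commit, the temperature=0.1 argument added to model.generate: in Hugging Face transformers, temperature only influences decoding when sampling is enabled, and with the default do_sample=False the model decodes greedily, with newer versions warning that the setting is ignored. The sketch below shows a generate call in which the low temperature actually takes effect. It is illustrative only: gpt2 and the example prompt are placeholders, not the model or prompts used by app.py.

from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder model and tokenizer; app.py loads its own selected model instead of gpt2.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompt = "Here is a short story. Please ask a simple question about it."
input_ids = tokenizer(prompt, return_tensors="pt").input_ids  # add .to("cuda") when a GPU is available

output_ids = model.generate(
    input_ids,
    max_new_tokens=100,
    pad_token_id=tokenizer.eos_token_id,
    do_sample=True,    # temperature is only applied when sampling is enabled
    temperature=0.1,   # low temperature keeps output close to greedy decoding
)

# Strip the prompt tokens and keep only the newly generated continuation.
response = tokenizer.decode(output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
print(response)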
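For background on the worksheet lookups touched in the first hunk: init_google_sheets_client is defined elsewhere in the repository, so the snippet below is only a guess at how such a helper is commonly written with gspread and a service account. The function body, credential filename, and spreadsheet name are assumptions rather than code from this project; the point is that worksheet() selects a tab by its title, which is why the stale "index 1" comment could be dropped.

import gspread

# Hypothetical helper: the real init_google_sheets_client may authenticate differently.
def init_google_sheets_client():
    # Reads a Google service-account JSON key; the filename is a placeholder.
    return gspread.service_account(filename="service_account.json")

client = init_google_sheets_client()
sheet = client.open("my-google-sheet")            # stand-in for google_sheets_name
stories_sheet = sheet.worksheet("Stories")        # tabs are looked up by title, not position
system_prompts_sheet = sheet.worksheet("System Prompts")

# Rows come back as dicts keyed by the header row, e.g. for loading stories.
stories = stories_sheet.get_all_records()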