CreitinGameplays committed
Commit d94551d · verified · 1 Parent(s): b71d285

Update app.py

Files changed (1): app.py +7 -4
app.py CHANGED
@@ -23,7 +23,10 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 def generate_text(user_prompt):
     """Generates text using the ConvAI model from Hugging Face Transformers and removes the user prompt."""
     # Construct the full prompt with system introduction, user prompt, and assistant role
-    prompt = f"<|system|> You are a helpful AI assistant. </s> <|prompter|> {user_prompt} </s> <|assistant|>"
+
+    system = "You are a helpful AI language model called ChatGPT, your goal is helping users with their questions."
+
+    prompt = f"<|system|> {system} </s> <|user|> {user_prompt} </s>"
 
     # Encode the entire prompt into tokens
     prompt_encoded = tokenizer.encode(prompt, return_tensors="pt").to(device)
@@ -44,10 +47,10 @@ def generate_text(user_prompt):
     # Decode the generated token sequence back to text
     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
 
-    # Extract the assistant's response (assuming it starts with "<|assistant|>")
-    assistant_response = generated_text.split("<|assistant|>")[-1]
+    # Extract the assistant's response
+    assistant_response = generated_text.split("</s>")[-1]
     assistant_response = assistant_response.replace(f"{user_prompt}", "").strip()
-    assistant_response = assistant_response.replace("You are a helpful AI assistant.", "").strip()
+    assistant_response = assistant_response.replace(system, "").strip()
 
     return assistant_response
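For reference, below is a minimal sketch of how the revised prompt construction and response extraction behave at the string level. The model call is mocked with a hard-coded completion; the real tokenizer, model, and generation arguments live elsewhere in app.py and are not part of this diff, so treat those names as assumptions.

# Minimal sketch of the new prompt/extraction flow (model call mocked).
user_prompt = "What is the capital of France?"
system = ("You are a helpful AI language model called ChatGPT, "
          "your goal is helping users with their questions.")

# New prompt format introduced by this commit.
prompt = f"<|system|> {system} </s> <|user|> {user_prompt} </s>"

# Stand-in for tokenizer.decode(output[0], skip_special_tokens=True).
# Caveat (assumption): if "</s>" is the tokenizer's EOS special token,
# skip_special_tokens=True may already strip it, and the split below
# would then return the whole decoded string instead of the tail.
generated_text = f"{prompt} The capital of France is Paris."

# Extraction logic from the new version of generate_text().
assistant_response = generated_text.split("</s>")[-1]
assistant_response = assistant_response.replace(f"{user_prompt}", "").strip()
assistant_response = assistant_response.replace(system, "").strip()

print(assistant_response)  # "The capital of France is Paris."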