chegde committed on
Commit
f486321
·
verified ·
1 Parent(s): f9a267b

Clean up code output.

Browse files
Files changed (1) hide show
  1. app.py +11 -0
app.py CHANGED
@@ -27,6 +27,13 @@ except Exception as e:
27
  veri_tokenizer = None
28
 
29
  @spaces.GPU(duration=60)
 
 
 
 
 
 
 
30
  def generate_response(user_message, history):
31
  if not veri_model or not veri_tokenizer:
32
  return history + [["Error", "Model not loaded properly"]]
@@ -73,6 +80,10 @@ def generate_response(user_message, history):
73
 
74
  response = veri_tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
75
 
 
 
 
 
76
  if torch.cuda.is_available():
77
  torch.cuda.empty_cache()
78
 
 
27
  veri_tokenizer = None
28
 
29
  @spaces.GPU(duration=60)
30
def truncate_at_code_end(text):
    """Truncate generated text at the first 'CODE END' marker.

    The model tends to repeat itself after emitting its end-of-code
    sentinel; everything past the first occurrence of "CODE END" is
    dropped. The marker itself is kept in the output.

    NOTE(review): in the committed file this helper was inserted between
    ``@spaces.GPU(duration=60)`` and ``def generate_response`` — the
    decorator may now bind to this helper instead of generate_response;
    verify the decorator placement in app.py.

    Args:
        text: raw decoded model output.

    Returns:
        The text up to and including the first "CODE END" (or the whole
        text when the marker is absent), stripped of surrounding
        whitespace.
    """
    # Single pass: partition() finds the first occurrence, like find(),
    # but avoids scanning the string twice ("in" check + find()).
    head, marker, _tail = text.partition("CODE END")
    if marker:
        return (head + marker).strip()
    return text.strip()
37
  def generate_response(user_message, history):
38
  if not veri_model or not veri_tokenizer:
39
  return history + [["Error", "Model not loaded properly"]]
 
80
 
81
  response = veri_tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
82
 
83
+ # Truncate at CODE END to remove repetitive content
84
+ response = truncate_at_code_end(response)
85
+
86
+
87
  if torch.cuda.is_available():
88
  torch.cuda.empty_cache()
89