sashtech committed
Commit b3aee5e · verified · 1 parent: 30196dc

Update app.py

Files changed (1)
app.py +12 -12
app.py CHANGED
@@ -58,9 +58,8 @@ def detect_ai_generated(text):
     inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512).to(device)
     with torch.no_grad():
         outputs = model(**inputs)
-    probabilities = torch.softmax(outputs.logits, dim=1)
-    ai_probability = probabilities[0][1].item()  # Probability of being AI-generated
-    return ai_probability
+    probabilities = torch.softmax(outputs.logits, dim=1)
+    return probabilities[0][1].item()  # Probability of being AI-generated
 
 # Humanize the AI-detected text using the SRDdev Paraphrase model
 def humanize_text(AI_text):
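
Note: for context, a runnable sketch of the updated function in full. The detector checkpoint below is a placeholder assumption; this diff does not show which model and tokenizer app.py actually loads.

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"

# Placeholder checkpoint: the real detector used by app.py is outside this diff.
checkpoint = "openai-community/roberta-base-openai-detector"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint).to(device)

def detect_ai_generated(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512).to(device)
    with torch.no_grad():  # inference only
        outputs = model(**inputs)
    probabilities = torch.softmax(outputs.logits, dim=1)  # logits -> class probabilities
    return probabilities[0][1].item()  # probability that the text is AI-generated

Returning the scalar directly just drops the intermediate ai_probability variable; behavior is unchanged.
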
@@ -69,14 +68,15 @@ def humanize_text(AI_text):
     for paragraph in paragraphs:
         if paragraph.strip():
             inputs = paraphrase_tokenizer(paragraph, return_tensors="pt", max_length=512, truncation=True).to(device)
-            paraphrased_ids = paraphrase_model.generate(
-                inputs['input_ids'],
-                max_length=inputs['input_ids'].shape[-1] + 20,  # Slightly more than the original input length
-                num_beams=4,
-                early_stopping=True,
-                length_penalty=1.0,
-                no_repeat_ngram_size=3,
-            )
+            with torch.no_grad():  # Avoid gradient calculations for faster inference
+                paraphrased_ids = paraphrase_model.generate(
+                    inputs['input_ids'],
+                    max_length=inputs['input_ids'].shape[-1] + 20,  # Slightly more than the original input length
+                    num_beams=4,
+                    early_stopping=True,
+                    length_penalty=1.0,
+                    no_repeat_ngram_size=3,
+                )
             paraphrased_text = paraphrase_tokenizer.decode(paraphrased_ids[0], skip_special_tokens=True)
             paraphrased_paragraphs.append(paraphrased_text)
     return "\n\n".join(paraphrased_paragraphs)
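
Note: the only change here is wrapping generate() in torch.no_grad(), which skips autograd bookkeeping during inference. A self-contained sketch of the per-paragraph step; the checkpoint id is assumed from the "SRDdev Paraphrase" comment above, not taken from this diff.

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"

# Checkpoint id assumed from the comment above; verify against what app.py loads.
paraphrase_tokenizer = AutoTokenizer.from_pretrained("SRDdev/Paraphrase")
paraphrase_model = AutoModelForSeq2SeqLM.from_pretrained("SRDdev/Paraphrase").to(device)

def paraphrase_paragraph(paragraph):
    inputs = paraphrase_tokenizer(paragraph, return_tensors="pt", max_length=512, truncation=True).to(device)
    with torch.no_grad():  # no gradients needed at inference time
        paraphrased_ids = paraphrase_model.generate(
            inputs["input_ids"],
            max_length=inputs["input_ids"].shape[-1] + 20,  # allow a little growth over the input
            num_beams=4,             # beam search for higher-quality paraphrases
            early_stopping=True,     # stop once all beams have finished
            length_penalty=1.0,      # neutral length preference
            no_repeat_ngram_size=3,  # block repeated trigrams
        )
    return paraphrase_tokenizer.decode(paraphrased_ids[0], skip_special_tokens=True)

In recent transformers releases generate() is itself decorated with @torch.no_grad(), so the explicit guard is redundant but harmless, and it also covers any tensor work added around the call later.
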
@@ -104,4 +104,4 @@ interface = gr.Interface(
 )
 
 # Launch the Gradio app
-interface.launch(debug=True)
+interface.launch(debug=False)  # Turn off debug mode for production
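
Note: debug=True mainly keeps the launch call blocking and streams errors to the console, which is useful in notebooks; turning it off is the usual default for a deployed app. A minimal sketch of the wiring, with the interface spec assumed since the gr.Interface(...) arguments sit outside this diff:

import gradio as gr

def run(text):
    # Placeholder pipeline: in app.py this calls detect_ai_generated() / humanize_text().
    return text

interface = gr.Interface(fn=run, inputs="text", outputs="text")

# Launch the Gradio app
interface.launch(debug=False)  # debug mode off for production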
 