import re  # regular expression module, used to parse the numeric score out of the reply

import gradio as gr
import openai  # this app targets the pre-1.0 openai package (openai.Completion API)


def evaluate_text(api_key, model, text_to_evaluate):
    """Ask an OpenAI completion model to rate how likely the text is AI-generated (1-100)."""
    # Set the API key for OpenAI
    openai.api_key = api_key

    # Few-shot prompt: two labelled examples, then the text to be scored
    prompt = """Given below are some texts. Please evaluate how likely each text is AI-generated on a scale from 1-100, where 100 means highly likely and 1 means highly unlikely to be AI-generated.
1. "I am going to the park today." - Human written
Evaluation: 5
2. "The quixotic endeavor synergizes cryptographic algorithms." - AI generated
Evaluation: 90
3. "{}"
Evaluation: """.format(text_to_evaluate)

    try:
        # Generate the model's response
        response = openai.Completion.create(
            engine=model,
            prompt=prompt,
            max_tokens=10,
            temperature=0,  # deterministic output keeps the score easy to parse
        )
        raw_evaluation_score = response.choices[0].text.strip()

        # Pull the first run of digits out of the completion
        parsed_score = re.search(r"\d+", raw_evaluation_score)
        if parsed_score:
            return parsed_score.group()
        return "Score could not be parsed."
    except Exception as e:
        return str(e)
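
# A minimal usage sketch, kept commented out so the Gradio app below stays the only
# entry point. The key string is a placeholder, and "text-davinci-002" is assumed to
# be available to the account behind that key:
#
#   score = evaluate_text("sk-your-key-here", "text-davinci-002",
#                         "I am going to the park today.")
#   print(score)  # a digits-only string such as "5", or an error message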

# Gradio UI (uses the gr.inputs / gr.outputs namespaces, i.e. Gradio 3.x or earlier)
iface = gr.Interface(
    fn=evaluate_text,
    inputs=[
        gr.inputs.Textbox(lines=1, label="Enter your OpenAI API Key", type="password"),
        gr.inputs.Dropdown(choices=["text-davinci-002", "text-ada-001"], label="Model"),
        gr.inputs.Textbox(lines=5, label="Text to Evaluate")
    ],
    outputs=gr.outputs.Textbox(label="AI Score out of 100"),  # custom output label
    live=False
)
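# launch() serves the app locally (Gradio defaults to http://127.0.0.1:7860);
# pass share=True to iface.launch() for a temporary public link.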
iface.launch()