Spaces:
Sleeping
Sleeping
File size: 3,539 Bytes
a09c6ce 0ce1955 54a8d6c 35a297f 54a8d6c 35a297f 54a8d6c b585e42 54a8d6c 35a297f 54a8d6c 35a297f 54a8d6c b585e42 54a8d6c 35a297f 54a8d6c 35a297f 54a8d6c 35a297f 54a8d6c 379e15d 54a8d6c b585e42 54a8d6c 379e15d 54a8d6c 379e15d 54a8d6c 379e15d b585e42 54a8d6c b585e42 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 |
import json

import gradio as gr
# Template is used with {% for %} / {{ }} syntax below, i.e. Jinja2.
# jinja2 is a hard runtime dependency of gradio, so it is always installed
# alongside it — this adds no new environment requirement.
from jinja2 import Template

from collinear import Collinear
# Jinja2 template for the "Conv" input style: the document as context,
# then every conversation turn rendered as "role: content" lines.
conv_template = Template(
"""
# Context:
{{ document }}
# Claim:
{% for message in conversation %}
{{ message.role }}: {{ message.content }}
{% endfor %}
"""
)
# Jinja2 template for the "QA format" input style: the document as context,
# then a single user question / assistant answer pair as the claim.
qa_template = Template(
"""
# Context:
{{ document }}
# Claim:
user: {{ question }}
assistant: {{ answer }}
"""
)
# Jinja2 template for the "NLI" input style: the document as context,
# then a bare assistant claim to be checked against it.
nli_template = Template(
"""
# Context:
{{ document }}
# Claim:
assistant: {{ claim }}
"""
)
# Function to dynamically update inputs based on the input style
def update_inputs(input_style):
    """Return visibility updates for the five input fields.

    The original body was entirely commented out, which is a syntax error
    (a ``def`` needs at least one statement); this restores the logic the
    comments described.

    Args:
        input_style: One of "Conv", "NLI", or "QA format".

    Returns:
        A 5-tuple of ``gr.update`` objects for
        (document, conversation, claim, question, answer), in the same
        order as the ``outputs`` list wired to this callback.
    """
    if input_style == "Conv":
        visibility = (True, True, False, False, False)
    elif input_style == "NLI":
        visibility = (True, False, True, False, False)
    else:  # "QA format"
        visibility = (True, False, False, True, True)
    return tuple(gr.update(visible=flag) for flag in visibility)
# Function to judge reliability based on the selected input format
def judge_reliability(input_style, document, conversation, claim, question, answer):
    """Render the claim-vs-context text for the selected input format.

    The original body was entirely commented out (a syntax error); this
    restores the rendering logic from those comments.

    Args:
        input_style: One of "Conv", "NLI", or "QA format".
        document: Context document text.
        conversation: JSON-encoded list of {"role", "content"} messages
            (only used for the "Conv" style).
        claim: Bare claim text (only used for the "NLI" style).
        question: User question (only used for the "QA format" style).
        answer: Assistant answer (only used for the "QA format" style).

    Returns:
        A string shown in the Results box.
    """
    if input_style == "Conv":
        try:
            messages = json.loads(conversation)
        except json.JSONDecodeError as err:
            # Surface bad input in the UI instead of crashing the callback.
            return f"Invalid conversation JSON: {err}"
        text = conv_template.render(document=document, conversation=messages)
    elif input_style == "NLI":
        text = nli_template.render(document=document, claim=claim)
    else:  # "QA format"
        text = qa_template.render(document=document, question=question, answer=answer)
    # TODO(review): the commented-out original called model_pipeline(text),
    # but model_pipeline is not defined anywhere in this file. Once a model
    # is wired in, replace the echo below with its outputs
    # (f"Reliability Judge Outputs: {outputs}").
    return f"Rendered judge input:\n{text}"
# Assemble the UI with gr.Blocks: one row for the style selector, one for
# the five (conditionally visible) inputs, and one for the results box.
with gr.Blocks() as demo:
    with gr.Row():
        style_selector = gr.Dropdown(
            label="Input Style",
            choices=["Conv", "NLI", "QA format"],
            value="Conv",
            visible=True,
        )
    with gr.Row():
        doc_box = gr.Textbox(
            label="Document",
            lines=5,
            visible=True,
            value="Chris Voss is one of the best negotiators in the world. And he was born in Iowa, USA.",
        )
        conv_box = gr.Textbox(
            label="Conversation",
            lines=5,
            visible=True,
            value='[{"role": "user", "content": "Hi Chris Voss, Where are you born?"}, {"role": "assistant", "content": "I am born in Iowa"}]',
        )
        claim_box = gr.Textbox(
            label="Claim",
            lines=5,
            visible=False,
            value="CV was born in Iowa",
        )
        question_box = gr.Textbox(
            label="Question",
            lines=5,
            visible=False,
            value="Where is Chris Voss born?",
        )
        answer_box = gr.Textbox(
            label="Answer",
            lines=5,
            visible=False,
            value="CV was born in Iowa",
        )
    with gr.Row():
        results_box = gr.Textbox(label="Results")

    # Re-show/hide the input fields whenever the style selection changes.
    style_selector.change(
        fn=update_inputs,
        inputs=[style_selector],
        outputs=[doc_box, conv_box, claim_box, question_box, answer_box],
    )

    # Run the reliability judge when the user submits.
    gr.Button("Submit").click(
        fn=judge_reliability,
        inputs=[style_selector, doc_box, conv_box, claim_box, question_box, answer_box],
        outputs=results_box,
    )

# Launch the demo only when executed as a script.
if __name__ == "__main__":
    demo.launch()
|