import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Initialize the model and tokenizer
try:
    tokenizer = AutoTokenizer.from_pretrained("satvikag/chatbot")
    model = AutoModelForCausalLM.from_pretrained("satvikag/chatbot")
    chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
except Exception as e:
    print(f"Error initializing model: {e}")
    chat_pipeline = None
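
# Ask a number of simulated AI voters to pick a poll option and tally their votes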
def ai_vote(poll_title, choices, num_ais):
    if chat_pipeline is None:
        return "Error: Model not initialized.", ""
    # Initialize results and explanations
    results = {choice: 0 for choice in choices}
    explanations = []
    # Loop through the number of AIs to get responses
    for i in range(int(num_ais)):  # cast to int: Gradio sliders can pass float values
        input_text = f"Poll Title: {poll_title}\nChoices: {', '.join(choices)}\nChoose the best option and explain why."
        try:
            response = chat_pipeline(input_text, max_length=150, num_return_sequences=1)[0]['generated_text']
            for choice in choices:
                if choice.lower() in response.lower():
                    results[choice] += 1
                    explanation = response.split("\n", 1)[-1].strip()  # Extract explanation
                    explanations.append((choice, explanation))
                    break
        except Exception as e:
            return f"Error: {str(e)}", ""
    # Convert results to HTML for styled output
    styled_results = f"<h2>{poll_title}</h2>"
    styled_results += "<ul>"
    for choice, votes in results.items():
        styled_results += f"<li><strong>{choice}</strong>: {votes} votes</li>"
    styled_results += "</ul>"
    # Add explanations in HTML
    styled_results += "<h3>AI Explanations:</h3><ul>"
    for choice, explanation in explanations:
        styled_results += f"<li><strong>{choice}:</strong> {explanation}</li>"
    styled_results += "</ul>"
    return styled_results, explanations
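
# Parse the comma-separated choices from the UI and run the poll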
def gradio_interface(title, choices, num_ais):
    try:
        choices = [choice.strip() for choice in choices.split(",")]
        styled_results, explanations = ai_vote(title, choices, num_ais)
        return styled_results, explanations
    except Exception as e:
        return f"Error: {str(e)}", ""
# Gradio Interface
interface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Poll Title"),
        gr.Textbox(label="Choices (comma-separated)"),
        gr.Slider(label="Number of AIs", minimum=1, maximum=10, step=1)
    ],
    outputs=[
        gr.HTML(label="Poll Results"),  # Styled Output
        gr.Textbox(label="Raw AI Explanations")
    ]
)
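
# Launch the app locally when the script is run directly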
if __name__ == "__main__":
    interface.launch()