import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Initialize the model and tokenizer
# Load the pretrained "satvikag/chatbot" model once at import time. On any
# failure chat_pipeline is left as None so ai_vote can report the problem
# instead of crashing at import.
try:
    tokenizer = AutoTokenizer.from_pretrained("satvikag/chatbot")
    model = AutoModelForCausalLM.from_pretrained("satvikag/chatbot")
    chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
except Exception as e:
    print(f"Error initializing model: {e}")
    chat_pipeline = None


def ai_vote(poll_title, choices, num_ais):
    """Ask the text-generation model to vote on a poll num_ais times.

    For each of the num_ais iterations, a prompt containing the poll title
    and the comma-joined choices is sent to the pipeline; the first choice
    whose text appears (case-insensitively) in the generated response gets
    one vote, and the response tail after the first newline is recorded as
    that vote's explanation.

    Args:
        poll_title: Title text interpolated into the prompt.
        choices: Iterable of choice strings; also used as tally keys.
        num_ais: Number of model queries (votes) to run.
            # NOTE(review): presumably an int from the UI — if Gradio hands
            # over a float, range() will raise; confirm against the caller.

    Returns:
        On failure, a 2-tuple (error message string, ""). The success return
        value is not visible in this chunk (the function is truncated below).
    """
    if chat_pipeline is None:
        # Model failed to load at import time; surface that instead of crashing.
        return "Error: Model not initialized.", ""
    # Initialize results and explanations
    results = {choice: 0 for choice in choices}
    explanations = []
    # Loop through the number of AIs to get responses
    # NOTE(review): the loop index is unused; each iteration sends the same prompt.
    for i in range(num_ais):
        input_text = f"Poll Title: {poll_title}\nChoices: {', '.join(choices)}\nChoose the best option and explain why."
        try:
            response = chat_pipeline(input_text, max_length=150, num_return_sequences=1)[0]['generated_text']
            # Tally the first choice mentioned anywhere in the response.
            # NOTE(review): if no choice substring matches, this iteration
            # records neither a vote nor an explanation — confirm intended.
            for choice in choices:
                if choice.lower() in response.lower():
                    results[choice] += 1
                    explanation = response.split("\n", 1)[-1].strip()  # Extract explanation
                    explanations.append((choice, explanation))
                    break
        except Exception as e:
            # NOTE(review): broad catch aborts all remaining votes on the
            # first pipeline error and discards any tallies collected so far.
            return f"Error: {str(e)}", ""
    # Convert results to HTML for styled output
    # NOTE(review): SOURCE is truncated here — the f-string below is cut off
    # mid-literal and the rest of the function is not visible in this chunk.
    styled_results = f"