import gradio as gr
import pixeltable as pxt
from pixeltable.functions.mistralai import chat_completions
from datetime import datetime
from textblob import TextBlob
import re
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import os
import getpass

# Ensure necessary NLTK data is downloaded
nltk.download('punkt', quiet=True)
nltk.download('stopwords', quiet=True)
nltk.download('punkt_tab', quiet=True)

# Set up Mistral API key
if 'MISTRAL_API_KEY' not in os.environ:
    os.environ['MISTRAL_API_KEY'] = getpass.getpass('Mistral AI API Key:')


# Define UDFs
@pxt.udf
def get_sentiment_score(text: str) -> float:
    # TextBlob polarity ranges from -1.0 (most negative) to 1.0 (most positive)
    return TextBlob(text).sentiment.polarity


@pxt.udf
def extract_keywords(text: str, num_keywords: int = 5) -> list:
    # Keep alphanumeric, non-stopword tokens and rank unique terms by frequency
    stop_words = set(stopwords.words('english'))
    words = word_tokenize(text.lower())
    keywords = [word for word in words if word.isalnum() and word not in stop_words]
    return sorted(set(keywords), key=keywords.count, reverse=True)[:num_keywords]


@pxt.udf
def calculate_readability(text: str) -> float:
    # Simplified Flesch Reading Ease: the full formula also subtracts
    # 84.6 * (syllables / words); only the sentence-length term is used here.
    words = len(re.findall(r'\w+', text))
    sentences = len(re.findall(r'\w+[.!?]', text)) or 1
    average_words_per_sentence = words / sentences
    return 206.835 - 1.015 * average_words_per_sentence


# Function to run inference and analysis
def run_inference_and_analysis(task, system_prompt, input_text, temperature, top_p,
                               max_tokens, stop, random_seed, safe_prompt):
    # Initialize Pixeltable (the table is recreated on every run)
    pxt.drop_table('mistral_prompts', ignore_errors=True)
    t = pxt.create_table('mistral_prompts', {
        'task': pxt.String,
        'system': pxt.String,
        'input_text': pxt.String,
        'timestamp': pxt.Timestamp,
        'temperature': pxt.Float,
        'top_p': pxt.Float,
        'max_tokens': pxt.Int,
        'stop': pxt.String,
        'random_seed': pxt.Int,
        'safe_prompt': pxt.Bool
    })

    # Insert a new row into Pixeltable
    t.insert([{
        'task': task,
        'system': system_prompt,
        'input_text': input_text,
        'timestamp': datetime.now(),
        'temperature': temperature,
        'top_p': top_p,
        'max_tokens': max_tokens,
        'stop': stop,
        'random_seed': random_seed,
        'safe_prompt': safe_prompt
    }])

    # Define messages for chat completion; t.system and t.input_text are
    # column references that Pixeltable resolves per row
    msgs = [
        {'role': 'system', 'content': t.system},
        {'role': 'user', 'content': t.input_text}
    ]

    common_params = {
        'messages': msgs,
        'temperature': temperature,
        'top_p': top_p,
        'max_tokens': max_tokens if max_tokens is not None else 300,
        'stop': stop.split(',') if stop else None,
        'random_seed': random_seed,
        'safe_prompt': safe_prompt
    }

    # Add computed columns for the model responses
    t.add_computed_column(open_mistral_nemo=chat_completions(model='open-mistral-nemo', **common_params))
    t.add_computed_column(mistral_medium=chat_completions(model='mistral-medium', **common_params))

    # Extract the response text from each completion
    t.add_computed_column(omn_response=t.open_mistral_nemo.choices[0].message.content.astype(pxt.String))
    t.add_computed_column(ml_response=t.mistral_medium.choices[0].message.content.astype(pxt.String))

    # Add computed columns for analysis
    t.add_computed_column(large_sentiment_score=get_sentiment_score(t.ml_response))
    t.add_computed_column(large_keywords=extract_keywords(t.ml_response))
    t.add_computed_column(large_readability_score=calculate_readability(t.ml_response))
    t.add_computed_column(open_sentiment_score=get_sentiment_score(t.omn_response))
    t.add_computed_column(open_keywords=extract_keywords(t.omn_response))
    t.add_computed_column(open_readability_score=calculate_readability(t.omn_response))
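    # Each add_computed_column() call both defines the column and computes it
    # for the rows already in the table, so the two chat_completions columns
    # above each trigger one Mistral API call for the row just inserted, and
    # the analysis UDFs then run on those responses. With a persistent table,
    # Pixeltable would also populate these columns automatically for new rows.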
    # Retrieve results for the most recent row
    results = t.select(
        t.omn_response, t.ml_response,
        t.large_sentiment_score, t.open_sentiment_score,
        t.large_keywords, t.open_keywords,
        t.large_readability_score, t.open_readability_score
    ).tail(1)

    history = t.select(
        t.timestamp, t.task, t.system, t.input_text
    ).order_by(t.timestamp, asc=False).collect().to_pandas()
    responses = t.select(
        t.timestamp, t.omn_response, t.ml_response
    ).order_by(t.timestamp, asc=False).collect().to_pandas()
    analysis = t.select(
        t.timestamp,
        t.open_sentiment_score, t.large_sentiment_score,
        t.open_keywords, t.large_keywords,
        t.open_readability_score, t.large_readability_score
    ).order_by(t.timestamp, asc=False).collect().to_pandas()
    params = t.select(
        t.timestamp, t.temperature, t.top_p, t.max_tokens,
        t.stop, t.random_seed, t.safe_prompt
    ).order_by(t.timestamp, asc=False).collect().to_pandas()

    return (
        results['omn_response'][0],
        results['ml_response'][0],
        results['large_sentiment_score'][0],
        results['open_sentiment_score'][0],
        results['large_keywords'][0],
        results['open_keywords'][0],
        results['large_readability_score'][0],
        results['open_readability_score'][0],
        history,
        responses,
        analysis,
        params
    )
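# A direct call to the pipeline, outside the UI, would look like this
# (illustrative values only):
#
#   run_inference_and_analysis(
#       task='Summarization',
#       system_prompt='You are a concise technical summarizer.',
#       input_text='Pixeltable unifies data storage, transformation, and inference.',
#       temperature=0.7, top_p=0.9, max_tokens=300,
#       stop='', random_seed=None, safe_prompt=False
#   )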
def gradio_interface():
    with gr.Blocks(theme=gr.themes.Base(), title="Pixeltable LLM Studio") as demo:
        # Enhanced Header with Branding
        gr.HTML("""
            <div style="text-align: center;">
                <h2>Pixeltable</h2>
                <h1>LLM Studio</h1>
                <p>Powered by Pixeltable's Unified AI Data Infrastructure</p>
            </div>
        """)
""") # Product Overview Cards with gr.Row(): with gr.Column(): gr.HTML("""

🚀 Why Pixeltable?

""") with gr.Column(): gr.HTML("""

💡 Features

""") # Main Interface with gr.Tabs() as tabs: with gr.TabItem("🎯 Experiment", id=0): with gr.Row(): with gr.Column(scale=1): gr.HTML("""

Experiment Setup

Configure your prompt engineering experiment below

""") task = gr.Textbox( label="Task Category", placeholder="e.g., Sentiment Analysis, Text Generation, Summarization", elem_classes="input-style" ) system_prompt = gr.Textbox( label="System Prompt", placeholder="Define the AI's role and task...", lines=3, elem_classes="input-style" ) input_text = gr.Textbox( label="Input Text", placeholder="Enter your prompt or text to analyze...", lines=4, elem_classes="input-style" ) with gr.Accordion("🛠️ Advanced Settings", open=False): temperature = gr.Slider(minimum=0, maximum=1, value=0.7, step=0.1, label="Temperature") top_p = gr.Slider(minimum=0, maximum=1, value=0.9, step=0.1, label="Top P") max_tokens = gr.Number(label="Max Tokens", value=300) stop = gr.Textbox(label="Stop Sequences (comma-separated)") random_seed = gr.Number(label="Random Seed", value=None) safe_prompt = gr.Checkbox(label="Safe Prompt", value=False) submit_btn = gr.Button( "🚀 Run Analysis", variant="primary", scale=1, min_width=200 ) with gr.Column(scale=1): gr.HTML("""

Results

Compare model outputs and analysis metrics

""") with gr.Group(): omn_response = gr.Textbox( label="Open-Mistral-Nemo Response", elem_classes="output-style" ) ml_response = gr.Textbox( label="Mistral-Medium Response", elem_classes="output-style" ) with gr.Group(): with gr.Row(): with gr.Column(): gr.HTML("

📊 Sentiment Analysis

") large_sentiment = gr.Number(label="Mistral-Medium") open_sentiment = gr.Number(label="Open-Mistral-Nemo") with gr.Column(): gr.HTML("

📈 Readability Scores

") large_readability = gr.Number(label="Mistral-Medium") open_readability = gr.Number(label="Open-Mistral-Nemo") gr.HTML("

🔑 Key Terms

") with gr.Row(): large_keywords = gr.Textbox(label="Mistral-Medium Keywords") open_keywords = gr.Textbox(label="Open-Mistral-Nemo Keywords") with gr.TabItem("📊 History & Analysis", id=1): with gr.Tabs(): with gr.TabItem("Prompt History"): history = gr.DataFrame( headers=["Timestamp", "Task", "System Prompt", "Input Text"], wrap=True, elem_classes="table-style" ) with gr.TabItem("Model Responses"): responses = gr.DataFrame( headers=["Timestamp", "Open-Mistral-Nemo", "Mistral-Medium"], wrap=True, elem_classes="table-style" ) with gr.TabItem("Analysis Results"): analysis = gr.DataFrame( headers=[ "Timestamp", "Open-Mistral-Nemo Sentiment", "Mistral-Medium Sentiment", "Open-Mistral-Nemo Keywords", "Mistral-Medium Keywords", "Open-Mistral-Nemo Readability", "Mistral-Medium Readability" ], wrap=True, elem_classes="table-style" ) with gr.TabItem("Model Parameters"): params = gr.DataFrame( headers=[ "Timestamp", "Temperature", "Top P", "Max Tokens", "Stop Sequences", "Random Seed", "Safe Prompt" ], wrap=True, elem_classes="table-style" ) # Footer with links and additional info gr.HTML("""

Built with Pixeltable

The unified data infrastructure for AI applications

📚 Documentation 💻 GitHub 💬 Community
""") # Custom CSS gr.HTML(""" """) submit_btn.click( run_inference_and_analysis, inputs=[ task, system_prompt, input_text, temperature, top_p, max_tokens, stop, random_seed, safe_prompt ], outputs=[ omn_response, ml_response, large_sentiment, open_sentiment, large_keywords, open_keywords, large_readability, open_readability, history, responses, analysis, params # Added params here ] ) return demo if __name__ == "__main__": gradio_interface().launch()