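"""Espresso with LeProf: a Gradio chat app that serves topic-focused "knowledge
shots" by sending the user's question, framed by an expert system prompt, to
Google's Gemini API via the google-generativeai SDK."""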
import os
import time
from typing import List, Tuple, Optional
import google.generativeai as genai
import gradio as gr
from PIL import Image
# Read the Google API key from the environment; it can also be entered in the UI.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
TITLE = """<h1 align="center">β˜• Espresso with LeProf πŸ”₯</h1>"""
SUBTITLE = """<h2 align="center">🌟 Knowledge Shots for Curious Minds</h2>"""
IMAGE_WIDTH = 512
def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
    """Split a comma-separated string (e.g. "STOP, END") into a list of stop sequences."""
    return [seq.strip() for seq in stop_sequences.split(",")] if stop_sequences else None
def preprocess_image(image: Image.Image) -> Image.Image:
    """Resize an image to IMAGE_WIDTH while preserving its aspect ratio (currently unused)."""
    image_height = int(image.height * IMAGE_WIDTH / image.width)
    return image.resize((IMAGE_WIDTH, image_height))
def user(text_prompt: str, chatbot: List[Tuple[str, str]]):
    """Append the user's message to the chat history and clear the input box."""
    return "", chatbot + [[text_prompt, None]]
def bot(
    google_key: str,
    temperature: float,
    max_output_tokens: int,
    stop_sequences: str,
    top_k: int,
    top_p: float,
    topic: str,
    chatbot: List[Tuple[str, str]],
):
    """Generate a topic-focused response with Google Gemini and stream it to the chatbot."""
    google_key = google_key or GOOGLE_API_KEY
    if not google_key:
        raise ValueError("GOOGLE_API_KEY is not set. Please set it in the environment or the UI.")
    if not topic:
        raise ValueError("Topic is not set. Please provide a topic.")

    # The user's latest message was appended to the history by `user`.
    text_prompt = chatbot[-1][0]

    # System prompt that frames the model as an expert on the chosen topic.
    analysis_system_prompt = (
        f"You are an expert in {topic}. Analyze the provided text with a focus on {topic}, "
        "identifying recent issues, insights, or improvements relevant to academic standards and effectiveness. "
        "Offer actionable advice for enhancing knowledge and provide real-life examples."
    )

    # Configure the Generative AI client and the generation parameters.
    genai.configure(api_key=google_key)
    generation_config = genai.types.GenerationConfig(
        temperature=temperature,
        max_output_tokens=max_output_tokens,
        stop_sequences=preprocess_stop_sequences(stop_sequences),
        top_k=top_k,
        top_p=top_p,
    )
    model = genai.GenerativeModel("gemini-1.5-pro-latest")

    try:
        # Send the system prompt and the user's question together and stream the reply.
        response = model.generate_content(
            [analysis_system_prompt, text_prompt],
            generation_config=generation_config,
            stream=True,
        )
    except Exception as e:
        raise RuntimeError(f"Error in response generation: {e}") from e

    # Stream the reply into the chat window in small increments for a typing effect.
    chatbot[-1][1] = ""
    for chunk in response:
        for i in range(0, len(chunk.text), 10):
            chatbot[-1][1] += chunk.text[i : i + 10]
            time.sleep(0.01)
            yield chatbot
# Gradio Components
google_key_component = gr.Textbox(
label="GOOGLE API KEY",
type="password",
placeholder="Enter your API key...",
visible=GOOGLE_API_KEY is None,
)
topic_input = gr.Textbox(label="Set the Topic", placeholder="e.g., AI in Education, Human-Computer Interaction")
text_prompt_component = gr.Textbox(label="Ask LeProf", placeholder="Type your question here...")
chatbot_component = gr.Chatbot(label="LeProf says")
run_button_component = gr.Button("πŸ«— Get Your Knowledge Shot")
example_data = [
["AI in Education", "What are the challenges in AI tools for personalized learning?"],
["Multimedia Accessibility", "How can multimedia be made more accessible to people with disabilities?"],
["Ethical AI", "What are the ethical implications of AI in social media content moderation?"],
["Virtual Reality", "How does virtual reality improve skill training in industries?"],
["Augmented Reality", "What are the UX challenges in augmented reality for urban navigation?"],
]
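# Selecting one of these examples fills in both the topic and the question fields.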
# Advanced Settings
temperature_component = gr.Slider(
minimum=0, maximum=1.0, value=0.4, step=0.05, label="Creativity Level"
)
max_output_tokens_component = gr.Slider(
minimum=1, maximum=2048, value=1024, step=1, label="Max Tokens"
)
stop_sequences_component = gr.Textbox(label="Stop Sequences", placeholder="e.g., STOP, END")
top_k_component = gr.Slider(
minimum=1, maximum=40, value=32, step=1, label="Top-K Sampling"
)
top_p_component = gr.Slider(
minimum=0, maximum=1.0, value=1.0, step=0.01, label="Top-P Sampling"
)
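# Top-K restricts sampling to the K most likely next tokens; Top-P (nucleus
# sampling) restricts it to the smallest token set whose cumulative probability is P.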
# Layout with Gradio Blocks
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    google_key_component.render()
    topic_input.render()
    chatbot_component.render()
    text_prompt_component.render()
    gr.Examples(
        examples=example_data,
        inputs=[topic_input, text_prompt_component],
        label="Example Questions",
    )
    run_button_component.render()
    with gr.Accordion("Parameters", open=False):
        temperature_component.render()
        max_output_tokens_component.render()
        stop_sequences_component.render()
    with gr.Accordion("Advanced Settings", open=False):
        top_k_component.render()
        top_p_component.render()
    # Event Handlers
    run_button_component.click(
        fn=user,
        inputs=[text_prompt_component, chatbot_component],
        outputs=[text_prompt_component, chatbot_component],
        queue=False,
    ).then(
        fn=bot,
        inputs=[
            google_key_component,
            temperature_component,
            max_output_tokens_component,
            stop_sequences_component,
            top_k_component,
            top_p_component,
            topic_input,
            chatbot_component,
        ],
        outputs=[chatbot_component],
    )
    text_prompt_component.submit(
        fn=user,
        inputs=[text_prompt_component, chatbot_component],
        outputs=[text_prompt_component, chatbot_component],
        queue=False,
    ).then(
        fn=bot,
        inputs=[
            google_key_component,
            temperature_component,
            max_output_tokens_component,
            stop_sequences_component,
            top_k_component,
            top_p_component,
            topic_input,
            chatbot_component,
        ],
        outputs=[chatbot_component],
    )
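# Running this script (e.g. `python app.py`) starts a local Gradio web server.
# It requires the gradio, google-generativeai, and Pillow packages.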
# Launch the App (queue enabled so the streamed generator responses work)
demo.queue().launch()