# Hugging Face Space: free-tier Gemini chatbot + Visual Question Answering demo.
import gradio as gr | |
import google.generativeai as genai | |
import os | |
# Set up Gemini API key (use environment variable on Hugging Face) | |
API_KEY = os.getenv("GEMINI_API_KEY") # Set this in Hugging Face secrets | |
genai.configure(api_key=API_KEY) | |
# Use Gemini 1.5 Flash (free-tier accessible) | |
model = genai.GenerativeModel("gemini-1.5-flash") | |
def chatbot(prompt, history=None):
    """Generate a chatbot reply for *prompt* using the Gemini API.

    Args:
        prompt: The user's message.
        history: Prior chat turns supplied by ``gr.ChatInterface``; the
            stateless call below does not read it. Default changed from a
            mutable ``[]`` (shared across calls — a Python anti-pattern)
            to ``None``, which is behaviorally equivalent here.

    Returns:
        The model's text reply, or an ``"Error: ..."`` string on failure.
    """
    try:
        response = model.generate_content(prompt)
        return response.text
    except Exception as e:
        # Surface the failure in the chat window instead of crashing the UI.
        return f"Error: {e}"
def vqa(image, question):
    """Answer *question* about *image* via a multimodal Gemini call.

    Args:
        image: The uploaded image. NOTE(review): ``gr.Image(type="filepath")``
            delivers a path string; confirm the SDK accepts a path here
            rather than a loaded PIL image.
        question: The question to ask about the image.

    Returns:
        The model's text answer, or an ``"Error: ..."`` string on failure.
    """
    try:
        # Multimodal prompt: a list of [text, image] parts.
        response = model.generate_content([question, image])
        return response.text
    except Exception as e:
        # Report the failure in the output box instead of raising.
        return f"Error: {e}"
# --- Gradio interfaces -------------------------------------------------------

# Chat tab: gr.ChatInterface calls chatbot(message, history).
# Typo fixed in the user-facing description ("Ae" -> "A").
chat_interface = gr.ChatInterface(
    chatbot,
    title="Free Gemini API Chatbot",
    description="A chatbot powered by the free-tier Google Gemini API.",
)
# VQA tab: image upload plus a free-text question, answered by vqa().
vqa_interface = gr.Interface(
    fn=vqa,
    inputs=[gr.Image(type="filepath"), gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Answer"),
    title="Visual Question Answering (VQA)",
    description="Upload an image and ask a question about it.",
)
# Combine the two interfaces as tabs. TabbedInterface is itself a Blocks
# app, so it is used directly as `demo`; the original instantiated it
# inside a `with gr.Blocks() as demo:` context, which attaches the tabs to
# the TabbedInterface's own inner Blocks and leaves the outer page empty.
demo = gr.TabbedInterface(
    [chat_interface, vqa_interface],
    ["Chatbot", "VQA"],
)

if __name__ == "__main__":
    demo.launch()