resolverkatla committed
Commit 236cd54 · 1 Parent(s): f7ed8cb
Files changed (2)
  1. app.py +48 -0
  2. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,48 @@
+ import gradio as gr
+ import google.generativeai as genai
+ import os
+
+ # Set up Gemini API key (use environment variable on Hugging Face)
+ API_KEY = os.getenv("GEMINI_API_KEY")  # Set this in Hugging Face secrets
+ genai.configure(api_key=API_KEY)
+
+ # Use Gemini 1.5 Flash (free-tier accessible)
+ model = genai.GenerativeModel("gemini-1.5-flash")
+
+ def chatbot(prompt, history=[]):
+     """Generates a chatbot response using the free-tier Gemini API."""
+     try:
+         response = model.generate_content(prompt)
+         return response.text
+     except Exception as e:
+         return f"Error: {e}"
+
+ def vqa(image, question):
+     """Performs Visual Question Answering (VQA) using the Gemini API."""
+     try:
+         response = model.generate_content([question, image])
+         return response.text
+     except Exception as e:
+         return f"Error: {e}"
+
+ # Create Gradio interfaces
+ chat_interface = gr.ChatInterface(
+     chatbot,
+     title="Free Gemini API Chatbot",
+     description="A chatbot powered by the free-tier Google Gemini API."
+ )
+
+ vqa_interface = gr.Interface(
+     fn=vqa,
+     inputs=[gr.Image(type="pil"), gr.Textbox(label="Question")],  # PIL image can be passed straight to generate_content
+     outputs=gr.Textbox(label="Answer"),
+     title="Visual Question Answering (VQA)",
+     description="Upload an image and ask a question about it."
+ )
+
+ # Combine the interfaces into a tabbed app (TabbedInterface is itself a Blocks)
+ demo = gr.TabbedInterface([chat_interface, vqa_interface], ["Chatbot", "VQA"])
+
+ if __name__ == "__main__":
+     demo.launch()
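Note on the chatbot function above: it receives the Gradio chat history but never forwards it to Gemini, so every turn is answered without context. If multi-turn memory is wanted, one option is to replay the history through google-generativeai's chat API (model.start_chat / send_message). The sketch below is not part of this commit; it assumes the default tuple-style history that gr.ChatInterface passes, and the name chatbot_with_history is made up for illustration.

# Sketch only, not in this commit: forward Gradio chat history to Gemini.
import os
import google.generativeai as genai

genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
model = genai.GenerativeModel("gemini-1.5-flash")

def chatbot_with_history(prompt, history):
    """Answer the prompt while replaying prior turns through Gemini's chat API."""
    # gr.ChatInterface passes history as a list of [user, assistant] pairs;
    # start_chat expects dicts with a role ("user"/"model") and a parts list.
    gemini_history = []
    for user_msg, bot_msg in history:
        gemini_history.append({"role": "user", "parts": [user_msg]})
        if bot_msg:
            gemini_history.append({"role": "model", "parts": [bot_msg]})
    try:
        chat = model.start_chat(history=gemini_history)
        return chat.send_message(prompt).text
    except Exception as e:
        return f"Error: {e}"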
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ google-generativeai
+ gradio
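To smoke-test the Space locally before pushing, the two requirements above are enough (Pillow comes in as a Gradio dependency). A minimal sketch, assuming the file from this commit is saved as app.py next to the script, GEMINI_API_KEY is exported in the shell, and sample.jpg is any local test image (the script name and sample.jpg are placeholders):

# local_test.py — sketch only, not part of this commit.
import os

assert os.getenv("GEMINI_API_KEY"), "export GEMINI_API_KEY before running"

# Importing app.py configures genai and builds the Gradio interfaces,
# but does not launch the server (that only happens under __main__).
from app import chatbot, vqa

print(chatbot("Say hello in one short sentence.", []))

from PIL import Image
print(vqa(Image.open("sample.jpg"), "What is shown in this image?"))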