# Hugging Face Space: Gemini-based analyzer that screens text, images, or
# video for gender-based discrimination and returns tips.
import os

import google.generativeai as genai
import gradio as gr
import moviepy.editor as mp
from PIL import Image

# Configure the Gemini client once at import time so every request reuses
# the same model instance.
# NOTE(review): if GOOGLE_API_KEY is unset this passes api_key=None and the
# failure only surfaces on the first generate_content call — consider
# failing fast here instead.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
genai.configure(api_key=GOOGLE_API_KEY)
model = genai.GenerativeModel("gemini-1.5-pro-latest")
# Analysis function
def analyze_content(content):
    """Ask Gemini to check content for gender-based discrimination.

    Args:
        content: One of
            * ``str`` — raw text to analyze;
            * ``PIL.Image.Image`` — an image;
            * anything else — assumed to be an uploaded-file object whose
              ``.name`` is a video path (only one frame is analyzed).

    Returns:
        str: the model's reply, or a fallback message when the API returns
        a falsy response.
    """
    if isinstance(content, str):  # Text content
        prompt = f"Analyze this text for any instances of gender-based discrimination and provide tips: {content}"
    elif isinstance(content, Image.Image):  # Image content
        content = content.convert("RGB")  # Convert image to RGB
        prompt = "Analyze this image for any instances of gender-based discrimination and provide tips."
        content = [prompt, content]  # The model expects list inputs for images
    else:  # Video content — anything exposing a ``.name`` file path
        prompt = "Analyze this video for any instances of gender-based discrimination and provide tips."
        clip = mp.VideoFileClip(content.name)
        try:
            frame = clip.get_frame(1)  # Get a frame at t=1 second
        finally:
            clip.close()  # release the ffmpeg reader (was leaked before)
        image = Image.fromarray(frame).convert("RGB")
        content = [prompt, image]  # Use a single frame for analysis
    generation_config = genai.types.GenerationConfig(
        temperature=0.5,
        max_output_tokens=300,
        # NOTE(review): stopping on "\n" truncates the reply at the first
        # newline, so multi-paragraph "tips" are cut off — confirm intended.
        stop_sequences=["\n"],
        top_k=40,
        top_p=0.9,
    )
    # NOTE(review): response.text raises when the reply was safety-blocked;
    # the falsy check below does not cover that case.
    response = model.generate_content(content, generation_config=generation_config)
    return response.text if response else "No response generated."
# Gradio interface setup
with gr.Blocks() as app:
    with gr.Tab("Upload Content"):
        # BUG FIX: gr.DataInput does not exist in Gradio and raised an
        # AttributeError at startup. gr.File accepts any uploaded file and
        # its value exposes the ``.name`` path that analyze_content's video
        # branch reads.
        # NOTE(review): plain-text and image uploads will also arrive as
        # file objects and fall into the video branch — confirm whether
        # separate Textbox/Image/Video inputs are wanted instead.
        input_content = gr.File(label="Upload text, image, or video")
        output_analysis = gr.Textbox(label="Discrimination Analysis Output")
        analyze_button = gr.Button("Analyze Discrimination")
        analyze_button.click(
            fn=analyze_content,
            inputs=input_content,
            outputs=output_analysis,
        )

app.launch()