Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,108 +1,50 @@
|
|
1 |
import os
|
2 |
-
import time
|
3 |
-
from typing import List, Tuple, Optional
|
4 |
-
|
5 |
import google.generativeai as genai
|
6 |
import gradio as gr
|
7 |
from PIL import Image
|
|
|
8 |
|
9 |
-
|
10 |
-
|
11 |
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
|
13 |
-
TITLE = """<h1 align="center">🕹️ Gemini Against Gender Based Discrimination🔥</h1>"""
|
14 |
-
SUBTITLE = """<h2 align="center">🎨Check the discrimination in communication</h2>"""
|
15 |
-
|
16 |
-
|
17 |
-
IMAGE_WIDTH = 512
|
18 |
-
|
19 |
-
def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
|
20 |
-
return [seq.strip() for seq in stop_sequences.split(",")] if stop_sequences else None
|
21 |
-
|
22 |
-
def preprocess_image(image: Image.Image) -> Image.Image:
|
23 |
-
image_height = int(image.height * IMAGE_WIDTH / image.width)
|
24 |
-
return image.resize((IMAGE_WIDTH, image_height))
|
25 |
-
|
26 |
-
def user(text_prompt: str, chatbot: List[Tuple[str, str]]):
|
27 |
-
return "", chatbot + [["Analyze this text for gender-based discrimination, including implicit biases and stereotypes. Provide specific examples and explain why each example demonstrates bias. Also, suggest tips for how to address or mitigate these biases within the text.", None]]
|
28 |
-
|
29 |
-
def bot(
|
30 |
-
google_key: str,
|
31 |
-
image_prompt: Optional[Image.Image],
|
32 |
-
temperature: float,
|
33 |
-
max_output_tokens: int,
|
34 |
-
stop_sequences: str,
|
35 |
-
top_k: int,
|
36 |
-
top_p: float,
|
37 |
-
chatbot: List[Tuple[str, str]]
|
38 |
-
):
|
39 |
-
google_key = google_key or GOOGLE_API_KEY
|
40 |
-
if not google_key:
|
41 |
-
raise ValueError("GOOGLE_API_KEY is not set. Please set it up.")
|
42 |
-
|
43 |
-
text_prompt = chatbot[-1][0]
|
44 |
-
genai.configure(api_key=google_key)
|
45 |
generation_config = genai.types.GenerationConfig(
|
46 |
-
temperature=
|
47 |
-
max_output_tokens=
|
48 |
-
stop_sequences=
|
49 |
-
top_k=
|
50 |
-
top_p=
|
51 |
-
#instructions = "You are an expert stylist"
|
52 |
)
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
label="GOOGLE API KEY",
|
70 |
-
type="password",
|
71 |
-
placeholder="...",
|
72 |
-
visible=GOOGLE_API_KEY is None
|
73 |
-
)
|
74 |
-
|
75 |
-
image_prompt_component = gr.Image(type="pil", label="Image")
|
76 |
-
chatbot_component = gr.Chatbot(label='Gemini', bubble_full_width=False)
|
77 |
-
text_prompt_component = gr.Textbox(placeholder="Write your text here", label="Ask me anything and press Enter")
|
78 |
-
run_button_component = gr.Button("Run")
|
79 |
-
temperature_component = gr.Slider(minimum=0, maximum=1.0, value=0.4, step=0.05, label="Temperature")
|
80 |
-
max_output_tokens_component = gr.Slider(minimum=1, maximum=2048, value=1024, step=1, label="Token limit")
|
81 |
-
stop_sequences_component = gr.Textbox(label="Add stop sequence", placeholder="STOP, END")
|
82 |
-
top_k_component = gr.Slider(minimum=1, maximum=40, value=32, step=1, label="Top-K")
|
83 |
-
top_p_component = gr.Slider(minimum=0, maximum=1, value=1, step=0.01, label="Top-P")
|
84 |
-
|
85 |
-
user_inputs = [text_prompt_component, chatbot_component]
|
86 |
-
bot_inputs = [google_key_component, image_prompt_component, temperature_component, max_output_tokens_component, stop_sequences_component, top_k_component, top_p_component, chatbot_component]
|
87 |
-
|
88 |
-
with gr.Blocks() as demo:
|
89 |
-
gr.HTML(TITLE)
|
90 |
-
gr.HTML(SUBTITLE)
|
91 |
-
with gr.Column():
|
92 |
-
google_key_component.render()
|
93 |
-
with gr.Row():
|
94 |
-
image_prompt_component.render()
|
95 |
-
chatbot_component.render()
|
96 |
-
text_prompt_component.render()
|
97 |
-
run_button_component.render()
|
98 |
-
with gr.Accordion("Parameters", open=False):
|
99 |
-
temperature_component.render()
|
100 |
-
max_output_tokens_component.render()
|
101 |
-
stop_sequences_component.render()
|
102 |
-
with gr.Accordion("Advanced", open=False):
|
103 |
-
top_k_component.render()
|
104 |
-
top_p_component.render()
|
105 |
-
|
106 |
-
run_button_component.click(fn=user, inputs=user_inputs, outputs=[text_prompt_component, chatbot_component], queue=False).then(fn=bot, inputs=bot_inputs, outputs=[chatbot_component])
|
107 |
-
text_prompt_component.submit(fn=user, inputs=user_inputs, outputs=[text_prompt_component, chatbot_component], queue=False).then(fn=bot, inputs=bot_inputs, outputs=[chatbot_component])
|
108 |
-
demo.launch()
|
|
|
import os

import google.generativeai as genai
import gradio as gr
from PIL import Image
import moviepy.editor as mp

# Configure Google API Key and model.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    # Fail fast with a clear message instead of an opaque auth error on the
    # first model call (the previous revision raised this check at runtime).
    raise RuntimeError("GOOGLE_API_KEY is not set. Please set it up.")
genai.configure(api_key=GOOGLE_API_KEY)
model = genai.GenerativeModel("gemini-1.5-pro-latest")
+
# Analysis function
def analyze_content(content):
    """Analyze text, an image, or a video for gender-based discrimination.

    Args:
        content: a ``str`` (raw text), a ``PIL.Image.Image``, or — for any
            other type — a file-like object with a ``.name`` path assumed to
            be a video (TODO confirm callers only pass these three shapes).

    Returns:
        The model's analysis text, or a fallback message when the model
        returns nothing (e.g. the response was blocked).
    """
    if isinstance(content, str):  # Text content
        prompt = f"Analyze this text for any instances of gender-based discrimination and provide tips: {content}"
    elif isinstance(content, Image.Image):  # Image content
        content = content.convert("RGB")  # Convert image to RGB
        prompt = "Analyze this image for any instances of gender-based discrimination and provide tips."
        content = [prompt, content]  # The model expects list inputs for images
    else:  # Video content
        prompt = "Analyze this video for any instances of gender-based discrimination and provide tips."
        clip = mp.VideoFileClip(content.name)
        try:
            # Sample one representative frame near t=1s, clamped so clips
            # shorter than a second do not raise on an out-of-range seek.
            t = min(1, clip.duration) if clip.duration else 0
            frame = clip.get_frame(t)
        finally:
            clip.close()  # release the ffmpeg reader (was leaked before)
        image = Image.fromarray(frame).convert("RGB")
        content = [prompt, image]  # Use a single frame for analysis

    generation_config = genai.types.GenerationConfig(
        temperature=0.5,
        max_output_tokens=300,
        # NOTE: stop_sequences=["\n"] was removed — it cut every answer off
        # at the first newline, which defeats max_output_tokens for a prompt
        # that explicitly asks for multi-line tips.
        top_k=40,
        top_p=0.9,
    )
    response = model.generate_content(content, generation_config=generation_config)
    try:
        # .text raises ValueError when the response was blocked or empty;
        # map that to the same fallback message instead of crashing the UI.
        return response.text if response else "No response generated."
    except ValueError:
        return "No response generated."
# Gradio interface setup
with gr.Blocks() as app:
    with gr.Tab("Upload Content"):
        # BUG FIX: gr.DataInput does not exist in Gradio and crashed the app
        # at startup; gr.File accepts arbitrary uploads (text/image/video).
        input_content = gr.File(label="Upload text, image, or video")
        output_analysis = gr.Textbox(label="Discrimination Analysis Output")
        analyze_button = gr.Button("Analyze Discrimination")

        def _dispatch(upload):
            """Convert the uploaded file into the str / PIL.Image / file-like
            shapes that analyze_content() branches on, then run the analysis."""
            if upload is None:
                return "Please upload a file first."
            name = upload.name.lower()
            if name.endswith((".txt", ".md")):
                with open(upload.name, encoding="utf-8") as fh:
                    return analyze_content(fh.read())
            if name.endswith((".png", ".jpg", ".jpeg", ".bmp", ".gif", ".webp")):
                return analyze_content(Image.open(upload.name))
            # Anything else falls through to the video branch.
            return analyze_content(upload)

        analyze_button.click(
            fn=_dispatch,
            inputs=input_content,
            outputs=output_analysis,
        )

app.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|