Rahatara committed on
Commit
7ee831c
·
verified ·
1 Parent(s): a4be152

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -31
app.py CHANGED
@@ -1,17 +1,21 @@
1
  import os
2
  import time
3
  from typing import List, Tuple, Optional
 
4
  import google.generativeai as genai
5
  import gradio as gr
6
  from PIL import Image
7
 
8
- # Environment variable for Google API Key
 
9
  GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
10
 
11
- # Constants for image processing
 
 
 
12
  IMAGE_WIDTH = 512
13
 
14
- # Helper functions
15
  def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
16
  return [seq.strip() for seq in stop_sequences.split(",")] if stop_sequences else None
17
 
@@ -19,61 +23,87 @@ def preprocess_image(image: Image.Image) -> Image.Image:
19
  image_height = int(image.height * IMAGE_WIDTH / image.width)
20
  return image.resize((IMAGE_WIDTH, image_height))
21
 
22
- # Function to trigger model invocation
 
 
23
  def bot(
24
  google_key: str,
25
  image_prompt: Optional[Image.Image],
 
 
 
 
 
26
  chatbot: List[Tuple[str, str]]
27
  ):
28
  google_key = google_key or GOOGLE_API_KEY
29
  if not google_key:
30
  raise ValueError("GOOGLE_API_KEY is not set. Please set it up.")
31
-
32
- # Fixed text prompt for analyzing gender-based discrimination
33
- text_prompt = "Analyze this for any instances of gender-based discrimination. Consider both explicit and implicit biases, stereotypes, and unequal treatment. Provide specific examples from the text to support your analysis."
34
 
 
35
  genai.configure(api_key=google_key)
36
  generation_config = genai.types.GenerationConfig(
37
- temperature=0.4,
38
- max_output_tokens=1024,
39
- stop_sequences=preprocess_stop_sequences("STOP, END"),
40
- top_k=32,
41
- top_p=1.0,
 
42
  )
43
 
44
  model_name = "gemini-1.5-pro-latest"
45
  model = genai.GenerativeModel(model_name)
46
- inputs = [text_prompt, preprocess_image(image_prompt)] if image_prompt else [text_prompt]
47
 
48
  response = model.generate_content(inputs, stream=True, generation_config=generation_config)
49
  response.resolve()
50
 
51
  chatbot[-1][1] = ""
52
  for chunk in response:
53
- chatbot[-1][1] += chunk.text
54
- time.sleep(0.01)
55
- return chatbot
 
 
 
 
 
 
 
 
56
 
57
- # Gradio Interface Components
58
- google_key_component = gr.Textbox(label="GOOGLE API KEY", type="password", placeholder="...", visible=GOOGLE_API_KEY is None)
59
- image_prompt_component = gr.Image(type="pil")
60
  chatbot_component = gr.Chatbot(label='Gemini', bubble_full_width=False)
61
- run_button_component = gr.Button("Check Discrimination")
 
 
 
 
 
 
 
 
 
62
 
63
- # Layout and Interaction
64
  with gr.Blocks() as demo:
 
 
 
65
  with gr.Column():
66
  google_key_component.render()
67
- image_prompt_component.render()
 
 
 
68
  run_button_component.render()
69
- chatbot_component.render()
70
-
71
- # Connect button to the bot function
72
- run_button_component.click(
73
- fn=bot,
74
- inputs=[google_key_component, image_prompt_component, chatbot_component],
75
- outputs=[chatbot_component]
76
- )
77
 
78
- # Launch the app
 
79
  demo.launch()
 
1
  import os
2
  import time
3
  from typing import List, Tuple, Optional
4
+
5
  import google.generativeai as genai
6
  import gradio as gr
7
  from PIL import Image
8
 
9
print("google-generativeai:", genai.__version__)

# API key comes from the environment; when absent, the UI shows a password
# textbox so the user can paste one in.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")

TITLE = """<h1 align="center">🕹️ Google Gemini Chatbot 🔥</h1>"""
SUBTITLE = """<h2 align="center">🎨Create with Multimodal Gemini</h2>"""
# The layout below renders gr.HTML(DUPLICATE); the original file never defined
# this constant, which crashed the app with a NameError at startup.
DUPLICATE = """<div align="center">Duplicate this Space to run it with your own API key.</div>"""

# Uploaded images are scaled down to this width before being sent to the model.
IMAGE_WIDTH = 512
18
 
 
19
def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
    """Turn a comma-separated string into a list of trimmed stop sequences.

    Returns None when the input is empty/falsy, which tells the generation
    config to use no stop sequences at all.
    """
    if not stop_sequences:
        return None
    return [token.strip() for token in stop_sequences.split(",")]
21
 
 
23
def preprocess_image(image: Image.Image) -> Image.Image:
    """Scale *image* to IMAGE_WIDTH wide, preserving its aspect ratio."""
    scaled_height = int(image.height * IMAGE_WIDTH / image.width)
    return image.resize((IMAGE_WIDTH, scaled_height))
25
 
26
# Fixed instruction sent with every request.
_ANALYSIS_PROMPT = "Analyze this text for gender-based discrimination, including implicit biases and stereotypes. Provide specific examples and explain why each example demonstrates bias. Also, suggest tips for how to address or mitigate these biases within the text."

def user(text_prompt: str, chatbot: List[Tuple[str, str]]):
    """Record the pending user turn and clear the textbox.

    Bug fix: the original ignored ``text_prompt`` entirely, so whatever the
    user typed into the "Ask me anything" box was silently discarded. Now a
    non-empty input is appended to the fixed analysis instruction so the model
    actually analyzes it; with empty input the behavior is unchanged.

    Returns:
        ("", new_history) — empty string clears the textbox; the new history
        ends with [prompt, None], which bot() fills in.
    """
    prompt = _ANALYSIS_PROMPT
    if text_prompt and text_prompt.strip():
        prompt = f"{_ANALYSIS_PROMPT}\n\nText:\n{text_prompt.strip()}"
    return "", chatbot + [[prompt, None]]
28
+
29
def bot(
    google_key: str,
    image_prompt: Optional[Image.Image],
    temperature: float,
    max_output_tokens: int,
    stop_sequences: str,
    top_k: int,
    top_p: float,
    chatbot: List[Tuple[str, str]]
):
    """Stream a Gemini reply into the last chatbot turn.

    Args:
        google_key: API key from the UI textbox; falls back to the
            GOOGLE_API_KEY environment variable.
        image_prompt: optional PIL image sent alongside the text prompt.
        temperature, max_output_tokens, stop_sequences, top_k, top_p:
            generation parameters taken from the UI controls.
        chatbot: chat history; the last entry holds the pending user prompt
            (appended by user()).

    Yields:
        The chat history with the assistant reply progressively filled in,
        so Gradio renders a typing effect.

    Raises:
        ValueError: if no API key is available from the UI or the environment.
    """
    google_key = google_key or GOOGLE_API_KEY
    if not google_key:
        raise ValueError("GOOGLE_API_KEY is not set. Please set it up.")

    # user() appended [prompt, None] just before this handler runs.
    text_prompt = chatbot[-1][0]
    genai.configure(api_key=google_key)
    generation_config = genai.types.GenerationConfig(
        temperature=temperature,
        max_output_tokens=max_output_tokens,
        stop_sequences=preprocess_stop_sequences(stop_sequences),
        top_k=top_k,
        top_p=top_p,
    )

    model = genai.GenerativeModel("gemini-1.5-pro-latest")
    inputs = [text_prompt] if image_prompt is None else [text_prompt, preprocess_image(image_prompt)]

    response = model.generate_content(inputs, stream=True, generation_config=generation_config)
    response.resolve()

    chatbot[-1][1] = ""
    for chunk in response:
        # Re-emit each chunk in 10-character slices with a short sleep so the
        # UI updates smoothly instead of in large jumps.
        for i in range(0, len(chunk.text), 10):
            chatbot[-1][1] += chunk.text[i:i + 10]
            time.sleep(0.01)
            yield chatbot
67
+
68
# ---------------------------------------------------------------------------
# UI components. They are created here and rendered inside the Blocks layout
# further down.
# ---------------------------------------------------------------------------

# Only visible when no API key was found in the environment.
google_key_component = gr.Textbox(label="GOOGLE API KEY", type="password", placeholder="...", visible=GOOGLE_API_KEY is None)

image_prompt_component = gr.Image(type="pil", label="Image")
chatbot_component = gr.Chatbot(label='Gemini', bubble_full_width=False)
text_prompt_component = gr.Textbox(placeholder="Write your text here", label="Ask me anything and press Enter")
run_button_component = gr.Button("Run")

# Generation parameters forwarded to bot().
temperature_component = gr.Slider(
    minimum=0, maximum=1.0, value=0.4, step=0.05, label="Temperature")
max_output_tokens_component = gr.Slider(
    minimum=1, maximum=2048, value=1024, step=1, label="Token limit")
stop_sequences_component = gr.Textbox(
    label="Add stop sequence", placeholder="STOP, END")
top_k_component = gr.Slider(
    minimum=1, maximum=40, value=32, step=1, label="Top-K")
top_p_component = gr.Slider(
    minimum=0, maximum=1, value=1, step=0.01, label="Top-P")

# Input lists for the two-step click/submit handlers (user -> bot).
user_inputs = [text_prompt_component, chatbot_component]
bot_inputs = [
    google_key_component,
    image_prompt_component,
    temperature_component,
    max_output_tokens_component,
    stop_sequences_component,
    top_k_component,
    top_p_component,
    chatbot_component,
]
87
 
 
88
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    # NOTE(review): the original also called gr.HTML(DUPLICATE), but DUPLICATE
    # is never defined in this file, so the app crashed with a NameError at
    # startup; the call is removed here.
    with gr.Column():
        google_key_component.render()
        # NOTE(review): nesting reconstructed from a whitespace-mangled source;
        # image and chatbot are assumed to sit side by side — confirm.
        with gr.Row():
            image_prompt_component.render()
            chatbot_component.render()
        text_prompt_component.render()
        run_button_component.render()
        with gr.Accordion("Parameters", open=False):
            temperature_component.render()
            max_output_tokens_component.render()
            stop_sequences_component.render()
            with gr.Accordion("Advanced", open=False):
                top_k_component.render()
                top_p_component.render()

    # Both the Run button and pressing Enter first record the user turn
    # (user, queue=False so the textbox clears immediately), then stream the
    # model reply into the chatbot (bot is a generator).
    run_button_component.click(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False,
    ).then(fn=bot, inputs=bot_inputs, outputs=[chatbot_component])
    text_prompt_component.submit(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False,
    ).then(fn=bot, inputs=bot_inputs, outputs=[chatbot_component])

demo.launch()