Rahatara committed (verified)
Commit e720417 · 1 Parent(s): 6848f64

Update app.py

Files changed (1):
  1. app.py +58 -1
app.py CHANGED
@@ -1,3 +1,50 @@
+import os
+import google.generativeai as genai
+import gradio as gr
+from google.generativeai.types import HarmBlockThreshold, HarmCategory
+
+# Configure Google API key and model
+GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
+genai.configure(api_key=GOOGLE_API_KEY)
+MODEL_ID = "gemini-1.5-pro-002"
+model = genai.GenerativeModel(MODEL_ID)
+
+example_model = genai.GenerativeModel(
+    MODEL_ID,
+    system_instruction=[
+        "You are an advocate against gender-based violence.",
+        "Analyze the content for signs of gender discrimination and provide actionable advice."
+    ],
+)
+
+# Set model generation parameters
+generation_config = genai.GenerationConfig(
+    temperature=0.9,
+    top_p=1.0,
+    top_k=32,
+    candidate_count=1,
+    max_output_tokens=8192,
+)
+
+# Safety settings: block flagged content at the lowest threshold
+safety_settings = {
+    HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+    HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+    HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+    HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+}
+
+# Function to analyze text for gender-based discrimination
+def analyze_text(text):
+    prompt = f"Analyze this text for any instances of gender-based discrimination and provide tips: {text}"
+    contents = [prompt]
+    response = example_model.generate_content(
+        contents,
+        generation_config=generation_config,
+        safety_settings=safety_settings,
+    )
+    return response.text if response else "No response generated."
+
 # Custom CSS for branding and animation
 css = """
 body { font-family: 'Arial', sans-serif; background-color: #E8F0FE; }
@@ -18,6 +65,16 @@ h1 { color: #333; animation: fadeIn 2s; }
 @keyframes blink-animation { 50% { opacity: 0; } }
 """
 
+# Example scenarios for gender discrimination analysis
+example_scenarios = [
+    "During a team meeting, whenever a female colleague tried to express her opinion, she was often interrupted or talked over by male colleagues.",
+    "The feedback given to female employees often focuses more on their demeanor and less on their actual accomplishments.",
+    "Male employees are more frequently considered for promotions and challenging projects, even when female employees have similar or superior qualifications.",
+    "During a hiring panel, female candidates were often asked about their personal life, family plans, and how they would balance home and work.",
+    "There are significant wage discrepancies between male and female employees who hold the same position and possess comparable experience.",
+    "Some male colleagues often make inappropriate jokes or comments about female employees' appearances and attire."
+]
+
 # Gradio interface setup
 with gr.Blocks(css=css) as app:
     gr.Markdown("<div id='logo'>J<span>ustEva</span></div>")
@@ -40,4 +97,4 @@ with gr.Blocks(css=css) as app:
         outputs=text_output
     )
 
-app.launch()
+app.launch()
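
A note on the new analyze_text function: with every category set to BLOCK_LOW_AND_ABOVE, generate_content can return a response whose only candidate was filtered, and in the google-generativeai SDK accessing response.text then raises a ValueError rather than returning None, so the `if response` guard above never triggers. Below is a minimal defensive sketch; it is illustrative only, not part of this commit, and reuses example_model, generation_config, and safety_settings from app.py:

# Illustrative sketch, not part of this commit: guards against responses
# blocked by the safety settings instead of assuming response.text exists.
def analyze_text_defensive(text):
    prompt = f"Analyze this text for any instances of gender-based discrimination and provide tips: {text}"
    try:
        response = example_model.generate_content(
            [prompt],
            generation_config=generation_config,
            safety_settings=safety_settings,
        )
        return response.text
    except ValueError:
        # response.text raises ValueError when the candidate was blocked
        # and the response contains no text parts.
        return "No response generated (blocked by safety settings)."

If adopted, this variant could simply replace analyze_text wherever the commit wires it into the Blocks click handler that feeds text_output.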