Update app.py
app.py
CHANGED
@@ -22,44 +22,44 @@ questions = [
 
 # Function to query the Gemini API
 def query_gemini_api(user_answers):
-
+    # Correct Gemini API endpoint (based on Google's Generative AI documentation)
+    url = f"https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key={GEMINI_API_KEY}"
+
     headers = {'Content-Type': 'application/json'}
 
-    #
-    input_text = " ".join(user_answers)
+    # Combine the user answers into a single input text
+    input_text = " ".join(user_answers)
 
+    # Payload for the API
     payload = {
-
-        {
-
-
-
-        }
-        ]
+        "prompt": {
+            "text": f"Analyze the following mood based on these inputs: {input_text}. Provide suggestions to improve the mood."
+        },
+        "temperature": 0.7,
+        "candidateCount": 1
     }
 
     try:
-        # Send the request
+        # Send the POST request
         response = requests.post(url, headers=headers, json=payload)
 
-        #
+        # Check if the response is successful
         if response.status_code == 200:
             result = response.json()
-
-            #
-
-
-
-
-            return mood, recommendations
+
+            # Extract the generated text from the response
+            candidates = result.get("candidates", [])
+            if candidates:
+                generated_text = candidates[0].get("output", "")
+                return generated_text
             else:
-                return None
+                return None
         else:
-
-            return None
+            st.error(f"API Error {response.status_code}: {response.text}")
+            return None
     except requests.exceptions.RequestException as e:
-
-        return None
+        st.error(f"An error occurred: {e}")
+        return None
 
 # Streamlit app for collecting answers
 def main():
@@ -78,22 +78,17 @@ def main():
     if len(responses) == len(questions):
         st.write("Processing your answers...")
 
-        #
-
-
-        if mood and recommendations:
-            # Display the detected mood
-            st.write(f"Detected Mood: {mood}")
+        # Query the Gemini API
+        generated_text = query_gemini_api(responses)
 
-
-
-
-
+        if generated_text:
+            # Display the generated mood analysis and recommendations
+            st.write("### Mood Analysis and Suggestions:")
+            st.write(generated_text)
         else:
-
-            pass
+            st.warning("Could not generate mood analysis. Please try again later.")
     else:
-        st.
+        st.info("Please answer all 3 questions to receive suggestions.")
 
 if __name__ == "__main__":
     main()
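Note on the new parsing logic: the code added to query_gemini_api assumes the generateText endpoint returns JSON with a "candidates" list whose first entry carries the generated text in an "output" field, and it returns None when that list is empty. The snippet below is a minimal standalone sketch of that extraction step; sample_result is an illustrative stand-in, not a captured API response.

# Minimal sketch of the extraction step added in query_gemini_api.
# sample_result stands in for response.json(); its contents are illustrative only.
sample_result = {
    "candidates": [
        {"output": "You sound a little stressed. A short walk and some deep breathing may help."}
    ]
}

candidates = sample_result.get("candidates", [])
if candidates:
    generated_text = candidates[0].get("output", "")
    print(generated_text)  # the text main() displays via st.write()
else:
    print("No candidates returned")  # query_gemini_api() returns None in this case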