Update app.py
app.py
CHANGED
@@ -267,6 +267,41 @@ def analyze_segment_with_gemini(cluster_text, is_full_text=False):
     }}
     """
 
+    response = llm.invoke(prompt)
+
+    response_text = response.content
+
+    try:
+        json_match = re.search(r'\{[\s\S]*\}', response_text)
+        if json_match:
+            response_json = json.loads(json_match.group(0))
+        else:
+            response_json = json.loads(response_text)
+
+        return response_json
+    except json.JSONDecodeError as e:
+        print(f"Error parsing JSON response: {e}")
+        print(f"Raw response: {response_text}")
+
+        if is_full_text:
+            return {
+                "segments": [
+                    {
+                        "topic_name": "JSON Parsing Error",
+                        "key_concepts": ["Error in response format"],
+                        "summary": f"Could not parse the API response. Raw text: {response_text[:200]}...",
+                        "quiz_questions": []
+                    }
+                ]
+            }
+        else:
+            return {
+                "topic_name": "JSON Parsing Error",
+                "key_concepts": ["Error in response format"],
+                "summary": f"Could not parse the API response. Raw text: {response_text[:200]}...",
+                "quiz_questions": []
+            }
+
 def process_document_with_quiz(text):
     token_count = len(tokenizer.encode(text))
     print(f"Text contains {token_count} tokens")
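The added error handling rests on one pattern: greedily capture everything from the first opening brace to the last closing brace of the model reply with a regex, try json.loads on that span (or on the raw reply if no braces are found), and fall back to a fixed error payload when decoding still fails. Below is a minimal standalone sketch of that pattern; the extract_json helper name and the commented usage are illustrative rather than part of the commit, and a LangChain-style llm object whose invoke() result exposes a .content string is assumed.

import json
import re

def extract_json(response_text):
    """Parse the first {...} span found in an LLM reply; fall back to parsing the whole string."""
    match = re.search(r'\{[\s\S]*\}', response_text)
    candidate = match.group(0) if match else response_text
    return json.loads(candidate)  # raises json.JSONDecodeError if the text is still not valid JSON

# Illustrative usage mirroring the committed code (llm and prompt are assumed to exist):
# try:
#     result = extract_json(llm.invoke(prompt).content)
# except json.JSONDecodeError as e:
#     result = {
#         "topic_name": "JSON Parsing Error",
#         "key_concepts": ["Error in response format"],
#         "summary": f"Could not parse the API response: {e}",
#         "quiz_questions": [],
#     }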