Update app.py
app.py
CHANGED
@@ -353,35 +353,33 @@ def scrape_full_content(url, max_chars=3000, timeout=5, use_pydf2=True):
 
 def llm_summarize(json_input, model, temperature=0.2):
     system_prompt = """You are Sentinel, a world-class Financial analysis AI model who is expert at searching the web and answering user's queries. You are also an expert at summarizing web pages or documents and searching for content in them."""
-
     user_prompt = f"""
 Please provide a comprehensive summary based on the following JSON input:
 {json_input}
-
 Instructions:
 1. Analyze the query and the provided documents.
-2. Write a detailed, long, and complete research document that is informative and relevant to the user's query.
-3. Use an unbiased and
-4.
-5.
-6.
-7.
-8.
-9.
-10.
-
-
-
+2. Write a detailed, long, and complete research document that is informative and relevant to the user's query based on provided context (the context consists of search results containing a brief description of the content of that page).
+3. You must use this context to answer the user's query in the best way possible. Use an unbiased and journalistic tone in your response. Do not repeat the text.
+4. Use an unbiased and professional tone in your response.
+5. Do not repeat text verbatim from the input.
+6. Provide the answer in the response itself.
+7. You can use markdown to format your response.
+8. Use bullet points to list information where appropriate.
+9. Cite the answer using [number] notation along with the appropriate source URL embedded in the notation.
+10. Place these citations at the end of the relevant sentences.
+11. You can cite the same sentence multiple times if it's relevant to different parts of your answer.
+12. Make sure the answer is not short and is informative.
+13. Your response should be detailed, informative, accurate, and directly relevant to the user's query."""
+
     messages = [
         {"role": "system", "content": system_prompt},
         {"role": "user", "content": user_prompt}
     ]
-
     try:
         if model == "groq":
             response = groq_client.chat.completions.create(
                 messages=messages,
-                model="llama-3.1-
+                model="llama-3.1-70b-instant",
                 max_tokens=5500,
                 temperature=temperature,
                 top_p=0.9,
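
For context, a minimal usage sketch of the updated function (not part of this commit): it assumes llm_summarize returns the generated summary text, that groq_client is configured elsewhere in app.py, and that the input is a JSON string of search results with a query and a brief description per page, as the new prompt expects. The payload keys, query, and URLs below are illustrative only.

import json

# Hypothetical search-result payload; field names and URLs are made up for illustration.
search_results = {
    "query": "Q3 outlook for semiconductor stocks",
    "results": [
        {"url": "https://example.com/article-1", "description": "Brief description of page 1."},
        {"url": "https://example.com/article-2", "description": "Brief description of page 2."},
    ],
}

# Serialize before passing, since the prompt interpolates json_input into an f-string.
summary = llm_summarize(json.dumps(search_results, indent=2), model="groq", temperature=0.2)
print(summary)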