Update app.py

app.py CHANGED
@@ -23,8 +23,13 @@ headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACE_TOKEN')}"}
 
 def query_llama(payload):
     """Send a query to the Llama model via Hugging Face API"""
-    response = requests.post(API_URL, headers=headers, json=payload)
-    return response.json()
+    try:
+        response = requests.post(API_URL, headers=headers, json=payload)
+        response.raise_for_status()
+        return response.json()
+    except requests.exceptions.RequestException as e:
+        print(f"Error querying Llama model: {e}")
+        return None
 
 def google_search(term, num_results=5, lang="en", timeout=5, safe="active", ssl_verify=None, days_back=90):
     """Perform a Google search and return results"""
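With this change, query_llama returns None instead of raising on network or HTTP errors, so callers must guard their indexing before reading a generation. A minimal usage sketch (the prompt is illustrative; the list-of-dicts response shape matches the checks this same commit adds further down):

    result = query_llama({"inputs": "Summarize: ...", "parameters": {"max_length": 200}})
    if result and "generated_text" in result[0]:
        print(result[0]["generated_text"])
    else:
        print("Model call failed; the error was already logged by query_llama.")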
@@ -44,59 +49,41 @@ def google_search(term, num_results=5, lang="en", timeout=5, safe="active", ssl_
     escaped_term = urllib.parse.quote_plus(search_term)
     start = 0
     all_results = []
+    max_attempts = num_results * 2  # Allow for some failed attempts
 
     with requests.Session() as session:
-        while len(all_results) < num_results:
+        attempts = 0
+        while len(all_results) < num_results and attempts < max_attempts:
             try:
-                # ... (Google search request) ...
+                # ... (Google search request remains the same) ...
+
+                for result in result_block:
+                    if len(all_results) >= num_results:
+                        break
+                    link = result.find("a", href=True)
+                    if link:
+                        link = link["href"]
+                        print(f"Found link: {link}")
+                        try:
+                            webpage = session.get(link, headers=headers, timeout=timeout)
+                            webpage.raise_for_status()
+                            visible_text = extract_text_from_webpage(webpage.text)
+                            all_results.append({"link": link, "text": visible_text})
+                        except requests.exceptions.HTTPError as e:
+                            if e.response.status_code == 403:
+                                print(f"403 Forbidden error for {link}, skipping...")
+                            else:
+                                print(f"HTTP error {e.response.status_code} for {link}, skipping...")
+                        except requests.exceptions.RequestException as e:
+                            print(f"Error fetching or processing {link}: {e}")
+                    else:
+                        print("No link found in result.")
+
+                start += len(result_block)
+                attempts += 1
             except requests.exceptions.RequestException as e:
                 print(f"Error fetching search results: {e}")
-                break
-
-            soup = BeautifulSoup(resp.text, "html.parser")
-            result_block = soup.find_all("div", attrs={"class": "g"})
-            if not result_block:
-                print("No more results found.")
-                break
-
-            for result in result_block:
-                if len(all_results) >= num_results:
-                    break
-                link = result.find("a", href=True)
-                if link:
-                    link = link["href"]
-                    print(f"Found link: {link}")
-                    try:
-                        webpage = session.get(link, headers=headers, timeout=timeout)
-                        webpage.raise_for_status()
-                        visible_text = extract_text_from_webpage(webpage.text)
-                        all_results.append({"link": link, "text": visible_text})
-                    except requests.exceptions.RequestException as e:
-                        print(f"Error fetching or processing {link}: {e}")
-                        all_results.append({"link": link, "text": None})
-                else:
-                    print("No link found in result.")
-                    all_results.append({"link": None, "text": None})
-
-            start += len(result_block)
+                attempts += 1
 
     print(f"Total results fetched: {len(all_results)}")
     return all_results
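The bounded retry is the substantive change here: previously an unparseable or failing results page could stall the loop, whereas max_attempts = num_results * 2 now caps how many search pages the function tries before giving up. The search request itself is elided in the commit ("remains the same"). For orientation only, a hypothetical sketch of what such a block typically looks like, modeled on the googlesearch-python library; the actual URL, User-Agent, and parameters used by this Space are not shown in the diff:

    resp = session.get(
        "https://www.google.com/search",
        headers={"User-Agent": "Mozilla/5.0"},  # placeholder UA, not the app's real header
        params={
            "q": escaped_term,
            "num": num_results,
            "hl": lang,
            "start": start,
            "safe": safe,
        },
        timeout=timeout,
        verify=ssl_verify,
    )
    resp.raise_for_status()
    soup = BeautifulSoup(resp.text, "html.parser")
    result_block = soup.find_all("div", attrs={"class": "g"})

The last two lines are taken from the removed version of the function; the new code still relies on result_block being populated by the elided block.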
@@ -200,7 +187,8 @@ def summarize_financial_news(query):
 Provide a detailed, coherent summary focusing on financial implications and analysis."""
 
         summary = query_llama({"inputs": prompt, "parameters": {"max_length": 1000}})
-        summaries.append(summary[0]['generated_text'])
+        if summary and 'generated_text' in summary[0]:
+            summaries.append(summary[0]['generated_text'])
 
     # Combine summaries
     combined_summary = "\n\n".join(summaries)
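The new guard reflects the usual response shape of the Hugging Face text-generation Inference API, which on success is a list with one dict per generated sequence; after this commit query_llama can also return None. A small illustrative sketch of the two cases the check distinguishes (values are made up):

    ok = [{"generated_text": "Markets rallied after the rate decision..."}]
    failed = None  # what query_llama now returns after a request error

    for resp in (ok, failed):
        if resp and "generated_text" in resp[0]:
            print(resp[0]["generated_text"])
        else:
            print("Skipping chunk: no usable generation.")

The final-summary hunk below applies the same pattern, falling back to a fixed error message instead of skipping.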
@@ -212,7 +200,10 @@ Focus on the most important financial implications and analysis."""
 
     final_summary = query_llama({"inputs": final_prompt, "parameters": {"max_length": 2000}})
 
-    return final_summary[0]['generated_text']
+    if final_summary and 'generated_text' in final_summary[0]:
+        return final_summary[0]['generated_text']
+    else:
+        return "Unable to generate summary due to an error."
 
 # Gradio Interface
 iface = gr.Interface(