Create app.py
app.py
ADDED
@@ -0,0 +1,130 @@
import gradio as gr
import requests
import json
from datetime import datetime, timezone

# Hugging Face daily papers API
API_URL = "https://huggingface.co/api/daily_papers"

# Global variables for pagination
current_page = 1
papers_per_page = 10

def fetch_papers(page=1):
    """Fetch all daily papers, following pagination until the API returns an empty page."""
    all_papers = []
    while True:
        response = requests.get(f"{API_URL}?page={page}")
        if response.status_code == 200:
            data = response.json()
            if not data:
                break
            all_papers.extend(data)
            page += 1
        else:
            print(f"Failed to fetch data: {response.status_code}")
            break
    return all_papers

papers = fetch_papers()
total_pages = (len(papers) + papers_per_page - 1) // papers_per_page

print("API Response structure:", json.dumps(papers[0] if papers else {}, indent=2))

def format_paper(paper):
    """Render a single paper entry as one HTML row."""
    title = paper.get('title', 'No title')
    url = f"https://huggingface.co/papers/{paper.get('paper', {}).get('id', '')}"
    authors = ', '.join([author.get('name', '') for author in paper.get('paper', {}).get('authors', [])])
    upvotes = paper.get('paper', {}).get('upvotes', 0)
    comments = paper.get('numComments', 0)
    published_time = datetime.fromisoformat(paper.get('publishedAt', datetime.now(timezone.utc).isoformat()).replace('Z', '+00:00'))
    time_ago = (datetime.now(timezone.utc) - published_time).days

    return f"""<div style='border-bottom: 1px solid #eee; padding: 10px 0;'>
        <a href='{url}' target='_blank' style='color: #000; text-decoration: none; font-weight: bold;'>{title}</a>
        <div style='font-size: 0.8em; color: #666; margin-top: 5px;'>
            {upvotes} upvotes | by {authors} | {time_ago} days ago | {comments} comments
        </div>
    </div>"""

def sort_papers_by_upvotes(papers):
    return sorted(papers, key=lambda x: x.get('paper', {}).get('upvotes', 0), reverse=True)

def render_papers():
    """Return HTML for the current page of papers, sorted by upvotes."""
    start_index = (current_page - 1) * papers_per_page
    end_index = start_index + papers_per_page
    sorted_papers = sort_papers_by_upvotes(papers)
    current_papers = sorted_papers[start_index:end_index]
    if not current_papers:
        return "<div>No more papers available.</div>"
    return "".join([format_paper(paper) for paper in current_papers])

def search_papers(query):
    """Filter papers by title; an empty query re-fetches the full list."""
    global papers
    if not query:
        papers = fetch_papers()
        return render_papers()

    filtered_papers = [paper for paper in papers if query.lower() in paper.get('title', '').lower()]
    sorted_filtered_papers = sort_papers_by_upvotes(filtered_papers)
    return "".join([format_paper(paper) for paper in sorted_filtered_papers[:papers_per_page]])

def refresh_papers():
    global papers, total_pages
    papers = fetch_papers()
    total_pages = (len(papers) + papers_per_page - 1) // papers_per_page
    return render_papers()

def next_page():
    global current_page
    if current_page < total_pages:
        current_page += 1
    return render_papers(), f"Page {current_page} of {total_pages}"

def prev_page():
    global current_page
    if current_page > 1:
        current_page -= 1
    return render_papers(), f"Page {current_page} of {total_pages}"

css = """
body {
    font-family: Arial, sans-serif;
    max-width: 800px;
    margin: 0 auto;
    padding: 20px;
}
.paper-list {
    max-height: 600px;
    overflow-y: auto;
    border: 1px solid #eee;
    border-radius: 5px;
    padding: 10px;
}
.search-row {
    display: flex;
    gap: 10px;
    margin-bottom: 20px;
}
"""

demo = gr.Blocks(css=css)

with demo:
    gr.Markdown("# Daily Papers - HackerNews Style")
    with gr.Row(elem_classes=["search-row"]):
        search_input = gr.Textbox(label="Search papers", placeholder="Enter search term...")
        refresh_button = gr.Button("Refresh")
    paper_list = gr.HTML(render_papers(), elem_classes=["paper-list"])

    with gr.Row():
        prev_button = gr.Button("Previous Page")
        next_button = gr.Button("Next Page")
    page_info = gr.Markdown(f"Page {current_page} of {total_pages}")

    search_input.change(search_papers, inputs=[search_input], outputs=[paper_list])
    refresh_button.click(refresh_papers, outputs=[paper_list])
    prev_button.click(prev_page, outputs=[paper_list, page_info])
    next_button.click(next_page, outputs=[paper_list, page_info])

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=5000)
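For reference, format_paper only relies on a handful of fields from each API item. The sketch below shows the rough shape the code assumes for one entry (field names inferred from the accesses above, not from official API documentation; the sample values are hypothetical) and how a single entry would render:

# Hypothetical example item, shaped after the fields format_paper reads.
# The real https://huggingface.co/api/daily_papers response may contain more fields.
sample_item = {
    "title": "An Example Paper",
    "publishedAt": "2024-05-01T00:00:00.000Z",
    "numComments": 3,
    "paper": {
        "id": "2405.00001",
        "upvotes": 12,
        "authors": [{"name": "Jane Doe"}, {"name": "John Smith"}],
    },
}

print(format_paper(sample_item))  # prints one HTML row for the listing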