#!/usr/bin/env python
import datetime
import operator

import pandas as pd
import tqdm.auto
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi
import gradio as gr
from gradio_calendar import Calendar
import datasets
import requests
from datetime import timezone  # Ensure timezone is imported

# --- Data Loading and Processing ---

api = HfApi()


def get_df() -> pd.DataFrame:
    """
    Loads and merges the papers and stats datasets, preprocesses the data by
    removing unnecessary columns, and adds a 'paper_page' link for each paper.
    """
    # Load datasets
    df_papers = datasets.load_dataset("hysts-bot-data/daily-papers", split="train").to_pandas()
    df_stats = datasets.load_dataset("hysts-bot-data/daily-papers-stats", split="train").to_pandas()

    # Merge datasets on 'arxiv_id'
    df = pd.merge(left=df_papers, right=df_stats, on="arxiv_id")

    # Reverse the DataFrame to have the latest papers first
    df = df[::-1].reset_index(drop=True)

    # Ensure 'date' is in datetime format and handle missing dates
    df["date"] = pd.to_datetime(df["date"], errors="coerce")
    df["date"] = df["date"].dt.strftime("%Y-%m-%d").fillna(datetime.datetime.now(timezone.utc).strftime("%Y-%m-%d"))

    # Prepare the DataFrame by removing 'abstract'
    paper_info = []
    for _, row in tqdm.auto.tqdm(df.iterrows(), total=len(df)):
        info = row.copy()
        if "abstract" in info:
            del info["abstract"]
        paper_info.append(info)
    df_prepared = pd.DataFrame(paper_info)

    # Add 'paper_page' links
    df_prepared["paper_page"] = df_prepared["arxiv_id"].apply(lambda x: f"https://huggingface.co/papers/{x}")

    return df_prepared


class Prettifier:
    """
    Converts raw DataFrame rows into a prettified format suitable for display.
    """

    @staticmethod
    def get_github_link(link: str) -> str:
        if not link:
            return ""
        return Prettifier.create_link("github", link)

    @staticmethod
    def create_link(text: str, url: str) -> str:
        # Render an HTML anchor so the 'markdown' columns display clickable links
        return f'<a href="{url}" target="_blank">{text}</a>'

    def __call__(self, df: pd.DataFrame) -> pd.DataFrame:
        new_rows = []
        for _, row in df.iterrows():
            # Handle date_display as a clickable link
            date_display = Prettifier.create_link(row.date, f"https://huggingface.co/papers?date={row.date}")
            new_row = {
                "arxiv_id": row.get("arxiv_id", ""),  # Include arxiv_id
                "date_display": date_display,  # For display
                "date": row.get("date", datetime.datetime.now(timezone.utc).strftime("%Y-%m-%d")),  # For internal calculations
                "paper_page": Prettifier.create_link(row.get("arxiv_id", ""), row.get("paper_page", "#")),
                "title": row.get("title", "No title"),
                "github": Prettifier.get_github_link(row.get("github", "")),
                "👍": row.get("upvotes", 0),
                "💬": row.get("num_comments", 0),
            }
            new_rows.append(new_row)
        return pd.DataFrame(new_rows)
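
# Illustrative example (not executed): assuming the HTML-anchor format used by
# Prettifier.create_link above, a hypothetical arXiv id "2310.12345" becomes
#   Prettifier.create_link("2310.12345", "https://huggingface.co/papers/2310.12345")
#   -> '<a href="https://huggingface.co/papers/2310.12345" target="_blank">2310.12345</a>'
# which Gradio renders as a clickable link in the columns declared as "markdown" below.
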
""" COLUMN_INFO = [ ["arxiv_id", "str"], # Added arxiv_id ["date_display", "markdown"], # For display ["date", "str"], # For internal use ["paper_page", "markdown"], ["title", "str"], ["github", "markdown"], ["👍", "number"], ["💬", "number"], ] def __init__(self, df: pd.DataFrame): self.df_raw = df self._prettifier = Prettifier() self.df_prettified = self._prettifier(df).loc[:, self.column_names] @property def column_names(self): return list(map(operator.itemgetter(0), self.COLUMN_INFO)) @property def column_datatype(self): return list(map(operator.itemgetter(1), self.COLUMN_INFO)) def search( self, title_search_query: str, max_num_to_retrieve: int = 1000, # Set a high default to include all if not specified ) -> pd.DataFrame: """ Filters the DataFrame based on the title search query and limits the number of results. """ df = self.df_raw.copy() # Filter by title if search query is provided if title_search_query: df = df[df["title"].str.contains(title_search_query, case=False, na=False)] # Limit the number of papers to retrieve if max_num_to_retrieve is set if max_num_to_retrieve: df = df.head(max_num_to_retrieve) # Prettify the DataFrame df_prettified = self._prettifier(df).loc[:, self.column_names] return df_prettified # --- Sorting and Pagination Management --- class PaperManager: """ Manages sorting, pagination, and search queries for the list of papers. """ def __init__(self, paper_list: PaperList, papers_per_page=30): self.paper_list = paper_list self.papers_per_page = papers_per_page self.sort_method = "hot" # Default sort method self.current_search_query = "" # Initialize with no search query self.top_time_frame = "all time" # Default time frame for "Top" sorting self.sort_papers() # 'current_page' and 'total_pages' are set in 'sort_papers()' def calculate_score(self, row): """ Calculate the score of a paper based on upvotes and age. This mimics the "hotness" algorithm used by platforms like Hacker News. """ upvotes = row.get('👍', 0) date_str = row.get('date', datetime.datetime.now(timezone.utc).strftime("%Y-%m-%d")) try: published_time = datetime.datetime.strptime(date_str, "%Y-%m-%d").replace(tzinfo=timezone.utc) except ValueError: # If parsing fails, use current time to minimize the impact on sorting published_time = datetime.datetime.now(timezone.utc) time_diff = datetime.datetime.now(timezone.utc) - published_time time_diff_hours = time_diff.total_seconds() / 3600 # Convert time difference to hours # Avoid division by zero and apply the hotness formula score = upvotes / ((time_diff_hours + 2) ** 1.5) if (time_diff_hours + 2) > 0 else 0 return score def sort_papers(self): """ Sorts the papers based on the current sort method and search query. 
""" df = self.paper_list.df_raw.copy() # Apply search filter if a search query exists if self.current_search_query: df = df[df["title"].str.contains(self.current_search_query, case=False, na=False)] if self.sort_method == "hot": df['score'] = df.apply(self.calculate_score, axis=1) df_sorted = df.sort_values(by='score', ascending=False).drop(columns=['score']) elif self.sort_method == "new": df_sorted = df.sort_values(by='date', ascending=False) # Sort by 'date' elif self.sort_method == "top": # Filter based on the selected time frame now = datetime.datetime.now(timezone.utc) if self.top_time_frame == "day": time_threshold = now - datetime.timedelta(days=1) elif self.top_time_frame == "week": time_threshold = now - datetime.timedelta(weeks=1) elif self.top_time_frame == "month": time_threshold = now - datetime.timedelta(days=30) elif self.top_time_frame == "year": time_threshold = now - datetime.timedelta(days=365) elif self.top_time_frame == "all time": time_threshold = datetime.datetime.min.replace(tzinfo=timezone.utc) else: time_threshold = datetime.datetime.min.replace(tzinfo=timezone.utc) # Convert 'date' column to datetime df_sorted = df.copy() df_sorted['date_parsed'] = pd.to_datetime(df_sorted['date'], errors='coerce').dt.tz_localize(timezone.utc) df_sorted = df_sorted[df_sorted['date_parsed'] >= time_threshold] df_sorted = df_sorted.sort_values(by='upvotes', ascending=False).drop(columns=['date_parsed']) else: df_sorted = df self.paper_list.df_raw = df_sorted.reset_index(drop=True) self.paper_list.df_prettified = self.paper_list._prettifier(self.paper_list.df_raw).loc[:, self.paper_list.column_names] self.total_pages = max((len(self.paper_list.df_raw) + self.papers_per_page - 1) // self.papers_per_page, 1) self.current_page = 1 def set_sort_method(self, method, time_frame=None): """ Sets the sort method ('hot', 'new', 'top') and re-sorts the papers. If 'top' is selected, also sets the time frame. """ if method not in ["hot", "new", "top"]: method = "hot" print(f"Setting sort method to: {method}") self.sort_method = method if method == "top" and time_frame: self.top_time_frame = time_frame.lower() print(f"Setting top time frame to: {self.top_time_frame}") self.sort_papers() return True # Assume success def set_search_query(self, query: str): """ Sets the current search query and re-sorts the papers. """ print(f"Setting search query to: {query}") self.current_search_query = query self.sort_papers() return True # Assume success def get_current_page_papers(self) -> str: """ Retrieves the HTML string of the current page's papers. """ start = (self.current_page - 1) * self.papers_per_page end = start + self.papers_per_page current_papers = self.paper_list.df_prettified.iloc[start:end] if current_papers.empty: return "