#!/usr/bin/env python
import datetime
import pandas as pd
import tqdm.auto
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi
import gradio as gr
import datasets  # Ensure the datasets library is imported
from datetime import timezone
import atexit  # To gracefully shut down the scheduler
import logging  # For logging purposes

# --- Logging Configuration ---
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- Data Loading and Processing ---
api = HfApi()


def get_df() -> pd.DataFrame:
    """
    Loads and merges the papers and stats datasets, preprocesses the data by
    removing unnecessary columns, and adds a 'paper_page' link for each paper.
    """
    try:
        # Load datasets
        logger.info("Loading 'daily-papers' dataset.")
        df_papers = datasets.load_dataset("hysts-bot-data/daily-papers", split="train").to_pandas()
        logger.info("Loading 'daily-papers-stats' dataset.")
        df_stats = datasets.load_dataset("hysts-bot-data/daily-papers-stats", split="train").to_pandas()

        # Merge datasets on 'arxiv_id'
        logger.info("Merging datasets on 'arxiv_id'.")
        df = pd.merge(left=df_papers, right=df_stats, on="arxiv_id", suffixes=('_papers', '_stats'))

        # Reverse the DataFrame to have the latest papers first
        df = df[::-1].reset_index(drop=True)

        # Ensure 'date' is in datetime format and handle missing dates
        logger.info("Processing 'date' column.")
        df["date"] = pd.to_datetime(df["date"], errors='coerce')
        df["date"] = df["date"].dt.strftime("%Y-%m-%d").fillna(
            datetime.datetime.now(timezone.utc).strftime("%Y-%m-%d")
        )

        # Prepare the DataFrame by removing 'abstract'
        logger.info("Removing 'abstract' column if present.")
        if 'abstract' in df.columns:
            df = df.drop(columns=['abstract'])

        # Add 'paper_page' links
        logger.info("Adding 'paper_page' links.")
        df["paper_page"] = df["arxiv_id"].apply(lambda x: f"https://huggingface.co/papers/{x}")

        # Verify that the 'date' column exists
        if 'date' not in df.columns:
            logger.error("'date' column is missing from the DataFrame. Filling with current date.")
            df["date"] = datetime.datetime.now(timezone.utc).strftime("%Y-%m-%d")

        logger.info("DataFrame preparation complete.")
        return df
    except Exception as e:
        logger.error(f"Error in get_df: {e}")
        return pd.DataFrame()  # Return empty DataFrame on error
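
# The classes below assume the merged frame exposes at least the columns
# 'arxiv_id', 'date', 'title', 'github', 'upvotes', and 'num_comments',
# plus the 'paper_page' link added in get_df(). A quick local sanity check
# (requires access to the Hugging Face Hub) could be:
#
#     df = get_df()
#     print(df[["arxiv_id", "date", "title", "paper_page"]].head())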
""" REQUIRED_COLUMNS = ["arxiv_id", "date_display", "date", "paper_page", "title", "github", "👍", "💬"] @staticmethod def get_github_link(link: str) -> str: if not link: return "" return Prettifier.create_link("github", link) @staticmethod def create_link(text: str, url: str) -> str: return f'{text}' def __call__(self, df: pd.DataFrame) -> pd.DataFrame: new_rows = [] for _, row in df.iterrows(): # Handle date_display as a clickable link date_display = Prettifier.create_link(row.get("date", ""), f"https://huggingface.co/papers?date={row.get('date', '')}") new_row = { "arxiv_id": row.get("arxiv_id", ""), # Include arxiv_id "date_display": date_display, # For display "date": row.get("date", datetime.datetime.now(timezone.utc).strftime("%Y-%m-%d")), # For internal calculations "paper_page": Prettifier.create_link(row.get("arxiv_id", ""), row.get("paper_page", "#")), "title": row.get("title", "No title"), "github": Prettifier.get_github_link(row.get("github", "")), "👍": row.get("upvotes", 0), "💬": row.get("num_comments", 0), } new_rows.append(new_row) # If no rows, return empty DataFrame with required columns to prevent KeyError if not new_rows: return pd.DataFrame(columns=self.REQUIRED_COLUMNS) return pd.DataFrame(new_rows) class PaperList: """ Manages the list of papers. """ COLUMN_INFO = [ ["arxiv_id", "str"], # Added arxiv_id ["date_display", "markdown"], # For display ["date", "str"], # For internal use ["paper_page", "markdown"], ["title", "str"], ["github", "markdown"], ["👍", "number"], ["💬", "number"], ] def __init__(self, df: pd.DataFrame): self.df_raw = df self._prettifier = Prettifier() self.df_prettified = self._prettifier(df).loc[:, self.column_names] @property def column_names(self): return [col[0] for col in self.COLUMN_INFO] @property def column_datatype(self): return [col[1] for col in self.COLUMN_INFO] def get_prettified_df(self) -> pd.DataFrame: """ Returns the prettified DataFrame. """ return self.df_prettified # --- Sorting and Pagination Management --- class PaperManager: """ Manages sorting and pagination for the list of papers. """ def __init__(self, paper_list: PaperList, papers_per_page=30): self.paper_list = paper_list self.papers_per_page = papers_per_page self.sort_method = "hot" # Default sort method self.sort_papers() # 'current_page' and 'total_pages' are set in 'sort_papers()' def calculate_score(self, row): """ Calculate the score of a paper based on upvotes and age. This mimics the "hotness" algorithm used by platforms like Hacker News. """ upvotes = row.get('upvotes', 0) # Corrected from '👍' to 'upvotes' date_str = row.get('date', datetime.datetime.now(timezone.utc).strftime("%Y-%m-%d")) try: published_time = datetime.datetime.strptime(date_str, "%Y-%m-%d").replace(tzinfo=timezone.utc) except ValueError: # If parsing fails, use current time to minimize the impact on sorting published_time = datetime.datetime.now(timezone.utc) time_diff = datetime.datetime.now(timezone.utc) - published_time time_diff_hours = time_diff.total_seconds() / 3600 # Convert time difference to hours # Avoid division by zero and apply the hotness formula score = upvotes / ((time_diff_hours + 2) ** 1.5) if (time_diff_hours + 2) > 0 else 0 return score def sort_papers(self): """ Sorts the papers based on the current sort method. 
""" df = self.paper_list.df_raw.copy() if self.sort_method == "hot": if not df.empty: df = df.drop(columns=['score'], errors='ignore') # Remove existing 'score' column if present df['score'] = df.apply(self.calculate_score, axis=1) df_sorted = df.sort_values(by='score', ascending=False).drop(columns=['score']) else: df_sorted = df elif self.sort_method == "new": df_sorted = df.sort_values(by='date', ascending=False) # Sort by 'date' else: df_sorted = df self.paper_list.df_raw = df_sorted.reset_index(drop=True) self.paper_list.df_prettified = self.paper_list._prettifier(self.paper_list.df_raw).loc[:, self.paper_list.column_names] self.total_pages = max((len(self.paper_list.df_raw) + self.papers_per_page - 1) // self.papers_per_page, 1) self.current_page = 1 logger.info(f"Papers sorted by {self.sort_method}. Total pages: {self.total_pages}") def set_sort_method(self, method, time_frame=None): """ Sets the sort method ('hot', 'new') and re-sorts the papers. """ if method not in ["hot", "new"]: method = "hot" logger.info(f"Setting sort method to: {method}") self.sort_method = method self.sort_papers() return True # Assume success def get_current_page_papers(self) -> str: """ Retrieves the HTML string of the current page's papers. """ start = (self.current_page - 1) * self.papers_per_page end = start + self.papers_per_page current_papers = self.paper_list.df_prettified.iloc[start:end] if current_papers.empty: return "
        # Build the HTML for the current page (header text begins "Daily Papers | ...").