from fastapi import APIRouter, UploadFile, File, Form, HTTPException
import os
import shutil
import uuid
from bs4 import BeautifulSoup
from PyPDF2 import PdfReader
import requests

router = APIRouter(tags=["files"])

# Per-session file storage: maps a session ID to the path of its uploaded document (imported in main.py)
session_files = {}

# Root folder for uploads
UPLOAD_ROOT = "uploaded_files"
os.makedirs(UPLOAD_ROOT, exist_ok=True)

# Minimum length for any file (in characters)
MIN_FILE_LENGTH = 500
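
# Note (illustrative assumption, not taken from this file): main.py is expected to import
# `router` (and, if needed, `session_files`) from this module and mount it, roughly like:
#
#     from fastapi import FastAPI
#     from files import router as files_router  # module path is an assumption
#
#     app = FastAPI()
#     app.include_router(files_router)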

def validate_pdf(file_path: str) -> bool:
    """Check that the file is a readable PDF containing at least MIN_FILE_LENGTH characters of text."""
    try:
        reader = PdfReader(file_path)
        # Check that the PDF has at least one page
        if len(reader.pages) == 0:
            return False

        # Extract text to check length
        text = ""
        for page in reader.pages:
            text += page.extract_text() or ""  # extract_text() may return None for pages without text

        return len(text) >= MIN_FILE_LENGTH
    except Exception:
        return False

def validate_markdown(file_path: str) -> bool:
    """Check that the file is long enough and contains at least one common Markdown marker."""
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
            # Check minimum length and presence of markdown elements
            return len(content) >= MIN_FILE_LENGTH and any(marker in content for marker in ['#', '-', '*', '`', '[', '>'])
    except Exception:
        return False

def validate_html(file_path: str) -> bool:
    """Check that the file is long enough and can be parsed as HTML."""
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
            # Check minimum length, then make sure BeautifulSoup can parse the content
            if len(content) < MIN_FILE_LENGTH:
                return False
            BeautifulSoup(content, 'html.parser')
            return True
    except Exception:
        return False

def validate_txt(file_path: str) -> bool:
    """Check that the file is readable UTF-8 text with at least MIN_FILE_LENGTH characters after stripping whitespace."""
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
            return len(content.strip()) >= MIN_FILE_LENGTH
    except Exception:
        return False

# Initialize session files dictionary with pre-calculated documents
precalculated_docs = ["the-bitter-lesson", "hurricane-faq", "pokemon-guide"]

for doc_id in precalculated_docs:
    doc_dir = os.path.join(UPLOAD_ROOT, doc_id)
    if os.path.exists(doc_dir):
        doc_files_dir = os.path.join(doc_dir, "uploaded_files")
        if os.path.exists(doc_files_dir):
            for filename in os.listdir(doc_files_dir):
                if filename.endswith((".pdf", ".txt", ".html", ".md")):
                    file_path = os.path.join(doc_files_dir, filename)
                    session_files[doc_id] = file_path
                    print(f"Added pre-calculated document to session_files: {doc_id} -> {file_path}")
                    break
        else:
            # Search directly in the doc_dir
            for filename in os.listdir(doc_dir):
                if filename.endswith((".pdf", ".txt", ".html", ".md")):
                    file_path = os.path.join(doc_dir, filename)
                    session_files[doc_id] = file_path
                    print(f"Added pre-calculated document to session_files: {doc_id} -> {file_path}")
                    break
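
# After this loop, session_files maps each pre-calculated document id to the first matching
# file found for it, e.g. (illustrative only; actual paths depend on what exists on disk):
#     session_files["the-bitter-lesson"] -> "uploaded_files/the-bitter-lesson/uploaded_files/document.pdf"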

@router.post("/upload")
async def upload_file(file: UploadFile = File(...)):
    """
    Upload a file to the server and generate a session ID
    
    Args:
        file: The file to upload
        
    Returns:
        Dictionary with filename, status and session_id
    """
    # Check if the file is a PDF, TXT, HTML or MD
    if not file.filename.endswith(('.pdf', '.txt', '.html', '.md')):
        raise HTTPException(status_code=400, detail="Only PDF, TXT, HTML and MD files are accepted")
    
    # Get the file extension
    file_extension = os.path.splitext(file.filename)[1].lower()
    
    # Generate a session ID for this file
    session_id = str(uuid.uuid4())
    
    # Create the session directory structure
    session_dir = os.path.join(UPLOAD_ROOT, session_id)
    uploaded_files_dir = os.path.join(session_dir, "uploaded_files")
    os.makedirs(uploaded_files_dir, exist_ok=True)
    
    # Create standardized filename
    standardized_filename = f"document{file_extension}"
    
    # Create the full path to save the file
    file_path = os.path.join(uploaded_files_dir, standardized_filename)
    
    # Save the file
    with open(file_path, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)
    
    # Validate the file according to its type
    is_valid = False
    error_detail = ""
    
    if file_extension == '.pdf':
        try:
            reader = PdfReader(file_path)
            if len(reader.pages) == 0:
                error_detail = "PDF must contain at least one page"
                is_valid = False
            else:
                text = ""
                for page in reader.pages:
                    text += page.extract_text() or ""  # extract_text() may return None for pages without text
                
                if len(text) < MIN_FILE_LENGTH:
                    error_detail = f"PDF contains {len(text)} characters but must contain at least {MIN_FILE_LENGTH}"
                    is_valid = False
                else:
                    is_valid = True
        except Exception:
            error_detail = "Invalid PDF format"
            is_valid = False
    elif file_extension == '.md':
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
                
                if len(content) < MIN_FILE_LENGTH:
                    error_detail = f"Markdown file contains {len(content)} characters but must contain at least {MIN_FILE_LENGTH}"
                    is_valid = False
                elif not any(marker in content for marker in ['#', '-', '*', '`', '[', '>']):
                    error_detail = "Markdown file does not contain any valid Markdown elements"
                    is_valid = False
                else:
                    is_valid = True
        except Exception:
            error_detail = "Invalid Markdown format"
            is_valid = False
    elif file_extension == '.html':
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
                
                if len(content) < MIN_FILE_LENGTH:
                    error_detail = f"HTML file contains {len(content)} characters but must contain at least {MIN_FILE_LENGTH}"
                    is_valid = False
                else:
                    BeautifulSoup(content, 'html.parser')
                    is_valid = True
        except Exception:
            error_detail = "Invalid HTML format"
            is_valid = False
    elif file_extension == '.txt':
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
                content_length = len(content.strip())
                
                if content_length < MIN_FILE_LENGTH:
                    error_detail = f"Text file contains {content_length} characters but must contain at least {MIN_FILE_LENGTH}"
                    is_valid = False
                else:
                    is_valid = True
        except Exception:
            error_detail = "Invalid text format"
            is_valid = False
    
    if not is_valid:
        # Delete the invalid file
        os.remove(file_path)
        raise HTTPException(status_code=400, detail=error_detail or f"Invalid {file_extension[1:].upper()} file")
    
    # Store file path for later use
    session_files[session_id] = file_path
    
    return {"filename": standardized_filename, "status": "uploaded", "session_id": session_id}

@router.post("/upload-url")
async def upload_url(url: str = Form(...)):
    """
    Upload content from a URL, extract text and store it as a document
    
    Args:
        url: The URL to download content from
        
    Returns:
        Dictionary with status and session_id
    """
    try:
        # Retrieve the content from the URL
        response = requests.get(url, timeout=10)
        response.raise_for_status()  # Raise an exception for 4xx/5xx HTTP responses
        
        # Extract text from HTML with BeautifulSoup
        soup = BeautifulSoup(response.text, 'html.parser')
        
        # Remove script and style tags
        for script in soup(["script", "style"]):
            script.extract()
            
        # Extract the text
        text = soup.get_text()
        
        # Clean the text (remove multiple spaces and empty lines)
        lines = (line.strip() for line in text.splitlines())
        chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
        text = '\n'.join(chunk for chunk in chunks if chunk)
        
        # Limit the text to 25,000 characters if necessary
        if len(text) > 25000:
            text = text[:25000]
            
        # Check if the text is long enough
        if len(text.strip()) < MIN_FILE_LENGTH:
            raise HTTPException(
                status_code=400, 
                detail=f"The content is too short ({len(text.strip())} characters). Minimum required: {MIN_FILE_LENGTH} characters."
            )
        
        # Generate a session ID
        session_id = str(uuid.uuid4())
        # Create the directory structure for the session
        session_dir = os.path.join(UPLOAD_ROOT, session_id)
        uploaded_files_dir = os.path.join(session_dir, "uploaded_files")
        os.makedirs(uploaded_files_dir, exist_ok=True)
        
        # Path of the file to save
        file_path = os.path.join(uploaded_files_dir, "document.txt")
        
        # Save the text
        with open(file_path, "w", encoding="utf-8") as f:
            f.write(text)
            
        # Store the file path for later use
        session_files[session_id] = file_path
        
        return {
            "status": "uploaded", 
            "session_id": session_id,
            "filename": "document.txt",
            "text_length": len(text),
            "source_url": url
        }
    
    except HTTPException:
        # Propagate validation errors (e.g. content too short) instead of wrapping them as 500s
        raise
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=400, detail=f"Error retrieving the URL: {str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error processing the URL: {str(e)}")
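

if __name__ == "__main__":
    # Illustrative manual check of the /upload-url endpoint against a locally running app.
    # Everything below is an assumption, not taken from this project: the base address,
    # the absence of a route prefix, and the target URL are placeholders.
    resp = requests.post(
        "http://localhost:8000/upload-url",
        data={"url": "https://example.com/article.html"},
        timeout=30,
    )
    print(resp.status_code, resp.json())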