import gradio as gr
import pdfplumber, docx, sqlite3, os, random, tempfile, shutil
from datetime import datetime
import pandas as pd
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import numpy as np
from fpdf import FPDF
import logging
import hashlib
from typing import List, Tuple, Optional
import asyncio
import aiohttp
from sklearn.metrics.pairwise import cosine_similarity
import re
import time
# -----------------------------
# ENHANCED CONFIG
# -----------------------------
DB_NAME = "db.sqlite3"
USERNAME = "aixbi"
PASSWORD = "aixbi@123"
MAX_SENTENCES_CHECK = 15 # Increased for better coverage
LOGO_PATH = "aixbi.jpg"
MIN_SENTENCE_LENGTH = 20 # Reduced for better detection
SIMILARITY_THRESHOLD = 0.85 # For semantic similarity
CHUNK_SIZE = 512 # For processing large documents
LOG_FILE = "plagiarism_detector.log"
# Setup logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler(LOG_FILE),
logging.StreamHandler()
]
)
logger = logging.getLogger(__name__)
# -----------------------------
# ENHANCED DB INIT
# -----------------------------
def init_db():
"""Enhanced database with additional fields and indexes"""
conn = sqlite3.connect(DB_NAME)
c = conn.cursor()
# Main results table with more fields
c.execute("""CREATE TABLE IF NOT EXISTS results (
id INTEGER PRIMARY KEY AUTOINCREMENT,
student_id TEXT NOT NULL,
student_name TEXT NOT NULL,
document_hash TEXT,
ai_score REAL,
plagiarism_score REAL,
word_count INTEGER,
sentence_count INTEGER,
suspicious_sentences_count INTEGER,
processing_time REAL,
file_type TEXT,
timestamp TEXT,
status TEXT DEFAULT 'completed'
)""")
# Suspicious sentences table for detailed tracking
c.execute("""CREATE TABLE IF NOT EXISTS suspicious_sentences (
id INTEGER PRIMARY KEY AUTOINCREMENT,
result_id INTEGER,
sentence TEXT,
similarity_score REAL,
source_found BOOLEAN,
FOREIGN KEY (result_id) REFERENCES results (id)
)""")
# Create indexes for better performance
c.execute("CREATE INDEX IF NOT EXISTS idx_student_id ON results (student_id)")
c.execute("CREATE INDEX IF NOT EXISTS idx_timestamp ON results (timestamp)")
c.execute("CREATE INDEX IF NOT EXISTS idx_document_hash ON results (document_hash)")
conn.commit()
conn.close()
init_db()
# -----------------------------
# ENHANCED MODEL LOADING WITH ERROR HANDLING
# -----------------------------
try:
embedder = SentenceTransformer('all-MiniLM-L6-v2')
tokenizer = AutoTokenizer.from_pretrained("hello-simpleai/chatgpt-detector-roberta")
model = AutoModelForSequenceClassification.from_pretrained("hello-simpleai/chatgpt-detector-roberta")
logger.info("Models loaded successfully")
except Exception as e:
logger.error(f"Error loading models: {e}")
raise
# -----------------------------
# ENHANCED FILE HANDLING
# -----------------------------
def calculate_file_hash(file_path: str) -> str:
"""Calculate SHA-256 hash of file for duplicate detection"""
hash_sha256 = hashlib.sha256()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
def extract_text(file_obj):
"""Extracts text safely from PDF/DOCX/TXT - Enhanced version of working code"""
if file_obj is None:
return None
name = file_obj.name
ext = os.path.splitext(name)[1].lower()
# Copy to temp file preserving extension
with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as tmp:
shutil.copy(file_obj.name, tmp.name)
tmp_path = tmp.name
try:
if ext == ".pdf":
with pdfplumber.open(tmp_path) as pdf:
text = " ".join(page.extract_text() or "" for page in pdf.pages)
elif ext == ".docx":
doc = docx.Document(tmp_path)
text = " ".join(p.text for p in doc.paragraphs)
elif ext == ".txt":
with open(tmp_path, "r", encoding="utf-8", errors="ignore") as f:
text = f.read()
else:
return None
except Exception as e:
logger.error(f"Error extracting text from {name}: {e}")
return None
finally:
# Clean up temp file
try:
os.unlink(tmp_path)
except:
pass
return text.strip() if text else None
def extract_text_with_metadata(file_obj) -> Tuple[Optional[str], Optional[dict]]:
"""Enhanced text extraction with metadata - calls the working extract_text function"""
if file_obj is None:
return None, None
# Use the working extract_text function first
text = extract_text(file_obj)
if text is None:
return None, None
# Now gather metadata safely
name = file_obj.name
ext = os.path.splitext(name)[1].lower()
# Create temporary file again for metadata extraction
with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as tmp:
shutil.copy(file_obj.name, tmp.name)
tmp_path = tmp.name
try:
metadata = {
'file_type': ext,
'file_size': os.path.getsize(tmp_path),
'file_hash': calculate_file_hash(tmp_path),
'word_count': len(text.split()),
'char_count': len(text)
}
# Add specific metadata based on file type
if ext == ".pdf":
try:
with pdfplumber.open(tmp_path) as pdf:
metadata['page_count'] = len(pdf.pages)
except:
metadata['page_count'] = 'Unknown'
elif ext == ".docx":
try:
doc = docx.Document(tmp_path)
metadata['paragraph_count'] = len(doc.paragraphs)
except:
metadata['paragraph_count'] = 'Unknown'
except Exception as e:
logger.error(f"Error gathering metadata from {name}: {e}")
# Return text with minimal metadata if metadata extraction fails
metadata = {
'file_type': ext,
'file_size': 0,
'file_hash': '',
'word_count': len(text.split()),
'char_count': len(text)
}
finally:
try:
os.unlink(tmp_path)
except:
pass
# Final validation
if len(text.strip()) < 50:
logger.warning("Extracted text is too short for meaningful analysis")
return None, None
return text, metadata
# -----------------------------
# ENHANCED AI DETECTION WITH CHUNKING
# -----------------------------
def detect_ai_text(text: str) -> Tuple[float, dict]:
"""Enhanced AI detection with confidence scores and chunking for large texts"""
try:
# Split into chunks for large texts
chunks = [text[i:i+CHUNK_SIZE] for i in range(0, len(text), CHUNK_SIZE)]
scores = []
details = {'chunk_scores': [], 'confidence': 'low'}
for chunk in chunks[:5]: # Limit to first 5 chunks for performance
if len(chunk.strip()) < 20:
continue
inputs = tokenizer(chunk, return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
outputs = model(**inputs)
probabilities = torch.softmax(outputs.logits, dim=1)
score = probabilities[0][1].item() # AI probability
scores.append(score)
details['chunk_scores'].append(round(score * 100, 2))
if not scores:
return 0.0, details
avg_score = np.mean(scores)
std_score = np.std(scores) if len(scores) > 1 else 0
# Determine confidence based on consistency
if std_score < 0.1:
details['confidence'] = 'high'
elif std_score < 0.2:
details['confidence'] = 'medium'
else:
details['confidence'] = 'low'
details['std_deviation'] = round(std_score, 3)
return avg_score, details
except Exception as e:
logger.error(f"Error in AI detection: {e}")
return 0.0, {'error': str(e)}
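# Note: the chunking in detect_ai_text splits on raw characters (CHUNK_SIZE characters
# per chunk), which only loosely matches the classifier's 512-token input limit. Below
# is a minimal, optional sketch of token-aware chunking using the tokenizer loaded
# above; it is not wired into detect_ai_text and is provided as an illustration only.
def chunk_by_tokens(text: str, max_tokens: int = 510) -> List[str]:
    """Split text into chunks of at most max_tokens tokens (leaving room for special tokens)."""
    token_ids = tokenizer.encode(text, add_special_tokens=False)
    chunks = []
    for start in range(0, len(token_ids), max_tokens):
        window = token_ids[start:start + max_tokens]
        # Decode back to plain text so the existing tokenizer(...) call can re-encode it
        chunks.append(tokenizer.decode(window, skip_special_tokens=True))
    return chunks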
# -----------------------------
# ENHANCED PLAGIARISM DETECTION
# -----------------------------
def preprocess_text(text: str) -> List[str]:
"""Extract meaningful sentences with better filtering"""
# Split into sentences using multiple delimiters
sentences = re.split(r'[.!?]+', text)
# Clean and filter sentences
cleaned_sentences = []
for sentence in sentences:
sentence = sentence.strip()
# Filter out short sentences, headers, page numbers, etc.
if (len(sentence) >= MIN_SENTENCE_LENGTH and
not sentence.isdigit() and
len(sentence.split()) >= 5 and
not re.match(r'^(page|chapter|\d+)[\s\d]*$', sentence.lower())):
cleaned_sentences.append(sentence)
return cleaned_sentences
def semantic_similarity_check(sentences: List[str], suspicious_sentences: List[str]) -> List[Tuple[str, float]]:
"""Check for semantic similarity between sentences"""
if not sentences or not suspicious_sentences:
return []
try:
# Encode sentences
sentence_embeddings = embedder.encode(sentences)
suspicious_embeddings = embedder.encode(suspicious_sentences)
# Calculate similarities
similarities = cosine_similarity(sentence_embeddings, suspicious_embeddings)
high_similarity_pairs = []
for i, sentence in enumerate(sentences):
max_similarity = np.max(similarities[i])
if max_similarity > SIMILARITY_THRESHOLD:
high_similarity_pairs.append((sentence, max_similarity))
return high_similarity_pairs
except Exception as e:
logger.error(f"Error in semantic similarity check: {e}")
return []
async def async_web_search(sentence: str, session: aiohttp.ClientSession) -> bool:
"""Async web search for better performance"""
try:
# Simple search simulation - replace with actual search API
# This is a placeholder for actual web search implementation
await asyncio.sleep(0.1) # Simulate network delay
return random.choice([True, False]) # Placeholder result
except Exception as e:
logger.error(f"Error in web search: {e}")
return False
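# async_web_search above is a stub by design ("replace with actual search API"). The
# sketch below shows one way a real lookup could be wired with aiohttp. SEARCH_API_URL,
# SEARCH_API_KEY, and the response schema ("results") are assumptions standing in for
# whatever search provider is actually used; adjust them to that provider's real API.
SEARCH_API_URL = os.environ.get("SEARCH_API_URL", "")
SEARCH_API_KEY = os.environ.get("SEARCH_API_KEY", "")

async def async_web_search_example(sentence: str, session: aiohttp.ClientSession) -> bool:
    """Illustrative only: return True if the configured provider finds a close match."""
    if not SEARCH_API_URL:
        return False  # no provider configured; keep the stub behaviour instead
    try:
        params = {"q": f'"{sentence[:200]}"', "key": SEARCH_API_KEY}
        timeout = aiohttp.ClientTimeout(total=10)
        async with session.get(SEARCH_API_URL, params=params, timeout=timeout) as resp:
            if resp.status != 200:
                return False
            data = await resp.json()
            # Assumes the provider returns matching documents under a "results" key
            return bool(data.get("results"))
    except Exception as e:
        logger.error(f"Error in web search example: {e}")
        return False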
def enhanced_plagiarism_check(sentences: List[str]) -> Tuple[float, List[dict]]:
"""Enhanced plagiarism detection with multiple methods"""
if not sentences:
return 0.0, []
# Sample sentences strategically (beginning, middle, end)
total_sentences = len(sentences)
if total_sentences <= MAX_SENTENCES_CHECK:
samples = sentences
else:
# Take samples from different parts of the document
begin_samples = sentences[:MAX_SENTENCES_CHECK//3]
middle_start = total_sentences // 2 - MAX_SENTENCES_CHECK//6
middle_samples = sentences[middle_start:middle_start + MAX_SENTENCES_CHECK//3]
end_samples = sentences[-(MAX_SENTENCES_CHECK//3):]
samples = begin_samples + middle_samples + end_samples
suspicious_results = []
# Simulate plagiarism detection (replace with actual implementation)
for sentence in samples:
# Placeholder for actual plagiarism detection logic
is_suspicious = len(sentence) > 100 and random.random() > 0.7
confidence = random.uniform(0.5, 1.0) if is_suspicious else random.uniform(0.0, 0.4)
suspicious_results.append({
'sentence': sentence,
'is_suspicious': is_suspicious,
'confidence': confidence,
'source_found': is_suspicious,
'similarity_score': confidence if is_suspicious else 0.0
})
# Calculate overall plagiarism score
suspicious_count = sum(1 for r in suspicious_results if r['is_suspicious'])
plagiarism_score = (suspicious_count / len(samples)) * 100 if samples else 0
return plagiarism_score, suspicious_results
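# enhanced_plagiarism_check above still simulates its verdicts with random values. The
# sketch below shows how the (currently unused) semantic_similarity_check helper could
# score sampled sentences against a locally supplied reference corpus instead. The
# reference_sentences argument is an assumption; nothing in this module builds such a
# corpus automatically.
def corpus_based_plagiarism_check(sentences: List[str], reference_sentences: List[str]) -> Tuple[float, List[dict]]:
    """Score sentences by embedding similarity against a known reference corpus."""
    if not sentences or not reference_sentences:
        return 0.0, []
    matches = semantic_similarity_check(sentences, reference_sentences)
    matched = {s: float(sim) for s, sim in matches}
    results = []
    for sentence in sentences:
        score = matched.get(sentence, 0.0)
        flagged = sentence in matched
        results.append({
            'sentence': sentence,
            'is_suspicious': flagged,
            'confidence': score,
            'source_found': flagged,
            'similarity_score': score,
        })
    plagiarism_score = (sum(1 for r in results if r['is_suspicious']) / len(sentences)) * 100
    return plagiarism_score, results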
# -----------------------------
# ENHANCED DB OPERATIONS
# -----------------------------
def save_result(student_id: str, student_name: str, ai_score: float, plagiarism_score: float,
metadata: dict, suspicious_results: List[dict], processing_time: float) -> int:
"""Enhanced result saving with detailed information"""
conn = sqlite3.connect(DB_NAME)
c = conn.cursor()
# Insert main result
c.execute("""INSERT INTO results
(student_id, student_name, document_hash, ai_score, plagiarism_score,
word_count, sentence_count, suspicious_sentences_count, processing_time,
file_type, timestamp, status)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?)""",
(student_id, student_name, metadata.get('file_hash', ''),
ai_score, plagiarism_score, metadata.get('word_count', 0),
len(suspicious_results), sum(1 for r in suspicious_results if r['is_suspicious']),  # sampled sentence count and flagged count (not the document's total sentences)
processing_time, metadata.get('file_type', ''),
datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'completed'))
result_id = c.lastrowid
# Insert suspicious sentences
for result in suspicious_results:
if result['is_suspicious']:
c.execute("""INSERT INTO suspicious_sentences
(result_id, sentence, similarity_score, source_found)
VALUES (?,?,?,?)""",
(result_id, result['sentence'], result['similarity_score'],
result['source_found']))
conn.commit()
conn.close()
logger.info(f"Saved result for {student_name} ({student_id}) - ID: {result_id}")
return result_id
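# The suspicious_sentences table is populated in save_result but never read back
# anywhere in this module. A small read-back helper is sketched here for completeness;
# it only uses the schema created in init_db above.
def load_suspicious_sentences(result_id: int) -> List[dict]:
    """Fetch the flagged sentences recorded for a single analysis result."""
    conn = sqlite3.connect(DB_NAME)
    c = conn.cursor()
    c.execute("""SELECT sentence, similarity_score, source_found
                 FROM suspicious_sentences WHERE result_id = ?""", (result_id,))
    rows = c.fetchall()
    conn.close()
    return [{'sentence': r[0], 'similarity_score': r[1], 'source_found': bool(r[2])} for r in rows]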
def load_results() -> pd.DataFrame:
"""Enhanced results loading with better formatting"""
conn = sqlite3.connect(DB_NAME)
query = """SELECT id, student_id, student_name,
ROUND(ai_score, 2) as ai_score,
ROUND(plagiarism_score, 2) as plagiarism_score,
word_count, suspicious_sentences_count,
ROUND(processing_time, 2) as processing_time,
file_type, timestamp, status
FROM results
ORDER BY timestamp DESC"""
df = pd.read_sql_query(query, conn)
conn.close()
return df
def check_duplicate_submission(document_hash: str) -> Optional[dict]:
"""Check if document was already analyzed"""
conn = sqlite3.connect(DB_NAME)
c = conn.cursor()
c.execute("SELECT student_name, timestamp FROM results WHERE document_hash = ? ORDER BY timestamp DESC LIMIT 1",
(document_hash,))
result = c.fetchone()
conn.close()
if result:
return {'student_name': result[0], 'timestamp': result[1]}
return None
# -----------------------------
# ENHANCED PDF REPORT WITH UNICODE SUPPORT
# -----------------------------
def clean_text_for_pdf(text: str) -> str:
"""Clean text to be PDF-safe by removing/replacing problematic Unicode characters"""
# Replace common Unicode characters with ASCII equivalents
replacements = {
'•': '-', # bullet point
'–': '-', # en dash
'—': '-', # em dash
'"': '"', # left double quote
'"': '"', # right double quote
''': "'", # left single quote
''': "'", # right single quote
'…': '...', # ellipsis
'®': '(R)', # registered trademark
'©': '(C)', # copyright
'™': '(TM)', # trademark
'€': 'EUR', # euro sign
'£': 'GBP', # pound sign
'¥': 'JPY', # yen sign
'§': 'Section', # section sign
'¶': 'Para', # paragraph sign
'†': '+', # dagger
'‡': '++', # double dagger
'°': ' degrees', # degree sign
'±': '+/-', # plus-minus
'÷': '/', # division sign
'×': 'x', # multiplication sign
'≤': '<=', # less than or equal
'≥': '>=', # greater than or equal
'≠': '!=', # not equal
'∞': 'infinity', # infinity
'α': 'alpha', 'β': 'beta', 'γ': 'gamma', 'δ': 'delta', # Greek letters
'λ': 'lambda', 'μ': 'mu', 'π': 'pi', 'σ': 'sigma', 'Ω': 'Omega'
}
# Apply replacements
for unicode_char, replacement in replacements.items():
text = text.replace(unicode_char, replacement)
# Remove any remaining non-ASCII characters by encoding/decoding
try:
# Try to encode as latin-1 (which FPDF supports)
text.encode('latin-1')
return text
except UnicodeEncodeError:
# If that fails, remove non-ASCII characters
text = text.encode('ascii', 'ignore').decode('ascii')
return text
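# Illustrative self-check (assumes the replacement table above): smart quotes and the
# euro sign are mapped to ASCII so FPDF's latin-1 core fonts can render the text.
if __debug__:
    assert clean_text_for_pdf('\u201cCosts \u20ac5\u201d') == '"Costs EUR5"'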
class EnhancedPDF(FPDF):
def header(self):
if os.path.exists(LOGO_PATH):
try:
self.image(LOGO_PATH, 10, 8, 20)
except:
pass # Skip logo if there's an issue
self.set_font('Arial', 'B', 15)
title = clean_text_for_pdf('AIxBI - Professional Plagiarism Analysis Report')
self.cell(0, 10, title, 0, 1, 'C')
self.ln(10)
def footer(self):
self.set_y(-15)
self.set_font('Arial', 'I', 8)
footer_text = clean_text_for_pdf(f'Page {self.page_no()} | Generated on {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}')
self.cell(0, 10, footer_text, 0, 0, 'C')
def add_section_header(self, title: str):
self.set_font('Arial', 'B', 12)
self.set_fill_color(200, 220, 255)
clean_title = clean_text_for_pdf(title)
self.cell(0, 10, clean_title, 0, 1, 'L', 1)
self.ln(2)
def add_highlighted_text(self, text: str, color: tuple, max_length: int = 100):
self.set_fill_color(*color)
# Clean and truncate text
clean_text = clean_text_for_pdf(text)
display_text = clean_text[:max_length] + "..." if len(clean_text) > max_length else clean_text
try:
self.multi_cell(0, 8, display_text, 1, 'L', 1)
except Exception as e:
# Fallback: create a safe version
safe_text = "Text contains unsupported characters - please check original document"
self.multi_cell(0, 8, safe_text, 1, 'L', 1)
self.ln(2)
def safe_cell(self, w, h, txt, border=0, ln=0, align='L', fill=False):
"""Safe cell method that handles Unicode issues"""
try:
clean_txt = clean_text_for_pdf(str(txt))
self.cell(w, h, clean_txt, border, ln, align, fill)
except Exception as e:
# Fallback to a safe message
self.cell(w, h, "[Content contains unsupported characters]", border, ln, align, fill)
def safe_multi_cell(self, w, h, txt, border=0, align='L', fill=False):
"""Safe multi_cell method that handles Unicode issues"""
try:
clean_txt = clean_text_for_pdf(str(txt))
self.multi_cell(w, h, clean_txt, border, align, fill)
except Exception as e:
# Fallback to a safe message
self.multi_cell(w, h, "[Content contains unsupported characters - please check source document]", border, align, fill)
def generate_enhanced_pdf_report(student_name: str, student_id: str, ai_score: float,
plagiarism_score: float, suspicious_results: List[dict],
metadata: dict, ai_details: dict, output_path: str):
"""Generate comprehensive PDF report with Unicode safety"""
try:
pdf = EnhancedPDF()
pdf.add_page()
# Executive Summary
pdf.add_section_header("EXECUTIVE SUMMARY")
pdf.set_font('Arial', '', 10)
summary_data = [
f"Student: {student_name} ({student_id})",
f"Document Type: {metadata.get('file_type', 'Unknown').upper()}",
f"Word Count: {metadata.get('word_count', 0):,}",
f"AI Detection Score: {ai_score:.1f}% (Confidence: {ai_details.get('confidence', 'N/A')})",
f"Plagiarism Score: {plagiarism_score:.1f}%",
f"Suspicious Sentences: {sum(1 for r in suspicious_results if r['is_suspicious'])}",
f"Analysis Date: {datetime.now().strftime('%B %d, %Y at %H:%M:%S')}"
]
for item in summary_data:
pdf.safe_cell(0, 6, item, 0, 1)
pdf.ln(5)
# Risk Assessment
pdf.add_section_header("RISK ASSESSMENT")
pdf.set_font('Arial', '', 10)
risk_level = "HIGH" if (ai_score > 70 or plagiarism_score > 30) else "MEDIUM" if (ai_score > 40 or plagiarism_score > 15) else "LOW"
risk_color = (255, 200, 200) if risk_level == "HIGH" else (255, 255, 200) if risk_level == "MEDIUM" else (200, 255, 200)
pdf.set_fill_color(*risk_color)
pdf.safe_cell(0, 10, f"Overall Risk Level: {risk_level}", 1, 1, 'C', 1)
pdf.ln(5)
# AI Detection Details
if ai_details.get('chunk_scores'):
pdf.add_section_header("AI DETECTION ANALYSIS")
pdf.set_font('Arial', '', 9)
pdf.safe_cell(0, 6, f"Chunks Analyzed: {len(ai_details['chunk_scores'])}", 0, 1)
pdf.safe_cell(0, 6, f"Score Consistency (Std Dev): {ai_details.get('std_deviation', 'N/A')}", 0, 1)
pdf.ln(3)
# Suspicious Content
suspicious_sentences = [r for r in suspicious_results if r['is_suspicious']]
if suspicious_sentences:
pdf.add_section_header("FLAGGED CONTENT")
pdf.set_font('Arial', '', 9)
for i, result in enumerate(suspicious_sentences[:10], 1): # Limit to 10
pdf.safe_cell(0, 6, f"Issue #{i} (Confidence: {result['confidence']:.1f})", 0, 1)
pdf.add_highlighted_text(result['sentence'], (255, 230, 230), 150)
# Recommendations
pdf.add_section_header("RECOMMENDATIONS")
pdf.set_font('Arial', '', 10)
recommendations = []
if ai_score > 50:
recommendations.append("- Review content for AI-generated sections and rewrite in original voice")
if plagiarism_score > 20:
recommendations.append("- Add proper citations for referenced material")
recommendations.append("- Paraphrase flagged sentences to ensure originality")
if len(suspicious_sentences) > 5:
recommendations.append("- Conduct thorough revision focusing on highlighted sections")
recommendations.extend([
"- Use plagiarism detection tools during writing process",
"- Ensure all sources are properly attributed",
"- Maintain academic integrity standards"
])
for rec in recommendations:
pdf.safe_multi_cell(0, 6, rec)
pdf.ln(1)
# Generate PDF with error handling
pdf.output(output_path)
logger.info(f"PDF report generated successfully: {output_path}")
except Exception as e:
logger.error(f"Error generating PDF report: {e}")
# Create a simple fallback PDF
try:
simple_pdf = FPDF()
simple_pdf.add_page()
simple_pdf.set_font('Arial', 'B', 16)
simple_pdf.cell(0, 10, 'AIxBI Analysis Report', 0, 1, 'C')
simple_pdf.ln(10)
simple_pdf.set_font('Arial', '', 12)
simple_pdf.cell(0, 10, f'Student: {clean_text_for_pdf(student_name)}', 0, 1)
simple_pdf.cell(0, 10, f'Student ID: {clean_text_for_pdf(student_id)}', 0, 1)
simple_pdf.cell(0, 10, f'AI Score: {ai_score:.1f}%', 0, 1)
simple_pdf.cell(0, 10, f'Plagiarism Score: {plagiarism_score:.1f}%', 0, 1)
simple_pdf.cell(0, 10, f'Date: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}', 0, 1)
simple_pdf.ln(10)
simple_pdf.multi_cell(0, 10, 'Note: Full report could not be generated due to character encoding issues. Please contact administrator if this persists.')
simple_pdf.output(output_path)
logger.info(f"Fallback PDF report generated: {output_path}")
except Exception as fallback_error:
logger.error(f"Even fallback PDF generation failed: {fallback_error}")
raise Exception(f"PDF generation failed: {e}")
# -----------------------------
# ENHANCED APP LOGIC
# -----------------------------
def login(user: str, pwd: str):
"""Enhanced login with logging"""
if user == USERNAME and pwd == PASSWORD:
logger.info(f"Successful login for user: {user}")
return gr.update(visible=False), gr.update(visible=True), ""
else:
logger.warning(f"Failed login attempt for user: {user}")
return gr.update(), gr.update(), "❌ Invalid username or password!"
def analyze_document(student_name: str, student_id: str, file_obj) -> Tuple:
"""Enhanced document analysis with comprehensive error handling"""
start_time = time.time()
# Input validation
if not all([student_name.strip(), student_id.strip(), file_obj]):
return "❌ Please fill all fields and upload a document.", None, None, None, None, None
logger.info(f"Starting analysis for {student_name} ({student_id})")
try:
# Extract text and metadata using the working function
result = extract_text_with_metadata(file_obj)
if result is None or result[0] is None:
return "❌ Error: Could not read the file. Please upload a valid PDF, DOCX, or TXT.", None, None, None, None, None
text, metadata = result
# Check for duplicate submission
duplicate = check_duplicate_submission(metadata['file_hash'])
if duplicate:
logger.warning(f"Duplicate submission detected for {student_name}")
return f"⚠️ Warning: This document was previously analyzed by {duplicate['student_name']} on {duplicate['timestamp']}", None, None, None, None, None
# Preprocess text
sentences = preprocess_text(text)
if len(sentences) < 3:
return "❌ Error: Document too short for meaningful analysis (minimum 3 sentences required).", None, None, None, None, None
# AI Detection
ai_score, ai_details = detect_ai_text(text)
ai_percentage = ai_score * 100
# Plagiarism Detection
plagiarism_score, suspicious_results = enhanced_plagiarism_check(sentences)
# Calculate processing time
processing_time = time.time() - start_time
# Save results
result_id = save_result(student_id, student_name, ai_percentage, plagiarism_score,
metadata, suspicious_results, processing_time)
# Generate PDF report
output_pdf = f"reports/{student_id}_{result_id}_report.pdf"
os.makedirs("reports", exist_ok=True)
generate_enhanced_pdf_report(student_name, student_id, ai_percentage, plagiarism_score,
suspicious_results, metadata, ai_details, output_pdf)
# Prepare highlighted text
suspicious_sentences = [r['sentence'] for r in suspicious_results if r['is_suspicious']]
if suspicious_sentences:
highlighted_text = "\n\n".join([f"🚨 FLAGGED: {s[:200]}..." if len(s) > 200 else f"🚨 FLAGGED: {s}"
for s in suspicious_sentences[:5]])
else:
highlighted_text = "✅ No suspicious sentences detected."
# Status message with detailed breakdown
status_msg = f"""✅ Analysis completed for {student_name} ({student_id})
📊 Processed {metadata['word_count']:,} words in {processing_time:.1f} seconds
🤖 AI Detection: {ai_percentage:.1f}% (Confidence: {ai_details.get('confidence', 'N/A')})
📋 Plagiarism: {plagiarism_score:.1f}% ({len(suspicious_sentences)} flagged sentences)
📄 Report ID: {result_id}"""
logger.info(f"Analysis completed for {student_name} - AI: {ai_percentage:.1f}%, Plagiarism: {plagiarism_score:.1f}%")
return (status_msg, round(ai_percentage, 2), round(plagiarism_score, 2),
output_pdf, highlighted_text, f"📈 Total sentences analyzed: {len(sentences)}")
except Exception as e:
logger.error(f"Error during analysis: {e}")
return f"❌ Error during analysis: {str(e)}", None, None, None, None, None
def show_enhanced_dashboard():
"""Enhanced dashboard with better formatting"""
try:
df = load_results()
if df.empty:
return pd.DataFrame({"Message": ["No analysis results found. Upload and analyze documents to see data here."]})
return df
except Exception as e:
logger.error(f"Error loading dashboard: {e}")
return pd.DataFrame({"Error": [f"Failed to load data: {str(e)}"]})
def get_statistics():
"""Get summary statistics"""
try:
conn = sqlite3.connect(DB_NAME)
c = conn.cursor()
# Basic stats
c.execute("SELECT COUNT(*), AVG(ai_score), AVG(plagiarism_score), AVG(processing_time) FROM results")
stats = c.fetchone()
# High risk documents
c.execute("SELECT COUNT(*) FROM results WHERE ai_score > 70 OR plagiarism_score > 30")
high_risk = c.fetchone()[0]
conn.close()
if stats[0] == 0:
return "No analyses completed yet."
return f"""📊 **Analysis Statistics**
Total Documents Analyzed: {stats[0]:,}
Average AI Score: {stats[1]:.1f}%
Average Plagiarism Score: {stats[2]:.1f}%
Average Processing Time: {stats[3]:.1f}s
High Risk Documents: {high_risk} ({(high_risk/stats[0]*100):.1f}%)"""
except Exception as e:
logger.error(f"Error getting statistics: {e}")
return f"Error loading statistics: {str(e)}"
# -----------------------------
# ENHANCED GRADIO UI
# -----------------------------
def create_enhanced_ui():
with gr.Blocks(theme="soft", title="AIxBI - Professional Plagiarism Detection") as demo:
# Header
with gr.Row():
if os.path.exists(LOGO_PATH):
gr.Image(LOGO_PATH, height=80, width=80, show_label=False, container=False)
with gr.Column():
gr.Markdown("""
# 🔍 **AIxBI - Professional Document Analysis Suite**
### Advanced AI Detection & Plagiarism Checking System
*Ensuring Academic Integrity with Cutting-Edge Technology*
""")
# Login Section
login_box = gr.Group(visible=True)
with login_box:
gr.Markdown("## 🔐 **Secure Login**")
with gr.Row():
user = gr.Textbox(label="👤 Username", placeholder="Enter username")
pwd = gr.Textbox(label="🔑 Password", type="password", placeholder="Enter password")
login_btn = gr.Button("🚀 Login", variant="primary", size="lg")
login_msg = gr.Markdown("", elem_classes="login-message")
# Main Application
app_box = gr.Group(visible=False)
with app_box:
with gr.Tabs():
# Analysis Tab
with gr.Tab("📄 Document Analysis", elem_id="analysis-tab"):
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("### 👨🎓 **Student Information**")
student_name = gr.Textbox(label="📝 Student Name", placeholder="Enter full name")
student_id = gr.Textbox(label="🆔 Student ID", placeholder="Enter student ID")
with gr.Column(scale=1):
gr.Markdown("### 📎 **Document Upload**")
file_upload = gr.File(
label="📄 Upload Document",
file_types=[".pdf", ".docx", ".txt"],
file_count="single"
)
analyze_btn = gr.Button("🔍 Analyze Document", variant="primary", size="lg")
with gr.Row():
with gr.Column():
status = gr.Textbox(label="📊 Analysis Status", lines=4, interactive=False)
doc_info = gr.Textbox(label="📋 Document Information", interactive=False)
with gr.Column():
with gr.Row():
ai_score = gr.Number(label="🤖 AI Detection Score (%)", interactive=False)
plagiarism_score = gr.Number(label="📋 Plagiarism Score (%)", interactive=False)
suspicious_text = gr.Textbox(
label="🚨 Flagged Content",
lines=8,
placeholder="Suspicious sentences will appear here...",
interactive=False
)
pdf_output = gr.File(label="📄 Download Detailed Report")
# Dashboard Tab
with gr.Tab("📊 Analysis Dashboard", elem_id="dashboard-tab"):
with gr.Row():
dashboard_btn = gr.Button("🔄 Refresh Dashboard", variant="secondary")
stats_btn = gr.Button("📈 Show Statistics", variant="secondary")
stats_display = gr.Markdown("", elem_classes="stats-display")
dashboard = gr.Dataframe(
headers=["ID", "Student ID", "Student Name", "AI Score (%)",
"Plagiarism Score (%)", "Word Count", "Flagged Sentences",
"Processing Time (s)", "File Type", "Timestamp", "Status"],
interactive=False,
wrap=True
)
# Help Tab
with gr.Tab("❓ Help & Guidelines", elem_id="help-tab"):
gr.Markdown("""
## 📖 **User Guide**
### 🎯 **How to Use**
1. **Login** with your credentials
2. **Enter student information** (name and ID)
3. **Upload document** (PDF, DOCX, or TXT format)
4. **Click "Analyze Document"** and wait for results
5. **Download the detailed PDF report** for comprehensive analysis
### 🔍 **Understanding Results**
#### 🤖 **AI Detection Score**
- **0-30%**: Low probability of AI-generated content
- **31-60%**: Moderate probability - review recommended
- **61-100%**: High probability - likely AI-generated
#### 📋 **Plagiarism Score**
- **0-15%**: Acceptable similarity level
- **16-30%**: Moderate concern - check citations
- **31%+**: High concern - significant plagiarism detected
#### 🚨 **Risk Levels**
- **🟢 LOW**: Minimal concerns detected
- **🟡 MEDIUM**: Some issues found - review needed
- **🔴 HIGH**: Serious concerns - immediate action required
### 📄 **Supported File Formats**
- **PDF**: Adobe PDF documents
- **DOCX**: Microsoft Word documents
- **TXT**: Plain text files
### 🛡️ **Best Practices**
- Upload final versions of documents
- Ensure documents contain at least 100 words
- Review flagged content carefully
- Use reports for educational feedback
### ⚠️ **Important Notes**
- Analysis results are for educational purposes
- False positives may occur - human review recommended
- Keep PDF reports for documentation
- All analyses are logged for institutional records
""")
# Event Handlers
login_btn.click(
fn=login,
inputs=[user, pwd],
outputs=[login_box, app_box, login_msg]
)
analyze_btn.click(
fn=analyze_document,
inputs=[student_name, student_id, file_upload],
outputs=[status, ai_score, plagiarism_score, pdf_output, suspicious_text, doc_info]
)
dashboard_btn.click(
fn=show_enhanced_dashboard,
outputs=[dashboard]
)
stats_btn.click(
fn=get_statistics,
outputs=[stats_display]
)
return demo
# -----------------------------
# ADDITIONAL UTILITY FUNCTIONS
# -----------------------------
def cleanup_old_reports(days_old: int = 30):
"""Clean up old report files"""
try:
import glob
report_files = glob.glob("reports/*.pdf")
current_time = time.time()
for file_path in report_files:
if os.path.getmtime(file_path) < (current_time - days_old * 24 * 60 * 60):
os.remove(file_path)
logger.info(f"Cleaned up old report: {file_path}")
except Exception as e:
logger.error(f"Error during cleanup: {e}")
def export_database_backup():
"""Export database to CSV for backup"""
try:
df = load_results()
backup_file = f"backup_results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
df.to_csv(backup_file, index=False)
logger.info(f"Database backup created: {backup_file}")
return backup_file
except Exception as e:
logger.error(f"Error creating backup: {e}")
return None
def validate_system_requirements():
"""Check if all required components are available"""
requirements = {
"Models loaded": embedder is not None and model is not None,
"Database accessible": os.path.exists(DB_NAME),
"Reports directory": os.path.exists("reports") or os.makedirs("reports", exist_ok=True) or True,
"Logo file": os.path.exists(LOGO_PATH)
}
for requirement, status in requirements.items():
if status:
logger.info(f"✅ {requirement}")
else:
logger.warning(f"❌ {requirement}")
return all(requirements.values())
# -----------------------------
# PERFORMANCE MONITORING
# -----------------------------
def log_performance_metrics():
"""Log system performance metrics"""
try:
import psutil
cpu_percent = psutil.cpu_percent()
memory_percent = psutil.virtual_memory().percent
disk_usage = psutil.disk_usage('.').percent
logger.info(f"Performance - CPU: {cpu_percent}%, Memory: {memory_percent}%, Disk: {disk_usage}%")
# Log database size
if os.path.exists(DB_NAME):
db_size = os.path.getsize(DB_NAME) / (1024 * 1024) # MB
logger.info(f"Database size: {db_size:.2f} MB")
except ImportError:
logger.warning("psutil not available - performance monitoring disabled")
except Exception as e:
logger.error(f"Error logging performance metrics: {e}")
# -----------------------------
# MAIN APPLICATION STARTUP
# -----------------------------
def main():
"""Main application entry point"""
try:
logger.info("Starting AIxBI Plagiarism Detection System")
# Validate system requirements
if not validate_system_requirements():
logger.error("System requirements not met. Please check the logs.")
return
# Clean up old reports on startup
cleanup_old_reports()
# Log performance metrics
log_performance_metrics()
# Create and launch the enhanced UI
demo = create_enhanced_ui()
logger.info("System ready - launching web interface")
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=False,
show_error=True,
quiet=False
)
except Exception as e:
logger.error(f"Failed to start application: {e}")
raise
if __name__ == "__main__":
main()