from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, HttpUrl
from typing import Dict, Any, List
import logging
import os
from supabase import create_client

from mediaunmasked.scrapers.article_scraper import ArticleScraper
from mediaunmasked.analyzers.scoring import MediaScorer
from mediaunmasked.utils.logging_config import setup_logging

# Initialize logging
setup_logging()
logger = logging.getLogger(__name__)

# Initialize router and dependencies
router = APIRouter(tags=["analysis"])
scraper = ArticleScraper()
scorer = MediaScorer()

# Initialize Supabase connection.
# Note: create_client returns a synchronous client, so its calls are not awaited below.
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")
if not SUPABASE_URL or not SUPABASE_KEY:
    raise RuntimeError("SUPABASE_URL and SUPABASE_KEY environment variables must be set")
supabase = create_client(SUPABASE_URL, SUPABASE_KEY)
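
# The calls below assume an `article_analysis` table already exists in Supabase.
# A minimal schema sketch, with column names taken from the upsert payload in
# analyze_article (the column types here are an assumption, not verified):
#
#   create table if not exists article_analysis (
#       url text primary key,
#       headline text,
#       content text,
#       sentiment text,
#       bias text,
#       bias_score double precision,
#       bias_percentage double precision,
#       flagged_phrases jsonb,
#       media_score jsonb
#   );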

class ArticleRequest(BaseModel):
    url: HttpUrl

class MediaScoreDetails(BaseModel):
    headline_analysis: Dict[str, Any]
    sentiment_analysis: Dict[str, Any]
    bias_analysis: Dict[str, Any]
    evidence_analysis: Dict[str, Any]

class MediaScore(BaseModel):
    media_unmasked_score: float
    rating: str
    details: MediaScoreDetails

class AnalysisResponse(BaseModel):
    headline: str
    content: str
    sentiment: str
    bias: str
    bias_score: float
    bias_percentage: float
    flagged_phrases: List[str]
    media_score: MediaScore

@router.post("/analyze", response_model=AnalysisResponse)
async def analyze_article(request: ArticleRequest) -> AnalysisResponse:
    """
    Analyze an article for bias, sentiment, and credibility.
    
    Args:
        request: ArticleRequest containing the URL to analyze
        
    Returns:
        AnalysisResponse with complete analysis results
        
    Raises:
        HTTPException: If scraping or analysis fails
    """
    try:
        logger.info(f"Analyzing article: {request.url}")
        
        # Check whether the article has already been analyzed
        existing_article = (
            supabase.table('article_analysis')
            .select('*')
            .eq('url', str(request.url))
            .execute()
        )

        if existing_article.data:
            logger.info("Article already analyzed. Returning cached data.")
            # Return the existing analysis result
            cached_data = existing_article.data[0]
            return AnalysisResponse.parse_obj(cached_data)
        
        # Scrape article
        article = scraper.scrape_article(str(request.url))
        if not article:
            raise HTTPException(
                status_code=400,
                detail="Failed to scrape article content"
            )
        
        # Analyze content
        analysis = scorer.calculate_media_score(
            article["headline"],
            article["content"]
        )
        
        # Log raw values for debugging
        logger.info("Raw values:")
        logger.info(f"media_unmasked_score type: {type(analysis['media_unmasked_score'])}")
        logger.info(f"media_unmasked_score value: {analysis['media_unmasked_score']}")
        
        # Prepare response data
        response_dict = {
            "headline": str(article['headline']),
            "content": str(article['content']),
            "sentiment": str(analysis['details']['sentiment_analysis']['sentiment']),
            "bias": str(analysis['details']['bias_analysis']['bias']),
            "bias_score": float(analysis['details']['bias_analysis']['bias_score']),
            "bias_percentage": float(analysis['details']['bias_analysis']['bias_percentage']),
            "flagged_phrases": list(analysis['details']['sentiment_analysis']['flagged_phrases']),
            "media_score": {
                "media_unmasked_score": float(analysis['media_unmasked_score']),
                "rating": str(analysis['rating']),
                "details": {
                    "headline_analysis": {
                        "headline_vs_content_score": float(analysis['details']['headline_analysis']['headline_vs_content_score']),
                        "contradictory_phrases": analysis['details']['headline_analysis'].get('contradictory_phrases', [])
                    },
                    "sentiment_analysis": {
                        "sentiment": str(analysis['details']['sentiment_analysis']['sentiment']),
                        "manipulation_score": float(analysis['details']['sentiment_analysis']['manipulation_score']),
                        "flagged_phrases": list(analysis['details']['sentiment_analysis']['flagged_phrases'])
                    },
                    "bias_analysis": {
                        "bias": str(analysis['details']['bias_analysis']['bias']),
                        "bias_score": float(analysis['details']['bias_analysis']['bias_score']),
                        "bias_percentage": float(analysis['details']['bias_analysis']['bias_percentage'])
                    },
                    "evidence_analysis": {
                        "evidence_based_score": float(analysis['details']['evidence_analysis']['evidence_based_score'])
                    }
                }
            }
        }
        
        # Save the new analysis to Supabase (upsert keyed on the article URL)
        supabase.table('article_analysis').upsert({
            'url': str(request.url),
            'headline': response_dict['headline'],
            'content': response_dict['content'],
            'sentiment': response_dict['sentiment'],
            'bias': response_dict['bias'],
            'bias_score': response_dict['bias_score'],
            'bias_percentage': response_dict['bias_percentage'],
            'flagged_phrases': response_dict['flagged_phrases'],
            'media_score': response_dict['media_score']
        }).execute()
        
        # Return the response
        return AnalysisResponse.parse_obj(response_dict)
        
    except HTTPException:
        # Re-raise intentional HTTP errors (e.g. the 400 raised for scrape failures)
        raise
    except Exception as e:
        logger.error(f"Analysis failed: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Analysis failed: {str(e)}"
        ) from e

@router.get("/debug")
async def debug_response():
    mock_analysis = {
        "headline": "Test Headline",
        "content": "Test content",
        "sentiment": "Neutral",
        "bias": "Neutral",
        "bias_score": 0.75,  # Note: 0-1 scale
        "bias_percentage": 0,
        "flagged_phrases": ["test phrase"],
        "media_score": {
            "media_unmasked_score": 75.5,
            "rating": "Some Bias Present",
            "details": {
                "headline_analysis": {
                    "headline_vs_content_score": 20,
                    "contradictory_phrases": ["Sample contradiction"]
                },
                "sentiment_analysis": {
                    "sentiment": "Neutral",
                    "manipulation_score": 30,
                    "flagged_phrases": ["Sample manipulative phrase"]
                },
                "bias_analysis": {
                    "bias": "Neutral",
                    "bias_score": 0.75,
                    "bias_percentage": 0
                },
                "evidence_analysis": {
                    "evidence_based_score": 80
                }
            }
        }
    }
    return AnalysisResponse.parse_obj(mock_analysis)
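
# Usage sketch (illustrative only): mounting this router on a FastAPI app.
# The import path and the "/api" prefix below are assumptions for the example,
# not defined in this module.
#
#   from fastapi import FastAPI
#   from mediaunmasked.api.analyze import router as analysis_router  # hypothetical path
#
#   app = FastAPI()
#   app.include_router(analysis_router, prefix="/api")
#
# A client would then POST {"url": "https://example.com/article"} to /api/analyze
# and receive an AnalysisResponse payload shaped like the one returned by /debug above.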