File size: 28,169 Bytes
5dc3509
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
import logging
import re
from utils.models import get_llm_model
from utils.performance import PerformanceTracker

logger = logging.getLogger("misinformation_detector")

performance_tracker = PerformanceTracker()

def classify_with_llm(claim, evidence):
    """
    Optimized classification function that handles evidence classification
    and verdict generation in a single LLM call with robust parsing
    """
    logger.info(f"Classifying evidence for claim: {claim}")
    
    # Get the LLM model
    llm_model = get_llm_model()
    
    # Skip if no evidence
    if not evidence:
        logger.warning("No evidence provided for classification")
        return []

    # Normalize evidence to a list
    if not isinstance(evidence, list):
        if evidence:
            try:
                evidence = [evidence]
            except Exception as e:
                logger.error(f"Could not convert evidence to list: {e}")
                return []
        else:
            return []

    # Does the claim contain strong assertions that require specific evidence?
    strong_assertion_markers = [
        "solved", "cured", "discovered", "confirmed", "proven", "definitive", 
        "breakthrough", "revolutionary", "successfully", "first ever", "extends",
        "conclusive", "unprecedented", "remarkable", "definitively"
    ]
    
    # Check if the claim contains strong assertions that would require specific supporting evidence
    contains_strong_assertions = any(marker in claim.lower() for marker in strong_assertion_markers)
    
    # Limit to top 5 evidence items to reduce token usage
    evidence = evidence[:5]

    try:
        # Format evidence items
        evidence_text = ""
        for idx, chunk in enumerate(evidence):
            # Truncate long evidence
            chunk_text = str(chunk)
            if len(chunk_text) > 300:
                chunk_text = chunk_text[:297] + "..."
            
            evidence_text += f"EVIDENCE {idx+1}:\n{chunk_text}\n\n"

        # Create a structured prompt with explicit formatting instructions
        # Adjust instructions based on claim characteristics
        if contains_strong_assertions:
            prompt = f"""
            CLAIM: {claim}

            EVIDENCE:
            {evidence_text}

            TASK: Evaluate if the evidence supports, contradicts, or is irrelevant to the claim.
            
            IMPORTANT CONTEXT: This claim makes strong assertions that require specific supporting evidence.
            
            When evaluating such claims:
            1. Strong assertions require strong, direct evidence - look for specific confirmation from credible sources
            2. General information about the topic is not sufficient to support specific assertions
            3. Evidence of ongoing work or research is not sufficient to support claims of completion or success
            4. If the evidence doesn't directly confirm the specific assertion, classify it as "insufficient" rather than "support"
            
            INSTRUCTIONS:
            1. For each evidence, provide your analysis in EXACTLY this format:
            
            EVIDENCE 1 ANALYSIS:
            Relevance: [relevant/irrelevant]
            Classification: [support/contradict/insufficient/irrelevant]
            Confidence: [number between 0-100]
            Reason: [brief explanation focusing on whether evidence directly confirms the specific assertion]
            
            2. After analyzing all evidence pieces, provide a final verdict in this format:
            
            FINAL VERDICT: [clear statement if evidence collectively supports or contradicts the claim]
            
            Without specific, direct supporting evidence, default to "The evidence does not support the claim" rather than "insufficient evidence."

            CRITICAL INSTRUCTION: FOCUS ON THE EXACT CLAIM. Evaluate ONLY the specific claim, not related topics
            """
        else:
            prompt = f"""
            CLAIM: {claim}

            EVIDENCE:
            {evidence_text}

            TASK: Evaluate if the evidence supports, contradicts, or is irrelevant to the claim.
            
            INSTRUCTIONS:
            1. For each evidence, provide your analysis in EXACTLY this format:
            
            EVIDENCE 1 ANALYSIS:
            Relevance: [relevant/irrelevant]
            Classification: [support/contradict/insufficient/irrelevant]
            Confidence: [number between 0-100]
            Reason: [brief explanation]
            
            2. After analyzing all evidence pieces, provide a final verdict in this format:
            
            FINAL VERDICT: [clear statement if evidence collectively supports or contradicts the claim]
            
            CRITICAL INSTRUCTION: FOCUS ON THE EXACT CLAIM. Evaluate ONLY the specific claim, not related topics
            """

        # Get response with temperature=0 for consistency
        result = llm_model.invoke(prompt, temperature=0)
        result_text = result.content.strip()
        
        # Extract final verdict first since it's most important
        final_verdict = None
        final_match = re.search(r'FINAL VERDICT:\s*(.*?)(?=\s*$|\n\n)', result_text, re.DOTALL | re.IGNORECASE)
        if final_match:
            final_verdict = final_match.group(1).strip()
            logger.info(f"Final assessment: {final_verdict}")
        
        # Define a precise regex pattern matching the requested format
        analysis_pattern = r'EVIDENCE\s+(\d+)\s+ANALYSIS:\s*\n+Relevance:\s*(relevant|irrelevant)\s*\n+Classification:\s*(support|contradict|neutral|irrelevant|insufficient)\s*\n+Confidence:\s*(\d+)\s*\n+Reason:\s*(.*?)(?=\s*EVIDENCE\s+\d+\s+ANALYSIS:|\s*FINAL VERDICT:|\s*$)'
        
        # Parse each evidence analysis
        classification_results = []
        matched_evidence = set()
        
        # Try matching with our strict pattern first
        matches = list(re.finditer(analysis_pattern, result_text, re.IGNORECASE | re.DOTALL))
        
        # If no matches, try a more flexible pattern
        if not matches:
            flexible_pattern = r'(?:EVIDENCE|Evidence)\s+(\d+)(?:\s+ANALYSIS)?:?\s*\n+(?:Relevance|relevance):\s*(relevant|irrelevant|unknown)\s*\n+(?:Classification|classification):\s*(support|contradict|neutral|irrelevant|insufficient|unknown)\s*\n+(?:Confidence|confidence):\s*(\d+)\s*\n+(?:Reason|reason|Brief reason):\s*(.*?)(?=\s*(?:EVIDENCE|Evidence)\s+\d+|FINAL VERDICT:|$)'
            matches = list(re.finditer(flexible_pattern, result_text, re.IGNORECASE | re.DOTALL))
            
        # Process matches
        for match in matches:
            try:
                evidence_idx = int(match.group(1)) - 1
                relevance = match.group(2).lower()
                classification = match.group(3).lower()
                confidence = int(match.group(4))
                reason = match.group(5).strip()
                
                # Normalize classification terms
                if classification == "neutral":
                    classification = "insufficient"
                
                # For strong assertions, apply confidence adjustments based on classification
                if contains_strong_assertions:
                    if classification == "support":
                        # Check if the reasoning indicates direct or indirect support
                        indirect_support_markers = ["general", "doesn't directly", "does not directly", 
                                                  "doesn't specifically", "not specific", "related to", 
                                                  "doesn't confirm"]
                        if any(marker in reason.lower() for marker in indirect_support_markers):
                            # Downgrade support confidence for indirect evidence
                            confidence = max(5, confidence - 20)
                    elif classification == "contradict":
                        # For contradictions of strong assertions, slightly boost confidence
                        confidence = min(95, confidence + 5)
                
                # Ensure index is valid
                if 0 <= evidence_idx < len(evidence):
                    matched_evidence.add(evidence_idx)
                    
                    # Create result entry
                    classification_results.append({
                        "label": classification,
                        "confidence": confidence / 100.0,
                        "evidence": evidence[evidence_idx],
                        "relevance": relevance,
                        "reason": reason,
                        "final_assessment": final_verdict
                    })
            except (ValueError, IndexError) as e:
                logger.error(f"Error parsing evidence analysis: {e}")
        
        # Handle any unmatched evidence items
        if matches:  # Only add defaults if we successfully matched some
            for idx, ev in enumerate(evidence):
                if idx not in matched_evidence:
                    # Check if the evidence text itself suggests a classification
                    contains_support = bool(re.search(r'support|confirm|verify|true|correct|released', final_verdict or "", re.IGNORECASE))
                    contains_contradicting = bool(re.search(r'not yet|hasn\'t|have not|doesn\'t|don\'t|cannot|preliminary|proposed', str(ev).lower()))
                    
                    # For claims with strong assertions without explicit evidence, be more cautious
                    if contains_strong_assertions:
                        if contains_contradicting:
                            label = "contradict"
                            confidence = 0.6
                        elif contains_support:
                            label = "insufficient"  # Default to insufficient for strong assertions without clear analysis
                            confidence = 0.5
                        else:
                            label = "insufficient"
                            confidence = 0.5
                    else:
                        label = "support" if contains_support else "unknown"
                        confidence = 0.7 if contains_support else 0.5
                    
                    classification_results.append({
                        "label": label,
                        "confidence": confidence,
                        "evidence": ev,
                        "relevance": "relevant" if (contains_support or contains_contradicting) else "unknown",
                        "reason": "Based on overall assessment",
                        "final_assessment": final_verdict
                    })
        else:
            # No structured parsing worked, use final verdict to create simple results
            contains_support = bool(re.search(r'support|confirm|verify|true|correct|released', final_verdict or "", re.IGNORECASE))
            contains_contradict = bool(re.search(r'contradict|against|false|incorrect|not support|does not support|insufficient evidence|does not confirm|no evidence', final_verdict or "", re.IGNORECASE))
            contains_insufficient = bool(re.search(r'insufficient|not enough|cannot determine|no evidence|lack of evidence', final_verdict or "", re.IGNORECASE))
            
            # For claims with strong assertions, be more stringent
            if contains_strong_assertions:
                if contains_support and not contains_insufficient and not contains_contradict:
                    label = "support"
                    confidence = 0.6  # Lower confidence even for support of strong assertions
                elif contains_contradict:
                    label = "contradict"
                    confidence = 0.8  # Higher confidence for contradiction of strong assertions
                else:
                    label = "insufficient"
                    confidence = 0.7  # Good confidence for insufficient judgment
            else:
                label = "support" if contains_support else "contradict" if contains_contradict else "unknown"
                confidence = 0.7 if (contains_support or contains_contradict) else 0.5
            
            # Create basic results based on final verdict
            for ev in evidence:
                classification_results.append({
                    "label": label,
                    "confidence": confidence,
                    "evidence": ev,
                    "relevance": "relevant" if (contains_support or contains_contradict) else "unknown",
                    "reason": final_verdict or "Based on collective evidence",
                    "final_assessment": final_verdict
                })
        
        logger.info(f"Classified {len(classification_results)} evidence items")
        return classification_results

    except Exception as e:
        logger.error(f"Error in evidence classification: {str(e)}")
        # Provide a basic fallback that checks for keywords in evidence
        try:
            fallback_results = []
            for ev in evidence:
                ev_text = str(ev).lower()
                supports = False
                contradicts = False
                
                # Basic keyword checking as last resort
                if claim.lower() in ev_text:
                    keywords = [word for word in claim.lower().split() if len(word) > 3]
                    matching_keywords = [k for k in keywords if k in ev_text]
                    
                    # If substantial keywords match, consider it support
                    supports = len(matching_keywords) >= max(1, len(keywords) // 2)
                
                # Check for contradiction terms
                contradiction_terms = ["not yet", "hasn't", "haven't", "cannot", "can't", 
                                      "doesn't", "don't", "no evidence", "insufficient", 
                                      "preliminary", "proposed", "in development", "future"]
                contradicts = any(term in ev_text for term in contradiction_terms)
                
                # For claims with strong assertions, be more conservative in the fallback case
                if contains_strong_assertions:
                    if contradicts:
                        fallback_results.append({
                            "label": "contradict",
                            "confidence": 0.6,
                            "evidence": ev,
                            "relevance": "relevant",
                            "reason": "Evidence suggests the claim is not yet proven (fallback method)"
                        })
                    elif supports:
                        fallback_results.append({
                            "label": "insufficient",
                            "confidence": 0.6,
                            "evidence": ev,
                            "relevance": "relevant",
                            "reason": "Evidence is related but doesn't conclusively confirm the assertion (fallback method)"
                        })
                    else:
                        fallback_results.append({
                            "label": "unknown",
                            "confidence": 0.5,
                            "evidence": ev,
                            "relevance": "unknown",
                            "reason": "Cannot determine relevance (fallback method)"
                        })
                else:
                    fallback_results.append({
                        "label": "support" if supports else "unknown",
                        "confidence": 0.6 if supports else 0.5,
                        "evidence": ev,
                        "relevance": "relevant" if supports else "unknown",
                        "reason": "Based on keyword matching (fallback method)"
                    })
            
            return fallback_results
        except:
            # Absolute last resort
            return [{"label": "unknown", "confidence": 0.5, "evidence": ev} for ev in evidence]
    
def aggregate_evidence(classification_results):
    """
    Aggregate evidence classifications to determine overall verdict
    with robust fallback mechanisms for reliable results
    """
    logger.info(f"Aggregating evidence from {len(classification_results) if classification_results else 0} results")
    
    if not classification_results:
        logger.warning("No classification results to aggregate")
        return "Uncertain", 0.3  # Default with low confidence
    
    # Assess the claim's characteristics (without relying on explicit category detection)
    # Does the claim contain strong assertions that require specific evidence?
    strong_assertion_markers = [
        "solved", "cured", "discovered", "confirmed", "proven", "definitive", 
        "breakthrough", "revolutionary", "successfully", "first ever", "extends",
        "conclusive", "unprecedented", "remarkable", "definitively"
    ]
    
    # Check if claim text is available in final assessment
    claim_text = None
    claim_has_strong_assertions = False
    
    # Extract claim from final assessment if available
    for item in classification_results:
        if "final_assessment" in item and item["final_assessment"]:
            match = re.search(r'the claim (?:that )?"?([^"]+)"?', item["final_assessment"], re.IGNORECASE)
            if match:
                claim_text = match.group(1)
                claim_has_strong_assertions = any(marker in claim_text.lower() for marker in strong_assertion_markers)
                break
    
    # If we couldn't extract the claim, check evidence context for assertion indicators
    if not claim_text:
        # Check if evidence reasons suggest dealing with strong assertions
        assertion_context_indicators = ["conclusive evidence", "definitive proof", "solved", "breakthrough", 
                                      "revolutionary", "directly confirms", "specific confirmation"]
        
        reasons = [item.get("reason", "").lower() for item in classification_results if "reason" in item]
        assertion_indicators_count = sum(1 for indicator in assertion_context_indicators 
                                        for reason in reasons if indicator in reason)
        
        claim_has_strong_assertions = assertion_indicators_count >= 2
    
    # Extract final assessment if present
    final_assessment = None
    for item in classification_results:
        if "final_assessment" in item and item["final_assessment"]:
            final_assessment = item["final_assessment"]
            break
    
    # Count evidence by classification
    support_items = [item for item in classification_results if item.get("label") == "support"]
    contradict_items = [item for item in classification_results if item.get("label") == "contradict"]
    insufficient_items = [item for item in classification_results if item.get("label") in ["insufficient", "neutral"]]
    relevant_items = [item for item in classification_results 
                     if item.get("relevance") == "relevant" or item.get("label") in ["support", "contradict"]]
    
    # Calculate the proportion of supported evidence
    total_relevant = len(relevant_items)
    
    # Direct keyword detection from final assessment or evidence
    if final_assessment:
        # Check for support indicators in final assessment
        supports_pattern = r'\b(support|confirm|verify|true|correct|released|proves|validates|evidence (?:that |for |of )(?:the claim|it) is true)\b'
        contradicts_pattern = r'\b(contradict|refute|deny|false|incorrect|not released|doesn\'t support|does not support|no evidence|cannot support|is not true|evidence (?:that |for |of )(?:the claim|it) is false)\b'
        insufficient_pattern = r'\b(uncertain|insufficient|not enough|inconclusive|cannot determine|unable to determine|lack of evidence)\b'
        
        supports_match = re.search(supports_pattern, final_assessment, re.IGNORECASE)
        contradicts_match = re.search(contradicts_pattern, final_assessment, re.IGNORECASE)
        insufficient_match = re.search(insufficient_pattern, final_assessment, re.IGNORECASE)
        
        # Direct determination based on final assessment keywords
        if supports_match and not contradicts_match and not insufficient_match:
            # Get max confidence from supporting evidence
            confidence = max([item.get("confidence", 0) for item in support_items]) if support_items else 0.7
            
            # Adjust confidence for claims with strong assertions
            if claim_has_strong_assertions:
                confidence = min(confidence, 0.8)  # Cap confidence for strong assertions
                
            return "True (Based on Evidence)", max(0.6, confidence)  # Minimum 0.6 confidence
            
        if contradicts_match and not supports_match:
            # Get max confidence from contradicting evidence
            confidence = max([item.get("confidence", 0) for item in contradict_items]) if contradict_items else 0.7
            
            # For claims with strong assertions, increase confidence in contradiction
            if claim_has_strong_assertions:
                confidence = max(confidence, 0.7)  # Minimum 0.7 confidence for contradicting strong assertions
                
            return "False (Based on Evidence)", max(0.6, confidence)  # Minimum 0.6 confidence
            
        if insufficient_match:
            # For claims with strong assertions without confirming evidence, 
            # change "Uncertain" to a clearer negative verdict
            if claim_has_strong_assertions:
                return "False (Based on Evidence)", 0.7
            return "Uncertain", 0.4  # Medium-low confidence
    
    # If we have distinct classifications, weigh them by confidence and quantity
    if support_items and (not contradict_items or all(item.get("confidence", 0) < 0.95 for item in contradict_items)):
        # Check if there's high confidence support evidence (greater than 0.95)
        high_confidence_support = [item for item in support_items if item.get("confidence", 0) > 0.95]
        
        if high_confidence_support:
            # High confidence support evidence exists, use it even if there are some contradictions
            confidence = max([item.get("confidence", 0) for item in high_confidence_support])
            # For claims with strong assertions, be more conservative with pure support
            if claim_has_strong_assertions:
                confidence = min(confidence, 0.8)
            return "True (Based on Evidence)", max(0.7, confidence)
        elif not contradict_items:
            # All supportive evidence with no contradictions (standard case)
            confidence = max([item.get("confidence", 0) for item in support_items])
            
            # For claims with strong assertions, be more conservative with pure support
            if claim_has_strong_assertions:
                # For strong assertions with only support but no contradictions, be cautious
                confidence = min(confidence, 0.7)
                # If the support is from low-quality evidence, consider it uncertain
                support_reasons = [item.get("reason", "").lower() for item in support_items]
                weak_supports = sum(1 for reason in support_reasons if
                                   "general information" in reason or 
                                   "doesn't specify" in reason or 
                                   "does not directly" in reason)
                if weak_supports / max(1, len(support_items)) > 0.5:
                    return "Uncertain", 0.6
            
            return "True (Based on Evidence)", max(0.6, confidence)
        
    if contradict_items and not support_items:
        # All contradicting evidence
        confidence = max([item.get("confidence", 0) for item in contradict_items])
        
        # For claims with strong assertions, increase confidence in contradiction
        if claim_has_strong_assertions:
            confidence = max(confidence, 0.7)
            
        return "False (Based on Evidence)", max(0.6, confidence)
    
    if insufficient_items and len(insufficient_items) > len(support_items) + len(contradict_items):
        # Mostly insufficient evidence
        # For claims with strong assertions and mainly insufficient evidence, lean toward "False"
        if claim_has_strong_assertions:
            return "False (Based on Evidence)", 0.7
        return "Uncertain", 0.5  # Medium confidence for explicitly uncertain
    
    if support_items and contradict_items:
        # Competing evidence - compare confidence and quantity
        support_confidence = max([item.get("confidence", 0) for item in support_items])
        contradict_confidence = max([item.get("confidence", 0) for item in contradict_items])
        
        # For claims with strong assertions, require stronger support to overcome contradiction
        if claim_has_strong_assertions:
            # Higher threshold for strong assertions
            if support_confidence > contradict_confidence + 0.3:
                return "True (Based on Evidence)", support_confidence * 0.9  # Apply a confidence penalty
            elif contradict_confidence >= support_confidence - 0.1:  # Lower threshold for contradiction
                return "False (Based on Evidence)", max(contradict_confidence, 0.7)  # Minimum 0.7 confidence
            else:
                # Default to uncertain for close calls on strong assertions
                return "Uncertain", 0.6
        else:
            # Standard threshold for regular claims
            if support_confidence > contradict_confidence + 0.2:
                return "True (Based on Evidence)", support_confidence
            elif contradict_confidence > support_confidence + 0.2:
                return "False (Based on Evidence)", contradict_confidence
            else:
                # Close call - check quantity of evidence
                if len(support_items) > len(contradict_items) * 2:
                    return "True (Based on Evidence)", support_confidence * 0.9  # Slight confidence penalty
                elif len(contradict_items) > len(support_items) * 2:
                    return "False (Based on Evidence)", contradict_confidence * 0.9  # Slight confidence penalty
                else:
                    # Truly conflicting evidence
                    return "Uncertain", 0.5  # Medium confidence
    
    # Check for evidence quality issues
    all_unknown = all(item.get("label") == "unknown" for item in classification_results)
    evidence_text = " ".join([str(item.get("evidence", "")) for item in classification_results])
    
    # General case: For any claims with all unknown labels that contain markers of strong assertions
    if all_unknown and claim_has_strong_assertions:
        # Absence of clear supporting evidence for claims with strong assertions points to "False"
        return "False (Based on Evidence)", 0.7
    
    # For general claims, if all items are unknown but evidence clearly mentions the claim
    if all_unknown:
        # Examples of direct evidence matching as fallback
        if re.search(r'\bllama\s*4\b', evidence_text, re.IGNORECASE) and re.search(r'\bmeta\b|\bfacebook\b', evidence_text, re.IGNORECASE) and re.search(r'\breleas', evidence_text, re.IGNORECASE):
            return "True (Based on Evidence)", 0.7
        elif re.search(r'\bnot\s+releas', evidence_text, re.IGNORECASE) or re.search(r'\bdenies\b|\bdenied\b', evidence_text, re.IGNORECASE):
            return "False (Based on Evidence)", 0.7
    
    # Default to uncertain if no clear pattern - but with special case for claims with strong assertions
    if claim_has_strong_assertions:
        # For claims with strong assertions with no clear evidence, default to false
        return "False (Based on Evidence)", 0.7
    
    return "Uncertain", 0.3