import gradio as gr
import json
import logging
import argparse
import sys
import os
import math
import pickle
from deep_translator import GoogleTranslator
from gematria import calculate_gematria
from collections import defaultdict
from typing import Dict, List, Any, Optional

# --- Configuration ---
# Logging is kept for file-based or production logging, but we'll use print() for immediate console debug
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

BOOK_RANGE = range(1, 40)
CACHE_FILE = "tanakh_phrasedict.cache"

# --- Core Logic Functions ---
def get_power_result(total_sum: int, query_value: int) -> int:
    """Calculates the power or root result."""
    if query_value <= 1:
        return 1
    if query_value < total_sum:
        try:
            exponent = int(math.floor(math.log(total_sum, query_value)))
            return query_value ** exponent
        except (ValueError, OverflowError):
            return 1
    return 1  # Fallback for query_value >= total_sum so the function never returns None.

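# Illustrative call (hypothetical numbers): get_power_result(1000, 26) == 676,
# since 26**2 = 676 is the largest power of 26 that does not exceed 1000.
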
def find_all_matching_phrases(target_sum: int, phrase_dictionary: Dict[int, List[Dict]]) -> List[Dict]:
    """Finds all phrases matching a target Gematria."""
    return phrase_dictionary.get(int(target_sum), [])

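# e.g. find_all_matching_phrases(26, phrase_dictionary) returns every cached phrase
# whose Gematria value is exactly 26, or [] when no phrase has that value.
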
# --- Global State: Load dictionary once at startup ---
try:
    if not os.path.exists(CACHE_FILE):
        raise FileNotFoundError(f"ERROR: Cache file '{CACHE_FILE}' not found. Please run 'build_indices.py' first.")
    logging.info(f"Loading phrase dictionary from cache: {CACHE_FILE}")
    with open(CACHE_FILE, 'rb') as f:
        phrase_dictionary: Optional[Dict[int, List[Dict]]] = pickle.load(f)
    logging.info("Phrase dictionary loaded successfully for the Gradio app.")
except (FileNotFoundError, IOError, pickle.UnpicklingError) as e:
    logging.error(str(e))
    phrase_dictionary = None

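# The cache is expected to map a Gematria value to a list of phrase records with
# "text", "words" and "freq" keys (inferred from the lookups in run_analysis), e.g.
#     {26: [{"text": "יהוה", "words": 1, "freq": 305}], ...}
# A minimal sketch for creating a tiny test cache by hand (not a substitute for
# build_indices.py; the frequency value here is invented):
#     with open(CACHE_FILE, "wb") as f:
#         pickle.dump({26: [{"text": "יהוה", "words": 1, "freq": 305}]}, f)
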
# --- Main Analysis Function for Gradio ---
def run_analysis(query: str, translate: bool, process_verses: int, results_per_verse: int, xor_depth: int, progress=gr.Progress(track_tqdm=True)):
    """The main analysis function called by the Gradio interface."""
    if phrase_dictionary is None:
        return "## Fatal Error\nCould not start analysis. The phrase dictionary cache file (`tanakh_phrasedict.cache`) is missing or corrupt. Please run `build_indices.py` and restart the app."

    print("\n--- NEW ANALYSIS RUN ---")  # Console Debug
    output_lines = []

    try:
        query_value = calculate_gematria(query)
        if query_value <= 1 and query:
            return f"## Error\nQuery '{query}' has an invalid Gematria value ({query_value}). Please enter a valid query."
    except Exception as e:
        return f"## Error\nCould not calculate Gematria for query '{query}'. Details: {e}"

    progress(0, desc="Initializing...")
    translator = None
    if translate:
        try:
            translator = GoogleTranslator(source='iw', target='en')
        except Exception as e:
            logging.error(f"Could not initialize translator: {e}")
            output_lines.append(f"**Warning:** Could not initialize translator: {e}")

    output_lines.append(f"## XOR Gematria Resonance Analysis for: `{query}`")
    verses_processed = 0
    resonance_count = 0

    # Using a generator to handle the nested loops cleanly and break out
    def get_verses():
        for book_num in BOOK_RANGE:
            filepath = f"texts/torah/{book_num:02}.json"
            if not os.path.exists(filepath):
                continue
            with open(filepath, 'r', encoding='utf-8') as f:
                data = json.load(f)
            for chap_idx, chapter in enumerate(data.get("text", []), start=1):
                for verse_idx, verse_text in enumerate(chapter, start=1):
                    yield (book_num, chap_idx, verse_idx, verse_text)

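    # Expected layout of each texts/torah/NN.json file, inferred from get_verses():
    # {"text": [["<chapter 1, verse 1>", "<chapter 1, verse 2>", ...], ["<chapter 2, verse 1>", ...], ...]}
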
    for book_num, chap_idx, verse_idx, verse_text in get_verses():
        # Correctly handle the processing limit
        if process_verses and verses_processed >= process_verses:
            print(f"DEBUG: Processing limit of {process_verses} verses reached. Stopping analysis.")
            break
        verses_processed += 1
        progress(verses_processed / process_verses, desc=f"Analyzing Verse {verses_processed}/{process_verses}")

        verse_sum = calculate_gematria(verse_text)
        if verse_sum <= 1:
            continue

        if query_value < verse_sum:
            power_result = get_power_result(verse_sum, query_value)
            main_target_sum = verse_sum ^ power_result
        elif query_value > verse_sum:
            main_target_sum = query_value ^ verse_sum
            power_result = 0
        else:  # query_value == verse_sum
            main_target_sum = verse_sum
            power_result = 0
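
        # Illustrative numbers: verse_sum = 1000 and query_value = 26 give
        # power_result = 676 (26**2) and main_target_sum = 1000 ^ 676 = 332.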
        main_matches = find_all_matching_phrases(main_target_sum, phrase_dictionary)
        verse_ref = f"B{book_num:02d}, C{chap_idx}, V{verse_idx}"
        if power_result == 0:
            print(f"DEBUG: Analyzing [{verse_ref}] | Verse Sum: {verse_sum}, Main Target: {main_target_sum}")  # Console Debug
        else:
            print(f"DEBUG: Analyzing [{verse_ref}] | Verse Sum: {verse_sum}, Power/Root: {power_result}, Main Target: {main_target_sum}")  # Console Debug

        if not main_matches:
            print("DEBUG: No main resonance found. Skipping.")  # Console Debug
            continue

        resonance_count += 1
        print(f"DEBUG: Found Resonance #{resonance_count}!")  # Console Debug
        output_lines.append("\n---\n")
        output_lines.append(f"### Resonance #{resonance_count} in [{verse_ref}]")
        output_lines.append(f"> {verse_text.strip()}\n")
        output_lines.append("```")
        output_lines.append(f"Verse Sum (X) : {verse_sum} | Query: \"{query}\" (G: {query_value}) | Power/Root (Y): {power_result}")
        output_lines.append("```\n")

        def format_matches(matches: List[Dict], title: str, calculation_str: str):
            if not matches:
                return
            matches.sort(key=lambda p: (p.get('freq', 0) / p.get('words', 99)), reverse=True)
            matches_to_show = matches[:results_per_verse]
            output_lines.append(f"**{title}:** `{calculation_str}`")
            for match in matches_to_show:
                translation_str = ""
                if translator:
                    try:
                        translation_str = translator.translate(match['text'])
                    except Exception:
                        translation_str = "[Translation failed]"
                score = (match.get('freq', 0) / match.get('words', 99)) if match.get('words') else 0
                gematria_val = calculate_gematria(match['text'])
                output_lines.append(f" * **{match['text']}**")
                output_lines.append(f" `G: {gematria_val}, Words: {match.get('words', 'N/A')}, Freq: {match.get('freq', 'N/A')}, Score: {score:.2f}`")
                if translation_str:
                    output_lines.append(f"\n*Translation: \"{translation_str}\"*")
            output_lines.append("")

        if power_result != 0:
            calc_str = f"[{verse_sum}] ^ [{power_result}] → [G_target:{main_target_sum}]"
        else:
            calc_str = f"[{verse_sum}] ^ [{query_value}] → [G_target:{main_target_sum}]"
        format_matches(main_matches, "Main Resonance", calc_str)
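
        # Each pass below flips a single bit of main_target_sum. Illustrative numbers:
        # with main_target_sum = 332 and xor_depth = 2, the variation targets are
        # 332 ^ 1 = 333 (Bit 1) and 332 ^ 2 = 334 (Bit 2).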
        if xor_depth > 0:
            output_lines.append(f"**Bitplane Variations of the Result ({main_target_sum}):**")
            for depth in range(xor_depth):
                bit_flip = 1 << depth
                target_sum = main_target_sum ^ bit_flip
                bitplane_matches = find_all_matching_phrases(target_sum, phrase_dictionary)
                if bitplane_matches:
                    bitplane_calc_str = f"[{main_target_sum}] ^ [Bit {depth+1}] → [G_target:{target_sum}]"
                    format_matches(bitplane_matches, f"Variation (Depth {depth + 1})", bitplane_calc_str)

    if resonance_count == 0:
        output_lines.append("\n**No resonances found. Consider increasing 'Verses to Process' or trying a different query.**")

    print("--- ANALYSIS COMPLETE ---")  # Console Debug
    return "\n".join(output_lines)

# --- Gradio UI Definition ---
# Custom CSS for a professional dark theme inspired by the screenshot
custom_css = """
#output_markdown h3 {
    color: #f97316; /* Vibrant orange for main resonance headers */
    border-bottom: 2px solid #374151;
    padding-bottom: 8px;
    margin-top: 24px;
}
#output_markdown blockquote {
    background-color: #1f2937;
    border-left: 5px solid #f97316;
    padding: 12px;
    font-style: italic;
    color: #d1d5db;
}
#output_markdown code {
    background-color: #374151;
    color: #e5e7eb;
    padding: 3px 6px;
    border-radius: 5px;
    font-size: 0.9em;
}
"""

# Using the robust Default theme and customizing it for the desired dark look
dark_theme = gr.themes.Default(
    primary_hue=gr.themes.colors.orange,
    secondary_hue=gr.themes.colors.blue,
    neutral_hue=gr.themes.colors.slate
).set(
    body_background_fill="#0f172a",
    background_fill_primary="#1e293b",
    background_fill_secondary="#334155",
    body_text_color="#e2e8f0",
    color_accent_soft="#1e293b",
    border_color_accent="#334155",
    border_color_primary="#334155",
    button_primary_background_fill="#f97316",
    button_primary_text_color="#ffffff",
    button_secondary_background_fill="#334155",
    button_secondary_text_color="#e2e8f0",
)

with gr.Blocks(theme=dark_theme, css=custom_css, title="Tanakh XOR Gematria Resonance") as demo:
    gr.Markdown("# Tanakh XOR Gematria Resonance")
    with gr.Tabs():
        with gr.TabItem("XOR Gematria Resonance"):
            with gr.Row():
                with gr.Column(scale=1):
                    query = gr.Textbox(
                        label="Query Phrase",
                        placeholder="e.g., ืืืื, ืืืืื, light...",
                    )
                    run_button = gr.Button("🔮 Divine Resonance", variant="primary")
                    with gr.Accordion("Advanced Parameters", open=False):
                        process_verses = gr.Slider(
                            label="Verses to Process", minimum=1, maximum=35000, step=1, value=10,
                            info="How many verses to analyze from the start of the Tanakh."
                        )
                        results_per_verse = gr.Slider(
                            label="Results per Resonance", minimum=1, maximum=10, step=1, value=1,
                            info="How many top phrases to show for each found resonance type."
                        )
                        xor_depth = gr.Slider(
                            label="Bitplane Variation Depth", minimum=0, maximum=16, step=1, value=2,
                            info="How many bit-levels of the main result to vary and analyze."
                        )
                        translate = gr.Checkbox(label="Translate to English", value=True)
                    gr.Examples(
                        examples=[
                            ["ืืืื"], ["ืืืืื"], ["ืฉืืื ื"],
                            ["ืืฉืื ืืฉืืขืื"], ["ืืื ืฉืืืฉืื ืืฉืืข"], ["ืฆืืง ืืืฉืคื"]
                        ],
                        inputs=[query]
                    )
                with gr.Column(scale=3):
                    output_markdown = gr.Markdown(label="Resonances", elem_id="output_markdown")
with gr.TabItem("About & Help"): | |
gr.Markdown( | |
""" | |
### How It Works | |
This tool explores the numerological and structural connections within the Tanakh based on Gematria and bitwise XOR operations. It is an instrument for textual exploration, not a historical or theological authority. | |
1. **Gematria Calculation:** The Gematria (numerical value) of your **Query Phrase** and each **Verse** in the Tanakh is calculated. | |
2. **Power/Root Operator (Y):** To create a non-obvious link, the Query's Gematria is transformed. If it's smaller than the Verse's Gematria, its highest possible power is taken. If larger, its n-th root is taken. This becomes the "Operator" (Y). | |
3. **Main Resonance:** The core operation is `Verse_Gematria (X) ^ Operator (Y)`. The result is a **Target Gematria**. The app then finds all phrases in the Tanakh with this exact numerical value. This is the "Main Resonance". | |
4. **Bitplane Variations:** To explore the "fractal neighborhood" of the Main Resonance, the app then "flips" each bit of the result, one by one. For each flipped bit (`depth`), it calculates a new Target Gematria (`Main_Result ^ 2^depth`) and finds corresponding phrases. This reveals concepts that are numerologically "close" to the main result. | |
5. **Scoring:** Results are sorted by a relevance score calculated as `Frequency / Word_Count` to prioritize short, common phrases. | |
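
                For illustration (hypothetical values): if a verse sums to `1000` and the query's Gematria is `26`, the Operator is `676` (26 squared, the largest power of 26 not exceeding 1000), the Main Resonance target is `1000 XOR 676 = 332`, and a variation depth of 2 additionally tests `333` (bit 1 flipped) and `334` (bit 2 flipped).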

                ### Parameters
                - **Verses to Process:** Limits how many verses the script analyzes. Higher numbers take longer.
                - **Results per Resonance:** Limits how many phrases are shown for the main resonance and each variation.
                - **Bitplane Variation Depth:** Controls how many "bit-flips" are tested. A depth of 5 will test flipping Bit 1, Bit 2, Bit 3, Bit 4, and Bit 5.
                """
            )

    run_button.click(
        fn=run_analysis,
        inputs=[query, translate, process_verses, results_per_verse, xor_depth],
        outputs=[output_markdown]
    )

if __name__ == "__main__":
    if phrase_dictionary is None:
        print("CRITICAL: Phrase dictionary could not be loaded. The application cannot start.")
        print("Please ensure 'tanakh_phrasedict.cache' exists and is valid. Run 'build_indices.py' if necessary.")
    else:
        # Pass share=True to demo.launch() to create a public link for easy sharing; omit it for local-only access.
        demo.launch()