import json
import logging
import argparse
import sys
import os
import re
import math
import pickle
from deep_translator import GoogleTranslator
from gematria import calculate_gematria

# --- Configuration ---
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
BOOK_RANGE = range(1, 40)
INDICES_DIR = "indices_by_book"
CACHE_FILE = "tanakh_oracledata.cache"  # dedicated cache file for this script

# --- Core functions ---

def xor_with_highest_power(total_sum, query_value):
    """XOR total_sum with the highest power of query_value that fits into it."""
    if total_sum <= 0 or query_value <= 1:
        return None
    if query_value > total_sum:
        # query_value**0 == 1 is the largest power that still fits.
        power = 1
    else:
        try:
            exponent = int(math.floor(math.log(total_sum, query_value)))
            power = query_value ** exponent
        except ValueError:
            return None
    return total_sum ^ power
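
# Worked example for the mapping above (illustrative values, not from the
# corpus): for total_sum=2000 and query_value=26, floor(log(2000, 26)) = 2,
# so power = 26**2 = 676 and the result is 2000 ^ 676 = 1396, i.e.:
#
#   assert xor_with_highest_power(2000, 26) == 1396
#
# Note that math.log works on floats, so for sums very close to an exact
# power the exponent can land one step off; the function tolerates this.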

def load_or_create_phrase_dictionary(use_cache=True):
    """
    Lädt oder erstellt das universelle Phrasen-Wörterbuch (Gematria-Tabelle).
    Struktur: { gematria_value: [phrase_obj_1, ...], ... }
    """
    if use_cache and os.path.exists(CACHE_FILE):
        logging.info(f"Lade Phrasen-Wörterbuch aus Cache: {CACHE_FILE}")
        with open(CACHE_FILE, 'rb') as f:
            return pickle.load(f)

    logging.info("Erstelle universelles Phrasen-Wörterbuch aus allen Indizes (dies dauert einen Moment)...")
    phrase_dict = {}

    all_indices = {}
    for i in BOOK_RANGE:
        index_path = os.path.join(INDICES_DIR, f"book_{i:02}_index.json")
        if os.path.exists(index_path):
            with open(index_path, 'r', encoding='utf-8') as f:
                all_indices[i] = json.load(f)
    if not all_indices:
        sys.exit("Keine Index-Dateien gefunden. Bitte 'build_indices.py' ausführen.")

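    # Expected shape of each book_XX_index.json, inferred from the fields read
    # below (the authoritative format is produced by build_indices.py):
    #   {
    #       "26": {
    #           "pagerank": 0.0123,
    #           "phrases": [{"text": "...", "count": 3}, ...]
    #       },
    #       ...
    #   }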
    for book_num, index in all_indices.items():
        for gematria_val_str, data in index.items():
            gematria_val = int(gematria_val_str)
            if gematria_val not in phrase_dict:
                phrase_dict[gematria_val] = []

            pagerank = data.get('pagerank', 0)
            for phrase_data in data.get('phrases', []):
                count = phrase_data.get('count', 1)
                score = pagerank / count if count > 0 else 0
                # Keep only phrases with a positive score to filter out noise
                if score > 0:
                    phrase_dict[gematria_val].append({
                        "text": phrase_data['text'],
                        "score": score,
                        "source": f"B{book_num:02d}"
                    })

    # Sort each entry's phrase list by score, best first
    for gematria_val in phrase_dict:
        phrase_dict[gematria_val].sort(key=lambda x: x['score'], reverse=True)

    logging.info(f"{len(phrase_dict)} einzigartige Gematria-Werte im Wörterbuch.")

    if use_cache:
        logging.info(f"Speichere Phrasen-Wörterbuch in Cache: {CACHE_FILE}")
        with open(CACHE_FILE, 'wb') as f:
            pickle.dump(phrase_dict, f)

    return phrase_dict

def find_most_meaningful_phrase(target_sum, phrase_dictionary):
    """Findet die eine, bedeutungsvollste Phrase für eine gegebene Summe."""
    if target_sum in phrase_dictionary and phrase_dictionary[target_sum]:
        # The list is pre-sorted by score, so the first entry is the best match.
        return phrase_dictionary[target_sum][0]
    return None
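
# Minimal usage sketch for the lookup above (the key 1396 is illustrative):
#
#   match = find_most_meaningful_phrase(1396, phrase_dictionary)
#   if match:
#       print(match["text"], match["score"], match["source"])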

# --- Main program ---

def main(args):
    # 1. Load the universal phrase dictionary
    phrase_dictionary = load_or_create_phrase_dictionary(use_cache=not args.no_cache)

    # 2. Compute the gematria value of the query
    query_value = calculate_gematria(args.query)
    if query_value <= 1:
        sys.exit(f"Query '{args.query}' has an invalid gematria value ({query_value}).")

    # Initialize the translator ('iw' is the legacy language code for Hebrew)
    try:
        translator = GoogleTranslator(source='iw', target='en')
    except Exception as e:
        logging.error(f"Could not initialize translator: {e}")
        translator = None

    # 3. Iterate over every verse of the Tanakh
    logging.info(f"Starting oracle analysis for '{args.query}' (gematria: {query_value})...")
    print("\n" + "="*20 + f" ORACLE ANSWERS FOR '{args.query}' " + "="*20)

    resonance_count = 0

    for book_num in BOOK_RANGE:
        filepath = f"texts/torah/{book_num:02}.json"
        try:
            with open(filepath, 'r', encoding='utf-8') as file:
                data = json.load(file)
                for chap_idx, chapter in enumerate(data.get("text", []), start=1):
                    for verse_idx, verse_text in enumerate(chapter, start=1):
                        verse_sum = calculate_gematria(verse_text)
                        if verse_sum <= 1: continue

                        # Apply the XOR operation
                        target_sum = xor_with_highest_power(verse_sum, query_value)
                        if target_sum is None: continue

                        # Find the best resonance phrase
                        best_match = find_most_meaningful_phrase(target_sum, phrase_dictionary)

                        if best_match:
                            resonance_count += 1
                            verse_ref = f"B{book_num:02d}, K{chap_idx}, V{verse_idx}"

                            # Translate the matched phrase
                            translation = ""
                            if translator:
                                try:
                                    translation = translator.translate(best_match['text'])
                                except Exception:
                                    translation = "[Übersetzung fehlgeschlagen]"

                            print(f"\n--- Resonanz in [{verse_ref}] (G_sum:{verse_sum}) ---")
                            print(f"Originalvers: {verse_text.strip()}")
                            print(f"   ↳ Orakel-Antwort (G_ziel:{target_sum}): {best_match['text']} (aus {best_match['source']})")
                            if translation:
                                print(f"   ↳ Englische Interpretation: \"{translation}\"")

                            if resonance_count >= args.limit:
                                logging.info(f"Ausgabelimit von {args.limit} Resonanzen erreicht.")
                                return
        except FileNotFoundError:
            continue

    logging.info(f"Analyse abgeschlossen. {resonance_count} Resonanzen gefunden.")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tanakh Numerological Oracle Engine.")
    parser.add_argument("query", type=str, help="Die Abfragephrase (z.B. 'יהוה').")
    parser.add_argument("--limit", type=int, default=10, help="Maximale Anzahl der auszugebenden Orakel-Antworten.")
    parser.add_argument("--no-cache", action="store_true", help="Erzwingt das Neuerstellen des Phrasen-Wörterbuchs.")
    args = parser.parse_args()
    main(args)
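
# Example invocations (script name and queries are illustrative; the index
# files from build_indices.py and the texts/torah/NN.json sources must exist):
#
#   python tanakh_oracle.py "יהוה" --limit 5
#   python tanakh_oracle.py "שלום" --no-cache   # force a dictionary rebuild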