File size: 7,603 Bytes
f054e62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a49d431
f054e62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a49d431
f054e62
 
 
 
 
 
 
a8e06c6
f054e62
 
 
 
 
 
a8e06c6
f054e62
 
 
 
 
 
 
 
a8e06c6
f054e62
 
a8e06c6
 
 
 
 
 
 
 
 
a49d431
a8e06c6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a49d431
 
 
 
 
a8e06c6
a49d431
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a8e06c6
f054e62
 
 
 
 
a49d431
f054e62
 
 
 
a49d431
f054e62
a8e06c6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
import json
import logging
import argparse
import sys
import os
import math
import pickle
from deep_translator import GoogleTranslator
from gematria import calculate_gematria
from collections import defaultdict

# --- Configuration ---
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
BOOK_RANGE = range(1, 40)  # book numbers 1..39, mapped to files texts/torah/NN.json
CACHE_FILE = "tanakh_phrasedict.cache"  # pickled phrase dictionary produced by build_indices.py

# --- Core functions ---
def get_power_result(total_sum, query_value):
    """Return the largest power of query_value that does not exceed total_sum.

    Returns 1 for degenerate inputs: non-positive total_sum, query_value <= 1,
    or query_value > total_sum (matching the original guard conditions).
    """
    if total_sum <= 0 or query_value <= 1 or query_value > total_sum:
        return 1
    # Pure integer arithmetic instead of the previous
    # int(math.floor(math.log(total_sum, query_value))): the two-argument
    # float log can round down one exponent too far (e.g.
    # math.log(1000, 10) == 2.999...), which returned 100 instead of 1000.
    result = query_value
    while result * query_value <= total_sum:
        result *= query_value
    return result

def load_phrase_dictionary():
    """Load the pre-built phrase dictionary from the pickle cache.

    Exits the process with a user-facing error message if the cache file
    is missing or cannot be unpickled.
    """
    if not os.path.exists(CACHE_FILE):
        sys.exit(f"FEHLER: Cache-Datei '{CACHE_FILE}' nicht gefunden. Bitte 'build_indices.py' ausführen.")
    logging.info(f"Lade Phrasen-Wörterbuch aus Cache: {CACHE_FILE}")
    try:
        with open(CACHE_FILE, 'rb') as cache_fh:
            phrase_dict = pickle.load(cache_fh)
    except Exception as e:
        sys.exit(f"FEHLER: Cache-Datei ist korrupt. Bitte löschen und 'build_indices.py' erneut ausführen. Fehler: {e}")
    return phrase_dict

def find_all_matching_phrases(target_sum, phrase_dictionary):
    """Return every cached phrase whose gematria equals target_sum ([] if none)."""
    try:
        return phrase_dictionary[target_sum]
    except KeyError:
        return []

# --- Main program ---
def main(args):
    """Scan Tanakh verses and print "oracle answers" for the query phrase.

    For each verse: compute its gematria sum X, take the largest power Y of
    the query's gematria value not exceeding X (get_power_result), then look
    up cached phrases whose gematria equals X ^ Y, plus per-bitplane XOR
    variants up to --xor-depth.

    args: argparse.Namespace with query, translate, process_verses,
          results_per_verse and xor_depth attributes (see CLI definition).
    """
    phrase_dictionary = load_phrase_dictionary()
    query_value = calculate_gematria(args.query)
    if query_value <= 1:
        sys.exit(f"Anfrage '{args.query}' hat einen ungültigen Gematria-Wert ({query_value}).")

    # Optional Hebrew ('iw') -> English translator; if initialization fails
    # the error is logged and the analysis runs without translations.
    translator = None
    if args.translate:
        try:
            translator = GoogleTranslator(source='iw', target='en')
        except Exception as e:
            logging.error(f"Konnte Übersetzer nicht initialisieren: {e}")

    logging.info(f"Starte Orakel-Analyse für '{args.query}' (G:{query_value}) mit isolierter Bitplane-Tiefe {args.xor_depth}...")
    print("\n" + "="*20 + f" ORAKEL-ANTWORTEN FÜR '{args.query}' " + "="*20)

    verses_processed = 0
    resonance_count = 0

    for book_num in BOOK_RANGE:
        # --process-verses caps the total number of verses examined.
        if args.process_verses and verses_processed >= args.process_verses: break
        
        filepath = f"texts/torah/{book_num:02}.json"
        try:
            with open(filepath, 'r', encoding='utf-8') as file:
                data = json.load(file)
                for chap_idx, chapter in enumerate(data.get("text", []), start=1):
                    if args.process_verses and verses_processed >= args.process_verses: break
                    
                    for verse_idx, verse_text in enumerate(chapter, start=1):
                        if args.process_verses and verses_processed >= args.process_verses: break
                        verses_processed += 1

                        verse_sum = calculate_gematria(verse_text)
                        if verse_sum <= 1: continue

                        power_result = get_power_result(verse_sum, query_value)
                        
                        # The verse header is printed lazily by the first
                        # print_matches call that actually has matches.
                        header_printed = False

                        def print_matches(matches, title, calculation_str):
                            """Print the verse header (once) and the top-scoring
                            matches for one calculation step."""
                            nonlocal header_printed, resonance_count
                            if not matches: return
                            
                            if not header_printed:
                                resonance_count += 1
                                verse_ref = f"B{book_num:02d}, K{chap_idx}, V{verse_idx}"
                                print(f"\n--- Resonanz #{resonance_count} in [{verse_ref}] (G_sum:{verse_sum}) ---")
                                print(f"Originalvers: {verse_text.strip()}")
                                print(f"   [INFO] X={verse_sum}, Y={power_result}")
                                header_printed = True
                            
                            # Score = frequency per word; the 99 fallback keeps the
                            # score near zero (and avoids division by zero) when
                            # 'words' is missing.
                            # NOTE(review): sort() mutates the list stored in
                            # phrase_dictionary in place — a side effect on the
                            # shared cache; confirm this is intended.
                            matches.sort(key=lambda p: (p.get('freq', 0) / p.get('words', 99)), reverse=True)
                            matches_to_show = matches[:args.results_per_verse]

                            print(f"  ↳ {title}: {calculation_str}")
                            
                            for match in matches_to_show:
                                translation_str = ""
                                if translator:
                                    try: translation_str = translator.translate(match['text'])
                                    except Exception: translation_str = "[Übersetzung fehlgeschlagen]"

                                score = (match.get('freq', 0) / match.get('words', 99))
                                info = f"(Wörter: {match.get('words', 'N/A')}, Freq: {match.get('freq', 'N/A')}, Score: {score:.2f})"
                                print(f"     - {match['text']} {info}")
                                if translation_str:
                                    print(f"       ↳ Interpretation: \"{translation_str}\"")

                        # 1. The normal, full XOR operation as a reference
                        main_target_sum = verse_sum ^ power_result
                        main_matches = find_all_matching_phrases(main_target_sum, phrase_dictionary)
                        calc_str = f"[X:{verse_sum}] ^ [Y:{power_result}] → [G_ziel:{main_target_sum}]"
                        print_matches(main_matches, "Gesamt-Resonanz", calc_str)
                        
                        # 2. The isolated bitplane analysis (only for verses that
                        # already produced a main resonance above)
                        if args.xor_depth > 0 and header_printed:
                            for depth in range(args.xor_depth):
                                bit_mask = 1 << depth
                                
                                bitplane_x = verse_sum & bit_mask
                                bitplane_y = power_result & bit_mask
                                
                                target_sum = bitplane_x ^ bitplane_y
                                
                                # We only search for phrases when the result is > 0,
                                # since a gematria of 0 is not meaningful.
                                if target_sum > 0:
                                    bitplane_matches = find_all_matching_phrases(target_sum, phrase_dictionary)
                                    bitplane_calc_str = f"Bitplane[{depth}](X:{bitplane_x}) ^ Bitplane[{depth}](Y:{bitplane_y}) → [G_ziel:{target_sum}]"
                                    print_matches(bitplane_matches, f"Bitplane-Tiefe {depth}", bitplane_calc_str)
                            
        except FileNotFoundError: continue

    logging.info(f"Analyse abgeschlossen. {resonance_count} Resonanz-Verse in {verses_processed} analysierten Versen gefunden.")

if __name__ == "__main__":
    # CLI entry point: define the arguments and hand the parsed namespace to main().
    cli = argparse.ArgumentParser(description="Tanakh Universal Resonance Analyzer mit isolierter Bitplane-Analyse.")
    cli.add_argument("query", type=str, help="Die Abfragephrase (z.B. 'יהוה').")
    cli.add_argument("--translate", action="store_true", help="Aktiviert die automatische Übersetzung.")
    cli.add_argument("--process-verses", type=int, help="Maximale Anzahl der zu analysierenden Start-Verse.")
    cli.add_argument("--results-per-verse", type=int, default=3, help="Maximale Orakel-Antworten pro gefundener Resonanz (Standard: 3).")
    cli.add_argument("--xor-depth", type=int, default=16, help="Maximale zu prüfende Bit-Ebene (0-15) (Standard: 16).")
    main(cli.parse_args())