import json
import logging
import argparse
import sys
import os
import math
import pickle
from deep_translator import GoogleTranslator
from gematria import calculate_gematria
from collections import defaultdict

# --- Configuration ---
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
BOOK_RANGE = range(1, 40)  # books 01-39 of the Tanakh (texts/torah/01.json ... 39.json)
CACHE_FILE = "tanakh_phrasedict.cache"

# --- Core functions ---
def get_power_result(total_sum, query_value):
    """Compute the power result based on the highest possible exponent, i.e. the largest power of query_value that still fits into total_sum."""
    if total_sum <= 0 or query_value <= 1:
        return 1
    elif query_value > total_sum:
        return math.ceil(math.sqrt(query_value))

    try:
        exponent = int(math.floor(math.log(total_sum, query_value)))
        return query_value ** exponent
    except (ValueError, OverflowError):
        return 1
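
# Illustrative walk-through (example values, assuming standard gematria letter
# values): for the query 'יהוה' (G:26) and a verse sum of 913 (בראשית),
# floor(log(913, 26)) = 2, so get_power_result returns 26**2 = 676 and the
# main resonance target computed below becomes 913 ^ 676 = 309.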

def load_phrase_dictionary():
    """Load the pickled phrase dictionary, exiting with a hint if it is missing or corrupt."""
    if not os.path.exists(CACHE_FILE):
        sys.exit(f"ERROR: Cache file '{CACHE_FILE}' not found. Please run 'build_indices.py' first.")
    logging.info(f"Loading phrase dictionary from cache: {CACHE_FILE}")
    try:
        with open(CACHE_FILE, 'rb') as f:
            return pickle.load(f)
    except Exception as e:
        sys.exit(f"ERROR: Cache file is corrupt. Delete it and run 'build_indices.py' again. Error: {e}")

def find_all_matching_phrases(target_sum, phrase_dictionary):
    """Return every cached phrase whose gematria sum equals target_sum."""
    return phrase_dictionary.get(target_sum, [])

# --- Main program ---
def main(args):
    """Scan the Tanakh verse by verse and report phrases that resonate with the query via XOR."""
    phrase_dictionary = load_phrase_dictionary()
    query_value = calculate_gematria(args.query)
    if query_value <= 1:
        sys.exit(f"Anfrage '{args.query}' hat einen ungültigen Gematria-Wert ({query_value}).")

    translator = None
    if args.translate:
        try:
            translator = GoogleTranslator(source='iw', target='en')
        except Exception as e:
            logging.error(f"Konnte Übersetzer nicht initialisieren: {e}")

    logging.info(f"Starte Orakel-Analyse für '{args.query}' (G:{query_value}) mit Bitplane-Variationstiefe {args.xor_depth}...")
    print("\n" + "="*20 + f" ORAKEL-ANTWORTEN FÜR '{args.query}' " + "="*20)

    verses_processed = 0
    resonance_count = 0

    for book_num in BOOK_RANGE:
        if args.process_verses and verses_processed >= args.process_verses: break
        
        filepath = f"texts/torah/{book_num:02}.json"
        try:
            with open(filepath, 'r', encoding='utf-8') as file:
                data = json.load(file)
                for chap_idx, chapter in enumerate(data.get("text", []), start=1):
                    if args.process_verses and verses_processed >= args.process_verses: break
                    
                    for verse_idx, verse_text in enumerate(chapter, start=1):
                        if args.process_verses and verses_processed >= args.process_verses: break
                        verses_processed += 1

                        verse_sum = calculate_gematria(verse_text)
                        if verse_sum <= 1: continue

                        power_result = get_power_result(verse_sum, query_value)
                        
                        # Compute the main result first
                        main_target_sum = verse_sum ^ power_result
                        main_matches = find_all_matching_phrases(main_target_sum, phrase_dictionary)

                        # Only proceed if the main resonance exists
                        if not main_matches:
                            continue

                        resonance_count += 1
                        verse_ref = f"B{book_num:02d}, K{chap_idx}, V{verse_idx}"
                        print(f"\n--- Resonanz #{resonance_count} in [{verse_ref}] (G_sum:{verse_sum}) ---")
                        print(f"Originalvers: {verse_text.strip()}")
                        
                        def print_matches(matches, title, calculation_str):
                            if not matches: return

                            # Rank by frequency per word: frequent, short phrases score highest.
                            matches.sort(key=lambda p: (p.get('freq', 0) / p.get('words', 99)), reverse=True)
                            matches_to_show = matches[:args.results_per_verse]

                            print(f"  ↳ {title}: {calculation_str}")

                            for match in matches_to_show:
                                translation_str = ""
                                if translator:
                                    try: translation_str = translator.translate(match['text'])
                                    except Exception: translation_str = "[translation failed]"

                                score = (match.get('freq', 0) / match.get('words', 99))
                                info = f"(Words: {match.get('words', 'N/A')}, Freq: {match.get('freq', 'N/A')}, Score: {score:.2f})"
                                print(f"     - {match['text']} {info}")
                                if translation_str:
                                    print(f"       ↳ Interpretation: \"{translation_str}\"")

                        # 1. Show the main resonance
                        calc_str = f"[{verse_sum}] ^ [{power_result}] → [G_target:{main_target_sum}]"
                        print_matches(main_matches, "Main resonance", calc_str)
                        
                        # 2. Show the bitplane variations of the RESULT
                        if args.xor_depth > 0:
                            print(f"   [INFO] Bitplane variations of the result ({main_target_sum}):")
                            for depth in range(args.xor_depth):
                                bit_flip = 1 << depth

                                # Flip bit 'depth' in the main result (e.g. depth 3 XORs with 8)
                                target_sum = main_target_sum ^ bit_flip

                                bitplane_matches = find_all_matching_phrases(target_sum, phrase_dictionary)

                                if bitplane_matches:
                                    bitplane_calc_str = f"[{main_target_sum}] ^ [Bit {depth}] → [G_target:{target_sum}]"
                                    print_matches(bitplane_matches, f"Variation (depth {depth})", bitplane_calc_str)
                            
        except FileNotFoundError: continue

    logging.info(f"Analyse abgeschlossen. {resonance_count} Resonanz-Verse in {verses_processed} analysierten Versen gefunden.")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tanakh Universal Resonance Analyzer with bitplane variations.")
    parser.add_argument("query", type=str, help="The query phrase (e.g. 'יהוה').")
    parser.add_argument("--translate", action="store_true", help="Enable automatic translation of matches.")
    parser.add_argument("--process-verses", type=int, default=10, help="Maximum number of verses to analyze, counted from the start (default: 10).")
    parser.add_argument("--results-per-verse", type=int, default=3, help="Maximum oracle answers per resonance type (default: 3).")
    parser.add_argument("--xor-depth", type=int, default=16, help="Number of result bits to flip for bitplane variations; a depth of 16 covers bits 0-15 (default: 16).")
    args = parser.parse_args()
    main(args)
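
# Example invocation (script file name assumed for illustration; run
# 'build_indices.py' first to create the cache):
#   python tanakh_analyzer.py "יהוה" --translate --process-verses 50 --xor-depth 8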