"""SQLite-backed sentence cache.

Looks up example sentences for vocabulary words in a local SQLite database
and falls back to (or is forced into) AI generation, persisting AI results
back into the cache keyed by (word, source, model).
"""
import os
import sqlite3

from tqdm import tqdm

from ai_sentence import generate_sentence
from vocab import get_word_info

DATA_DIR = "./data"
DB_PATH = os.path.join(DATA_DIR, "sentences.db")


def init_db():
    """Create the data directory and the sentences table if missing.

    One row per (word, source, model); re-inserting the same key is handled
    by the UPSERT in save_sentence().
    """
    # sqlite3.connect does not create missing parent directories — without
    # this, a fresh checkout fails with sqlite3.OperationalError.
    os.makedirs(DATA_DIR, exist_ok=True)
    conn = sqlite3.connect(DB_PATH)
    try:
        conn.execute('''
            CREATE TABLE IF NOT EXISTS sentences (
                word TEXT,
                phonetic TEXT,
                sentence TEXT,
                source TEXT,
                model TEXT,
                created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                PRIMARY KEY (word, source, model)
            )
        ''')
        conn.commit()
    finally:
        # Close even if table creation raises, so the handle never leaks.
        conn.close()


def get_sentences_by_word(word):
    """Return all cached rows for *word*.

    Each row is a tuple (word, phonetic, sentence, source, model);
    an empty list means the word has never been cached.
    """
    conn = sqlite3.connect(DB_PATH)
    try:
        cur = conn.execute(
            'SELECT word, phonetic, sentence, source, model '
            'FROM sentences WHERE word=?',
            (word,),
        )
        return cur.fetchall()
    finally:
        conn.close()


def save_sentence(word, phonetic, sentence, source, model):
    """Insert or update one cached sentence.

    UPSERT on the (word, source, model) primary key: an existing row gets
    its sentence and phonetic refreshed; created_at keeps its original
    value on update. Requires SQLite >= 3.24 for ON CONFLICT ... DO UPDATE.
    """
    conn = sqlite3.connect(DB_PATH)
    try:
        conn.execute('''
            INSERT INTO sentences (word, phonetic, sentence, source, model)
            VALUES (?, ?, ?, ?, ?)
            ON CONFLICT(word, source, model) DO UPDATE SET
                sentence=excluded.sentence,
                phonetic=excluded.phonetic
        ''', (word, phonetic, sentence, source, model))
        conn.commit()
    finally:
        conn.close()


def generate_sentences(words, source, use_ai, model_name):
    """Build a display string of example sentences for *words*.

    For each word: look up its phonetic via get_word_info, then either use
    a cached sentence or (when use_ai is true, or no cache entry exists)
    generate one with the AI model and cache it. AI failures are caught
    and rendered inline rather than aborting the batch.
    """
    result_display = ""
    status_log = []
    for word in tqdm(words, desc="處理單字"):
        # 1. Look up the word's phonetic transcription.
        word_info = get_word_info(source, word)
        phonetic = word_info['phonetic'] if word_info else "無"
        # 2. Check the sentence cache.
        sentence_records = get_sentences_by_word(word)
        # 3. Decide whether to call the AI.
        if use_ai or not sentence_records:
            try:
                sentence = generate_sentence(word, model_name)
                save_sentence(word, phonetic, sentence, 'ai', model_name)
                source_used = 'ai'
                model_used = model_name
            except Exception as e:
                # Best-effort: surface the failure in the output instead of
                # aborting the whole batch.
                sentence = f"[AI生成失敗:{e}]"
                source_used = "error"
                model_used = None
        else:
            # Use the first cached row: (word, phonetic, sentence, source, model).
            sentence = sentence_records[0][2]
            source_used = sentence_records[0][3]
            model_used = sentence_records[0][4]
        # 4. Assemble the display output.
        # NOTE(review): the original source was truncated in the middle of a
        # triple-quoted f-string here, so the exact display format (and any
        # use of status_log) is not visible. The lines below are a minimal
        # reconstruction — verify against the complete original file.
        result_display += f"""{word} [{phonetic}] ({source_used}/{model_used})
{sentence}

"""
    return result_display, status_log