import os
import json
import random
import threading
import logging
import sqlite3
from datetime import datetime
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from sentence_transformers import SentenceTransformer, util
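# Oracle guessing game: players try to predict how the Oracle (Llama-3.1-8B-Instruct,
# running in FP32 on CPU) will complete a prompt. Guesses are scored by sentence-embedding
# similarity, and every round is logged to a local SQLite database.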
# Logging setup
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Load Oracle model (FP32, CPU-only)
logger.info("Loading Oracle model...")
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.1-8B-Instruct",
    torch_dtype=torch.float32,
    device_map="cpu"
)
model.eval()
# Load SentenceTransformer for semantic similarity
logger.info("Loading SentenceTransformer model...")
st_model = SentenceTransformer('all-MiniLM-L6-v2')
# Database setup (SQLite)
DB_PATH = "game_data.db"
conn = sqlite3.connect(DB_PATH, check_same_thread=False)
c = conn.cursor()
c.execute("""
CREATE TABLE IF NOT EXISTS rounds (
id INTEGER PRIMARY KEY AUTOINCREMENT,
timestamp TEXT,
prompt TEXT,
full_guess TEXT,
idea_guess TEXT,
completion TEXT,
score_full INTEGER,
score_idea INTEGER,
round_points INTEGER
)
""")
conn.commit()
# Load prompts from JSON
PROMPTS_PATH = "oracle_prompts.json"
with open(PROMPTS_PATH, 'r') as f:
    PROMPTS = json.load(f)
# Helper functions
def get_next_prompt(state):
    if not state["prompts"]:
        prompts = PROMPTS.copy()
        random.shuffle(prompts)
        state["prompts"] = prompts
        state["used"] = []
    prompt = state["prompts"].pop(0)
    state["used"].append(prompt)
    state["round"] += 1
    return prompt
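# Score a guess by cosine similarity between MiniLM sentence embeddings of the
# guess and the Oracle's completion: >0.9 -> 5 pts, >0.7 -> 3 pts, >0.5 -> 1 pt.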
def compute_score(guess, completion):
    if not guess.strip():
        return 0
    emb_guess = st_model.encode(guess, convert_to_tensor=True)
    emb_comp = st_model.encode(completion, convert_to_tensor=True)
    cos_sim = util.pytorch_cos_sim(emb_guess, emb_comp).item()
    if cos_sim > 0.9:
        return 5
    elif cos_sim > 0.7:
        return 3
    elif cos_sim > 0.5:
        return 1
    else:
        return 0
def log_round(prompt, full_guess, idea_guess, completion, score_full, score_idea, round_points):
    ts = datetime.utcnow().isoformat()
    c.execute(
        "INSERT INTO rounds (timestamp, prompt, full_guess, idea_guess, completion, score_full, score_idea, round_points) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
        (ts, prompt, full_guess, idea_guess, completion, score_full, score_idea, round_points)
    )
    conn.commit()
    logger.info(f"Round logged at {ts}")
def play_round(full_guess, idea_guess, state):
    prompt = state.get("current_prompt", "")
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    # Strip special tokens so markers like the end-of-turn token don't leak into the text.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    def generate():
        model.generate(
            input_ids=input_ids,
            max_new_tokens=200,
            do_sample=True,
            temperature=0.8,
            streamer=streamer
        )

    thread = threading.Thread(target=generate)
    thread.start()
    completion = ""
    for token in streamer:
        completion += token
        # Every yield must supply all four outputs wired to submit.click.
        yield completion, "", "", state["score"]
    thread.join()
    score_full = compute_score(full_guess, completion)
    score_idea = compute_score(idea_guess, completion)
    round_points = score_full + score_idea
    state["score"] += round_points
    log_round(prompt, full_guess, idea_guess, completion, score_full, score_idea, round_points)
    score_text = f"Full Guess: {score_full} pts | Idea Guess: {score_idea} pts | Round Total: {round_points} pts"
    reflection = "🔮 The Oracle ponders your insights..."
    if state["round"] >= 5 and state["score"] >= 15:
        unused = [p for p in PROMPTS if p not in state["used"]]
        if unused:
            secret = random.choice(unused)
            reflection += f"\n\n✨ **Secret Oracle Prompt:** {secret}"
    yield completion, score_text, reflection, state["score"]
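# Advance to the next prompt and clear every input/output field for the new round.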
def next_round_fn(state):
    prompt = get_next_prompt(state)
    state["current_prompt"] = prompt
    return prompt, "", "", "", "", "", state["score"]
# Gradio UI
demo = gr.Blocks()
with demo:
    state = gr.State({"prompts": [], "used": [], "round": 0, "score": 0, "current_prompt": ""})
    gr.Markdown("⚠️ **Your input and the Oracle’s response will be stored for AI training and research. By playing, you consent to this.**")
    prompt_display = gr.Markdown("", elem_id="prompt_display")
    with gr.Row():
        full_guess = gr.Textbox(label="🧠 Exact Full Completion Guess")
        idea_guess = gr.Textbox(label="💡 General Idea Guess")
    submit = gr.Button("Submit Guess")
    completion_box = gr.Textbox(label="Oracle's Completion", interactive=False)
    score_box = gr.Textbox(label="Score", interactive=False)
    reflection_box = gr.Textbox(label="Mystical Reflection", interactive=False)
    next_btn = gr.Button("Next Round")
    total_score_display = gr.Textbox(label="Total Score", interactive=False)
    next_btn.click(next_round_fn, inputs=state, outputs=[prompt_display, full_guess, idea_guess, completion_box, score_box, reflection_box, total_score_display])
    submit.click(play_round, inputs=[full_guess, idea_guess, state], outputs=[completion_box, score_box, reflection_box, total_score_display])
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)