import gradio as gr
import random
import os
# If you want to run Stable Diffusion XL locally with diffusers:
# from diffusers import StableDiffusionXLPipeline
# import torch
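# Assumed dependencies for this Space (e.g. in requirements.txt): gradio.
# diffusers, torch, and a CUDA GPU are only needed if you enable the local
# SDXL pipeline below; otherwise the demo runs with gradio alone.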
# -----------------------------
# 1) LOAD QUESTION BANK
# -----------------------------
def load_question_bank(filepath="question_bank.txt"):
    """
    Reads the question bank file.
    Each line should be in the format: question|answer
    Returns a list of (question, answer) tuples.
    """
    questions = []
    if os.path.exists(filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            lines = f.read().splitlines()
        for line in lines:
            if "|" in line:
                q, a = line.split("|", 1)
                questions.append((q.strip(), a.strip()))
    return questions
QUESTION_BANK = load_question_bank("question_bank.txt")
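# Hypothetical sample of question_bank.txt (one "question|answer" pair per line);
# the actual contents of the file are up to you:
#
#   What is the capital of France?|Paris
#   Which planet is known as the Red Planet?|Mars
#   How many continents are there?|7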
# -----------------------------
# 2) GLOBAL OR SESSION STATE
# -----------------------------
# Plain Python globals would be shared across every user in a multi-user setting,
# so user-specific data should live in `gr.State` (or a per-session dictionary).
# We'll keep track of points, the current question, and its answer using gr.State.
# For local stable diffusion usage, you could instantiate a pipeline:
# pipe = StableDiffusionXLPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-xl-base-1.0",
#     torch_dtype=torch.float16,
# ).to("cuda")
# For demonstration, we'll just simulate the image generation.
def generate_image(prompt):
    """
    In a real environment this would run a Stable Diffusion XL pipeline.
    For demonstration, it just returns a placeholder image URL.
    """
    # Uncomment if using a local pipeline:
    # image = pipe(prompt).images[0]
    # return image
    # For now, return a placeholder; gr.Image also accepts a URL or local file path,
    # so swap this out for the real pipeline output when available.
    placeholder_url = "https://via.placeholder.com/512x512.png?text=Stable+Diffusion+XL+Result"
    return placeholder_url
# -----------------------------
# 3) CORE LOGIC
# -----------------------------
def get_new_question(state):
    """
    Updates the state with a new random question from the question bank
    and resets the user answer display.
    """
    if not QUESTION_BANK:
        state["current_question"] = "No questions available!"
        state["correct_answer"] = ""
        return "No questions available!", ""
    # Randomly pick a question from the bank
    question, answer = random.choice(QUESTION_BANK)
    state["current_question"] = question
    state["correct_answer"] = answer
    return question, ""
def check_answer(user_answer, state):
    """
    Checks the user's answer, updates points, and returns feedback.
    """
    correct_answer = state["correct_answer"]
    if not correct_answer:
        # No question has been loaded yet, so don't award points for an empty match.
        return "Load a question first!", state["current_question"] or "Click 'Load Question' to start!", ""
    if user_answer.strip().lower() == correct_answer.lower():
        # Increase the user's points by 1000
        state["points"] += 1000
        feedback = f"Correct! You have earned 1000 points. Total points: {state['points']}"
    else:
        feedback = f"Wrong! The correct answer was '{correct_answer}'. Total points: {state['points']}"
    # Automatically provide the next question; the user can also press 'Load Question'
    question, _ = get_new_question(state)
    return feedback, question, ""
def on_generate_image(prompt, state):
    """
    Generates an image if the user has at least 2000 points.
    Otherwise raises a Gradio error.
    """
    if state["points"] >= 2000:
        return generate_image(prompt)
    # Returning a plain string to a gr.Image output would fail, so surface the
    # message through Gradio's error popup instead.
    raise gr.Error("You need at least 2000 points to generate an image!")
# -----------------------------
# 4) BUILD THE GRADIO INTERFACE
# -----------------------------
def quiz_app():
    with gr.Blocks(theme='NoCrypt/miku') as demo:
        # gr.State keeps per-session data (points, current question, correct answer)
        # across function calls; it is created inside the Blocks context.
        state = gr.State({
            "points": 0,
            "current_question": "",
            "correct_answer": ""
        })
        gr.Markdown("# Quiz Game with Image Generation (Stable Diffusion XL)")
        # Display current question
        question_display = gr.Markdown(value="Click 'Load Question' to start!", label="Question")
        # Button to load a new question
        load_button = gr.Button("Load Question")
        # Textbox for the user's answer
        answer_box = gr.Textbox(lines=1, label="Your Answer")
        # Button to submit the answer
        submit_button = gr.Button("Submit Answer")
        # Feedback box
        feedback_display = gr.Markdown()
        # Text prompt used to generate an image
        image_prompt_box = gr.Textbox(lines=1, label="Image Prompt")
        # Button to generate an image
        generate_button = gr.Button("Generate Image with SDXL")
        # Image output
        image_output = gr.Image(label="Generated Image", height=300, width=170)

        # Function bindings
        load_button.click(
            fn=get_new_question,
            inputs=state,
            outputs=[question_display, answer_box]
        )
        submit_button.click(
            fn=check_answer,
            inputs=[answer_box, state],
            outputs=[feedback_display, question_display, answer_box]
        )
        generate_button.click(
            fn=on_generate_image,
            inputs=[image_prompt_box, state],
            outputs=image_output
        )
    return demo
if __name__ == "__main__":
    demo_app = quiz_app()
    demo_app.launch(debug=True)