import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import re

# Load model and tokenizer from Hugging Face Hub
model_name = "mjpsm/Positive-Affirmations-Model"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Generation function
def generate_affirmation(description):
    # Structured prompt to guide model output
    prompt = f"[SUBJECT] learning [/SUBJECT] [STREAK] current performance context [/STREAK] [CONTEXT] {description} [/CONTEXT] [AFFIRMATION]"

    inputs = tokenizer(prompt, return_tensors="pt")

    # Sample with moderate temperature plus repetition penalties so the
    # affirmation stays varied without looping on the same phrases.
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=60,
            temperature=0.7,
            top_k=50,
            top_p=0.95,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            repetition_penalty=1.2,
            no_repeat_ngram_size=3
        )

    full_output = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Try to extract text between [AFFIRMATION] and [/AFFIRMATION]
    match = re.search(r"\[AFFIRMATION\](.*?)\[/AFFIRMATION\]", full_output, re.DOTALL)
    if match:
        return match.group(1).strip()
    else:
        # Fallback: try to extract everything after [AFFIRMATION]
        fallback_match = re.search(r"\[AFFIRMATION\](.*)", full_output, re.DOTALL)
        if fallback_match:
            return fallback_match.group(1).strip()
        return "⚠️ No affirmation found in the response."

# Gradio interface
demo = gr.Interface(
    fn=generate_affirmation,
    inputs=gr.Textbox(label="Describe the player situation (e.g., 'struggled with algebra')"),
    outputs=gr.Textbox(label="AI Affirmation"),
    title="Positive Affirmation Generator",
    description="Describe a learning moment, and receive an uplifting affirmation generated by AI."
)

if __name__ == "__main__":
    demo.launch()
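
# Hypothetical quick check without the Gradio UI: call the generation function
# directly from a Python shell (the example input string below is illustrative).
#   from app import generate_affirmation
#   print(generate_affirmation("struggled with algebra but kept practicing"))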