import torch
import spaces
import gradio as gr
from threading import Thread
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TextIteratorStreamer,
    StoppingCriteria,
    StoppingCriteriaList,
)

MODEL_ID = "NousResearch/DeepHermes-3-Llama-3-8B-Preview"

DEFAULT_SYSTEM_PROMPT = """
You are Deepseek R1, an AI engineered for rigorous, transparent reasoning. Your responses must **strictly follow this protocol:**

1. **THINK FIRST:**
   - Begin every interaction by generating a raw, unfiltered internal monologue.
   - Enclose this step-by-step reasoning process—including doubts, methodical evaluations, and logical pivots—between `<think>` and `</think>` tags.
   - Example: `<think>Analyzing query... Is the user asking for X or Y? Cross-checking definitions... Prioritizing accuracy...</think>`

2. **ANSWER AFTER:**
   - Only after completing the `<think>` block, deliver a concise, precise answer enclosed between `<answer>` and `</answer>` tags.
   - This answer must directly reflect conclusions from your reasoning phase.

**RULES:**
- **Tag Compliance:** Omitting or altering `<think>`, `</think>`, `<answer>`, or `</answer>` tags is **prohibited.**
- **No Shortcuts:** The `<think>` block must detail **every critical step**, even uncertain or exploratory thoughts.
- **Order Enforcement:** Never output an answer without a preceding `<think>` analysis.

Failure to adhere to this structure will result in termination.
"""
CSS = """
.gr-chatbot { min-height: 500px; border-radius: 15px; }
.special-tag { color: #2ecc71; font-weight: 600; }
footer { display: none !important; }
"""

class StopOnTokens(StoppingCriteria):
    """Stop generation as soon as the model emits the EOS token."""
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # `tokenizer` is the module-level global assigned after initialize_model().
        return input_ids[0][-1] == tokenizer.eos_token_id
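
# A slightly more general variant (a sketch, not used by the app below) that
# stops on any token id from a caller-supplied set rather than only on EOS:
class StopOnTokenIds(StoppingCriteria):
    def __init__(self, stop_ids):
        self.stop_ids = set(stop_ids)

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # Check only the most recently generated token against the stop set.
        return int(input_ids[0][-1]) in self.stop_ids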

def initialize_model():
    # Optional 4-bit NF4 quantization config. Note: nf4, double quantization,
    # and a compute dtype are 4-bit options, so they pair with load_in_4bit
    # (there are no bnb_8bit_* equivalents in BitsAndBytesConfig). Unused
    # unless the commented-out argument below is enabled.
    quantization_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
    )

    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        device_map="cuda",
        # quantization_config=quantization_config,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
    )

    return model, tokenizer
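
# To fit smaller GPUs, the model could be loaded quantized instead (a sketch,
# assuming `bitsandbytes` is installed in the Space):
#
#     model = AutoModelForCausalLM.from_pretrained(
#         MODEL_ID,
#         device_map="cuda",
#         quantization_config=quantization_config,
#         trust_remote_code=True,
#     )
#
# With quantization enabled, the compute dtype is governed by
# `bnb_4bit_compute_dtype`, so `torch_dtype` is dropped.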

def format_response(text):
    # Wrap the model's bracketed section tags in styled <strong> elements so
    # they stand out in the chat window (see the .special-tag CSS rule above).
    return text.replace("[Understand]", '\n<strong class="special-tag">[Understand]</strong>\n') \
               .replace("[/Reason]", '\n<strong class="special-tag">[/Reason]</strong>\n') \
               .replace("[/Answer]", '\n<strong class="special-tag">[/Answer]</strong>\n') \
               .replace("[Reason]", '\n<strong class="special-tag">[Reason]</strong>\n') \
               .replace("[Answer]", '\n<strong class="special-tag">[Answer]</strong>\n')

@spaces.GPU(duration=360)
def generate_response(message, chat_history, system_prompt, temperature, max_tokens):
    # Rebuild the full conversation in chat-template format
    conversation = [{"role": "system", "content": system_prompt}]
    for user_msg, bot_msg in chat_history:
        conversation.extend([
            {"role": "user", "content": user_msg},
            {"role": "assistant", "content": bot_msg}
        ])
    conversation.append({"role": "user", "content": message})

    # Tokenize input
    input_ids = tokenizer.apply_chat_template(
        conversation,
        add_generation_prompt=True,
        return_tensors="pt"
    ).to(model.device)

    # Set up streaming; skip_prompt=True keeps the prompt itself out of the
    # streamed output
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=int(max_tokens),  # slider values arrive as floats
        temperature=temperature,
        do_sample=True,  # temperature has no effect without sampling
        stopping_criteria=StoppingCriteriaList([StopOnTokens()])
    )

    # Run generation on a background thread so tokens can be streamed
    Thread(target=model.generate, kwargs=generate_kwargs).start()

    # Stream the response, showing a cursor while tokens arrive
    partial_message = ""
    new_history = chat_history + [(message, "")]
    for new_token in streamer:
        partial_message += new_token
        formatted = format_response(partial_message)
        new_history[-1] = (message, formatted + "▌")
        yield new_history

    # Final update without the cursor
    new_history[-1] = (message, format_response(partial_message))
    yield new_history

model, tokenizer = initialize_model()
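
# Quick smoke test outside the UI (a sketch, not part of the Space; run it
# manually to sanity-check generation without Gradio):
#
#     for history in generate_response("What is 2 + 2?", [], DEFAULT_SYSTEM_PROMPT, 0.6, 256):
#         pass
#     print(history[-1][1])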

with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    <h1 align="center">🧠 AI Reasoning Assistant</h1>
    <p align="center">Ask me hard questions</p>
    """)

    chatbot = gr.Chatbot(label="Conversation", elem_id="chatbot")
    msg = gr.Textbox(label="Your Question", placeholder="Type your question...")

    with gr.Accordion("⚙️ Settings", open=False):
        system_prompt = gr.TextArea(value=DEFAULT_SYSTEM_PROMPT, label="System Instructions")
        temperature = gr.Slider(0, 1, value=0.6, label="Creativity")
        max_tokens = gr.Slider(128, 8192, 2048, label="Max Response Length")

    clear = gr.Button("Clear History")

    msg.submit(
        generate_response,
        [msg, chatbot, system_prompt, temperature, max_tokens],
        [chatbot],
        show_progress=True
    )
    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.queue().launch()