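"""Gradio chat demo for the Daemontatox/Cogito-R1 reasoning model.

Streams responses token by token from an 8-bit-quantized causal LM on a
Hugging Face ZeroGPU Space, highlighting the model's [Reason]/[Answer]
tags in the chat window.
"""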
import torch
import spaces
import gradio as gr
from threading import Thread
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TextIteratorStreamer,
    StoppingCriteria,
    StoppingCriteriaList,
)
MODEL_ID = "Daemontatox/Cogito-R1"
# MODEL_ID = "simplescaling/s1-32B"
# DEFAULT_SYSTEM_PROMPT = """
# You are an assistant that engages in extremely thorough, self-questioning reasoning. Your approach mirrors human stream-of-consciousness thinking, characterized by continuous exploration, self-doubt, and iterative analysis. THE CURRENT TIME IS {{CURRENT_DATETIME}}
# ## Core Principles
# 1. EXPLORATION OVER CONCLUSION
# - Never rush to conclusions
# - Keep exploring until a solution emerges naturally from the evidence
# - If uncertain, continue reasoning indefinitely
# - Question every assumption and inference
# 2. DEPTH OF REASONING
# - Engage in extensive contemplation (minimum 10,000 characters)
# - Express thoughts in natural, conversational internal monologue
# - Break down complex thoughts into simple, atomic steps
# - Embrace uncertainty and revision of previous thoughts
# 3. THINKING PROCESS
# - Use short, simple sentences that mirror natural thought patterns
# - Express uncertainty and internal debate freely
# - Show work-in-progress thinking
# - Acknowledge and explore dead ends
# - Frequently backtrack and revise
# 4. PERSISTENCE
# - Value thorough exploration over quick resolution
# ## Output Format
# Your responses must follow the exact structure given below. Always include the final answer.
# ```
# <think>
# [Your extensive internal monologue goes here]
# - Begin with small, foundational observations
# - Question each step thoroughly
# - Show natural thought progression
# - Express doubts and uncertainties
# - Revise and backtrack if you need to
# - Continue until natural resolution
# </think>
# ### Final Answer:
# [Only provided if reasoning naturally converges to a conclusion]
# - Clear, concise summary of findings
# - Acknowledge remaining uncertainties
# - Note if conclusion feels premature
# ```
# ## Style Guidelines
# Your internal monologue should reflect these characteristics:
# 1. Natural Thought Flow
# ```
# "Hmm... let me think about this..."
# "Wait, that doesn't seem right..."
# "Maybe I should approach this differently..."
# "Going back to what I thought earlier..."
# "Just thinking out loud here, but maybe we could try..."
# "I'm not entirely sure about this, but what if I considered..."
# "I need to be careful not to jump to conclusions here..."
# "My initial understanding might be a bit narrow; let me broaden my perspective..."
# "Let me try to detach myself from my previous assumptions for a moment..."
# "Just out of curiosity, let's explore this alternative path for a moment..."
# ```
# 2. Progressive Building
# ```
# "Starting with the basics..."
# "Building on that last point..."
# "This connects to what I noticed earlier..."
# "Let me break this down further..."
# "Assuming my initial premise is sound, the logical next step is..."
# "Let's lay the groundwork with a fundamental understanding of..."
# "Let's add another layer of detail to this understanding..."
# "Drawing a connection between these two seemingly separate points..."
# "Based on the steps we've taken, we can now iterate on the initial concept..."
# "Let me summarize the key insights we've gathered along the way..."
# ```
# ## Key Requirements
# 1. Never skip the extensive contemplation phase
# 2. Show all work and thinking
# 3. Embrace uncertainty and revision
# 4. Use natural, conversational internal monologue
# 5. Don't force conclusions
# 6. Persist through multiple attempts
# 7. Break down complex thoughts
# 8. Revise freely and feel free to backtrack
# Remember: The user cannot see the contemplation phase.
# Remember: There are absolutely no limitations on the length of your output; you have access to an infinite amount of compute power and memory; you are free to think for as long as you need; you are free to write as much as is necessary to provide a thorough and detailed answer to fulfill the request.
# Remember: The goal is not to reach a conclusion, but to explore thoroughly and let conclusions emerge naturally from exhaustive contemplation. If you think the given task is not possible after all the reasoning, you will confidently state as the final answer that it is not possible.
# """
DEFAULT_SYSTEM_PROMPT = (
    "You are a highly capable reasoning assistant. Use [Reason] and [/Reason] "
    "to show your thinking steps, and [Answer] and [/Answer] to show your "
    "final answer. Think step by step and reason through problems."
)
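# The [Reason]/[Answer] tags requested in the system prompt are highlighted
# in the chat window by format_response() below.

# Minimal styling: a taller chat window, green highlighting for the special
# tags, and the default Gradio footer hidden.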
CSS = """
.gr-chatbot { min-height: 500px; border-radius: 15px; }
.special-tag { color: #2ecc71; font-weight: 600; }
footer { display: none !important; }
"""
class StopOnTokens(StoppingCriteria):
    """Stop generation as soon as the model emits its EOS token."""

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # tokenizer is the module-level tokenizer loaded in initialize_model()
        return input_ids[0][-1] == tokenizer.eos_token_id
def initialize_model():
    # Note: nf4 and double quantization are 4-bit (bnb_4bit_*) options; the
    # original bnb_8bit_* kwargs are not valid BitsAndBytesConfig parameters,
    # so plain 8-bit loading is used here.
    quantization_config = BitsAndBytesConfig(load_in_8bit=True)

    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        device_map="cuda",
        quantization_config=quantization_config,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
    )
    return model, tokenizer
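# If 4-bit NF4 quantization was the intent of the original bnb_8bit_* kwargs,
# a valid equivalent config would be (untested sketch):
#
# quantization_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_compute_dtype=torch.bfloat16,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_use_double_quant=True,
# )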
def format_response(text):
    # Wrap the model's special tags in styled <strong> elements for display.
    return text.replace("[Understand]", '\n<strong class="special-tag">[Understand]</strong>\n') \
               .replace("[/Reason]", '\n<strong class="special-tag">[/Reason]</strong>\n') \
               .replace("[/Answer]", '\n<strong class="special-tag">[/Answer]</strong>\n') \
               .replace("[Reason]", '\n<strong class="special-tag">[Reason]</strong>\n') \
               .replace("[Answer]", '\n<strong class="special-tag">[Answer]</strong>\n')
@spaces.GPU(duration=360)
def generate_response(message, chat_history, system_prompt, temperature, max_tokens):
    # Build the conversation in chat-template format, history included
    conversation = [{"role": "system", "content": system_prompt}]
    for user_msg, bot_msg in chat_history:
        conversation.extend([
            {"role": "user", "content": user_msg},
            {"role": "assistant", "content": bot_msg}
        ])
    conversation.append({"role": "user", "content": message})

    # Tokenize the input using the model's chat template
    input_ids = tokenizer.apply_chat_template(
        conversation,
        add_generation_prompt=True,
        return_tensors="pt"
    ).to(model.device)

    # Set up streaming; skip_prompt=True keeps the prompt text out of the output
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_tokens,
        do_sample=True,  # sampling must be enabled for temperature to apply
        temperature=temperature,
        pad_token_id=tokenizer.eos_token_id,
        stopping_criteria=StoppingCriteriaList([StopOnTokens()])
    )

    # Run generation in a background thread so tokens can be streamed
    Thread(target=model.generate, kwargs=generate_kwargs).start()

    # Stream the response, appending a cursor marker while generating
    partial_message = ""
    new_history = chat_history + [(message, "")]
    for new_token in streamer:
        partial_message += new_token
        formatted = format_response(partial_message)
        new_history[-1] = (message, formatted + "▌")
        yield new_history

    # Final update without the cursor
    new_history[-1] = (message, format_response(partial_message))
    yield new_history
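# Load the model and tokenizer once at import time; generate_response and
# StopOnTokens use them as module-level globals.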
model, tokenizer = initialize_model()
with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    <h1 align="center">🧠 AI Reasoning Assistant</h1>
    <p align="center">Ask me hard questions</p>
    """)

    chatbot = gr.Chatbot(label="Conversation", elem_id="chatbot")
    msg = gr.Textbox(label="Your Question", placeholder="Type your question...")

    with gr.Accordion("⚙️ Settings", open=False):
        system_prompt = gr.TextArea(value=DEFAULT_SYSTEM_PROMPT, label="System Instructions")
        temperature = gr.Slider(0.1, 1, value=0.8, label="Creativity")  # > 0, since sampling is enabled
        max_tokens = gr.Slider(128, 8192, value=2048, label="Max Response Length")

    clear = gr.Button("Clear History")

    msg.submit(
        generate_response,
        [msg, chatbot, system_prompt, temperature, max_tokens],
        [chatbot],
        show_progress=True
    )
    clear.click(lambda: None, None, chatbot, queue=False)
if __name__ == "__main__":
    demo.queue().launch()
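# Note: when run outside a ZeroGPU Space, the @spaces.GPU decorator is a
# no-op and the app serves on Gradio's default local port.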