import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("lambdaindie/lambdai")

css = r"""
/* Base font and colors */
* {
    font-family: 'JetBrains Mono', monospace;
}

.gradio-container {
    background-color: #111;
    color: #e0e0e0;
}

/* Inputs and chat bubbles */
textarea, input, .block, .wrap, .chatbot {
    background-color: #1a1a1a !important;
    color: #e0e0e0 !important;
    border: 1px solid #333 !important;
    border-radius: 10px;
}

/* Pulse animation for buttons */
@keyframes pulse {
    0%   { transform: scale(1);    box-shadow: 0 0 0 0 rgba(255,255,255,0.5); }
    70%  { transform: scale(1.05); box-shadow: 0 0 0 10px rgba(255,255,255,0); }
    100% { transform: scale(1);    box-shadow: 0 0 0 0 rgba(255,255,255,0); }
}

button.pulse {
    background-color: #272727 !important;
    border: 1px solid #444 !important;
    color: #e0e0e0 !important;
    border-radius: 10px;
    animation: pulse 2s infinite;
}

/* Button hover */
button.pulse:hover {
    background-color: #444 !important;
}

/* "Thinking" spinner */
@keyframes spin {
    0%   { transform: rotate(0deg); }
    100% { transform: rotate(360deg); }
}

.loader {
    border: 3px solid #2b2b2b;
    border-top: 3px solid #e0e0e0;
    border-radius: 50%;
    width: 18px;
    height: 18px;
    animation: spin 1s linear infinite;
    display: inline-block;
    margin-right: 8px;
    vertical-align: middle;
}

/* "Thinking" bubble inside the chat */
.thinking-html {
    background-color: #2b2b2b;
    padding: 8px;
    border-radius: 8px;
    margin-bottom: 8px;
    font-style: italic;
    color: #aaaaaa;
    display: flex;
    align-items: center;
}

/* Settings panel; visibility is toggled from Python via gr.update() */
#settings-panel {
    background-color: #1a1a1a;
    padding: 10px;
    border-radius: 10px;
    margin-top: 10px;
}
"""

with gr.Blocks(css=css) as demo:
    gr.Markdown(
        """
        <div align="center">
            <h1>Lambdai-v1-1B Chat</h1>
        </div>
        """
    )

    chatbot = gr.Chatbot(
        elem_id="chatbot",
        height=480,
        render_markdown=True,
        type="messages",
    )

    with gr.Row():
        system_message = gr.Textbox(
            value="You are a helpful assistant.",
            label="System message",
            lines=1,
        )

    with gr.Row():
        user_input = gr.Textbox(
            show_label=False,
            placeholder="Type your message here...",
            lines=2,
        )
        send_button = gr.Button("Λ Think", elem_classes="pulse")

    with gr.Row():
        settings_button = gr.Button("Show Settings", elem_classes="pulse")

    # Parameter sliders, hidden until the user clicks "Show Settings".
    with gr.Column(elem_id="settings-panel", visible=False) as settings_panel:
        max_tokens = gr.Slider(128, 2048, value=512, step=1, label="Max tokens")
        temperature = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Temperature")
        top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")

    # Track the panel state per session; component attributes such as
    # settings_panel.visible are not updated server-side after launch.
    settings_open = gr.State(False)

    def toggle_settings(is_open):
        return gr.update(visible=not is_open), not is_open

    def respond(message, chat_history, system_message, max_tokens, temperature, top_p):
        chat_history = chat_history or []

        # Show the spinner + "thinking" bubble while the reply streams in.
        thinking_html = (
            "<div class='thinking-html'>"
            "<div class='loader'></div>"
            "Thinking… generating reasoning path…"
            "</div>"
        )
        yield chat_history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": thinking_html},
        ]

        # Build the payload for the API; with type="messages" the history is
        # already a list of {"role": ..., "content": ...} dicts.
        messages = [{"role": "system", "content": system_message}]
        messages += [
            {"role": m["role"], "content": m["content"]}
            for m in chat_history
            if m.get("content")
        ]
        messages.append({"role": "user", "content": message})

        # Stream the completion, replacing the "thinking" bubble as text arrives.
        response = ""
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        ):
            delta = chunk.choices[0].delta.content or ""
            response += delta
            yield chat_history + [
                {"role": "user", "content": message},
                {"role": "assistant", "content": response},
            ]

    # Event inputs must be components, not literal values, so the sliders
    # defined above are wired in here.
    send_button.click(
        fn=respond,
        inputs=[user_input, chatbot, system_message, max_tokens, temperature, top_p],
        outputs=chatbot,
    )

    settings_button.click(
        fn=toggle_settings,
        inputs=settings_open,
        outputs=[settings_panel, settings_open],
    )

demo.launch()