import gradio as gr
from huggingface_hub import InferenceClient
# Initialize the inference client for the model
client = InferenceClient("lambdaindie/lambdai")
# Simple dark-theme CSS
css = r"""
* { font-family: 'JetBrains Mono', monospace; }
.gradio-container { background-color: #111; color: #e0e0e0; }
textarea, input, .block, .wrap, .chatbot {
    background-color: #1a1a1a !important;
    color: #e0e0e0 !important;
    border: 1px solid #333 !important;
    border-radius: 10px;
}
button.pulse {
    background-color: #272727 !important;
    border: 1px solid #444 !important;
    color: #e0e0e0 !important;
    border-radius: 10px;
    animation: pulse 2s infinite;
}
@keyframes pulse {
    0% { transform: scale(1); box-shadow: 0 0 0 0 rgba(255,255,255,0.5); }
    70% { transform: scale(1.05); box-shadow: 0 0 0 10px rgba(255,255,255,0); }
    100% { transform: scale(1); box-shadow: 0 0 0 0 rgba(255,255,255,0); }
}
.loader {
    border: 3px solid #2b2b2b;
    border-top: 3px solid #e0e0e0;
    border-radius: 50%;
    width: 18px;
    height: 18px;
    animation: spin 1s linear infinite;
}
@keyframes spin {
    0% { transform: rotate(0deg); }
    100% { transform: rotate(360deg); }
}
.thinking-html {
    background-color: #2b2b2b;
    padding: 8px;
    border-radius: 8px;
    margin-bottom: 8px;
    font-style: italic;
    color: #aaaaaa;
    display: flex;
    align-items: center;
}
"""
# Main response function: first shows a "thinking" placeholder, then the model's answer
def respond(message, chat_history):
    # Temporary placeholder with a spinner while the model generates
    thinking_html = (
        "<div class='thinking-html'>"
        "<div class='loader'></div>"
        "Thinking… generating response..."
        "</div>"
    )
    # First yield: append the user message and the placeholder to the chat
    yield chat_history + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": thinking_html},
    ]
    # Query the model (only the latest user message is sent, without prior history)
    response = client.chat_completion([{"role": "user", "content": message}], stream=False)
    answer = response.choices[0].message.content
    # Second yield: replace the placeholder with the actual answer
    yield chat_history + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": answer},
    ]
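
# --- Optional streaming variant (a sketch, not wired into the UI below) ---
# Assumption: the endpoint behind InferenceClient accepts stream=True on
# chat_completion; if so, partial tokens can be yielded as they arrive
# instead of showing the spinner until the full answer is ready.
# To try it, pass fn=respond_stream to send_button.click further down.
def respond_stream(message, chat_history):
    history = chat_history + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": ""},
    ]
    partial = ""
    # Each chunk carries an incremental delta of the assistant's answer
    for chunk in client.chat_completion([{"role": "user", "content": message}], stream=True):
        delta = chunk.choices[0].delta.content or ""
        partial += delta
        history[-1] = {"role": "assistant", "content": partial}
        yield history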
# Gradio interface
with gr.Blocks(css=css) as demo:
    gr.Markdown("<h1 style='text-align:center;color:#e0e0e0;'>Lambdai-v1-1B</h1>")
    # type="messages" so the chatbot accepts the role/content dicts yielded by respond()
    chatbot = gr.Chatbot(elem_id="chatbot", type="messages", height=480, render_markdown=True)
    with gr.Row():
        user_input = gr.Textbox(show_label=False, placeholder="Type your message here...", lines=2)
        send_button = gr.Button("Send", elem_classes="pulse")
    # Trigger the response function when the button is clicked
    send_button.click(
        fn=respond,
        inputs=[user_input, chatbot],
        outputs=chatbot,
    )

demo.launch()