# Gradio chat demo for a causal LM (HuggingFace Spaces app).
import random
import threading
import time

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the model and tokenizer once at import time.
# NOTE: downloads weights on first run — requires network access or a
# populated Hugging Face cache, and can take a while for a ~2.7B model.
model_id = "microsoft/phi-2" # Change to your desired model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
# --- Functions ---
def get_response(input_text, temperature, top_p, top_k, max_length):
    """Generate a completion for ``input_text`` with the module-level model.

    Args:
        input_text: Prompt string.
        temperature: Softmax temperature for sampling.
        top_p: Nucleus-sampling probability mass.
        top_k: Top-k sampling cutoff.
        max_length: Total token budget (prompt + completion).

    Returns:
        The decoded generation as a string (includes the prompt, since the
        full output sequence is decoded).
    """
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=max_length,
        # BUG FIX: without do_sample=True, generate() greedy-decodes and
        # silently ignores temperature/top_p/top_k — the UI sliders did
        # nothing. Enable sampling so those parameters take effect.
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        # Avoid the "pad_token_id not set" warning for models that define
        # no pad token by falling back to the EOS token.
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
def analyze_text(text):
    """Return simple size statistics for ``text``.

    Keys: "Number of characters", "Number of words" (whitespace-split),
    and "Number of tokens" (per the module-level tokenizer).
    """
    return {
        "Number of characters": len(text),
        "Number of words": len(text.split()),
        "Number of tokens": len(tokenizer.tokenize(text)),
    }
# --- Interface ---
css = """
.gradio-container {
background-color: #000000; /* Black background */
font-family: 'Roboto', sans-serif;
}
.gradio-interface {
background-color: rgba(0, 0, 0, 0.8); /* Dark translucent background */
border: 3px solid #00FF00; /* Green border */
padding: 20px;
box-shadow: 0 0 10px rgba(0, 255, 0, 0.5); /* Green shadow */
}
.gradio-button {
background-color: #00FF00; /* Green button */
color: white;
border: none;
padding: 10px 20px;
font-size: 16px;
cursor: pointer;
}
.gradio-button:hover {
background-color: #00CC00; /* Darker green on hover */
}
.gradio-textbox {
background-color: #111111; /* Dark gray background */
color: #00FF00; /* Green text */
border: 1px solid #00FF00; /* Green border */
padding: 10px;
margin-bottom: 10px;
}
.gradio-text-area {
background-color: #111111; /* Dark gray background */
color: #00FF00; /* Green text */
border: 1px solid #00FF00; /* Green border */
padding: 10px;
}
.gradio-slider {
background-color: #111111; /* Dark gray background */
color: #00FF00; /* Green text */
}
.gradio-slider .slider-bar {
background-color: #00FF00; /* Green slider bar */
}
.gradio-slider .slider-thumb {
background-color: #00FF00; /* Green slider thumb */
}
.gradio-label {
color: #00FF00; /* Green labels */
}
h1, h2 {
color: #00FF00; /* Green headings */
text-align: center;
}
.analysis-container {
margin-top: 20px;
padding: 10px;
border: 1px solid #00FF00;
}
"""
def _respond_and_analyze(input_text, temperature, top_p, top_k, max_length):
    """Gradio adapter: produce one value per output component.

    BUG FIX: the original wired `fn=get_response` (one return value) to
    TWO output components, which gradio rejects at call time. This wrapper
    returns (response_text, analysis_html) so both outputs are populated.
    """
    response = get_response(input_text, temperature, top_p, top_k, max_length)
    stats = analyze_text(response)
    analysis_html = (
        "<div class='analysis-container'>"
        f"Number of characters: {stats['Number of characters']}<br>"
        f"Number of words: {stats['Number of words']}<br>"
        f"Number of tokens: {stats['Number of tokens']}</div>"
    )
    return response, analysis_html


iface = gr.Interface(
    fn=_respond_and_analyze,
    inputs=[
        # BUG FIX: removed per-component `style=` kwargs — gradio 3+
        # components do not accept them (TypeError); theming is done via
        # the `css` string instead.
        gr.Textbox(label="Your message:", lines=5,
                   placeholder="Ask me anything...", show_label=True),
        gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.7),
        gr.Slider(label="Top p", minimum=0.1, maximum=1.0, step=0.1, value=0.9),
        gr.Slider(label="Top k", minimum=1, maximum=100, step=1, value=50),
        gr.Slider(label="Max length", minimum=10, maximum=1000, step=10, value=250),
    ],
    outputs=[
        gr.TextArea(label="AI Response:", lines=10),
        gr.HTML(elem_id="analysis"),
    ],
    title="NVIDIA AI Chat",
    description="Engage in a conversation with our advanced AI model. Customize the response using various parameters.",
    theme="default",
    css=css,
    # BUG FIX: dropped `layout="vertical"` — not a valid gr.Interface
    # keyword argument in gradio 3+.
    allow_flagging="never",
)
# --- Dynamic Background ---
def update_background():
    """Cycle the app background color (green channel pinned at 255) once
    per second.

    NOTE(review): gradio exposes no supported server-side API for mutating
    a running app's CSS; `iface.root.style` is not a real attribute on
    current gradio versions, so this loop exits quietly if the hook is
    unavailable rather than crashing its thread.
    """
    while True:
        r = random.randint(0, 255)
        g = 255  # Keep the green component constant
        b = random.randint(0, 255)
        try:
            iface.root.style.background_color = f"rgb({r}, {g}, {b})"
        except AttributeError:
            # Hook not provided by this gradio version — stop the loop.
            break
        time.sleep(1)


# BUG FIX: the original called `gr.Interface.update(update_background, ...)`,
# which is not a gradio API (it raised at import) and never actually ran the
# loop. Run the updater on a daemon thread so it cannot block launch() or
# keep the process alive on shutdown.
threading.Thread(target=update_background, daemon=True).start()
# --- Analysis Logic ---
def update_analysis(response):
    """Build the HTML stats panel for a generated ``response``.

    Returns a `<div class='analysis-container'>` snippet with character,
    word, and token counts from :func:`analyze_text`.

    BUG FIX: the original called `iface.update(analysis=..., live=True)`
    (not a gradio API) and was monkey-patched over the first output
    component's `postprocess`, which would have replaced text rendering
    with a function returning None. It is now a pure helper suitable for
    use as (part of) an output value.
    """
    analysis = analyze_text(response)
    return (
        "<div class='analysis-container'>"
        f"Number of characters: {analysis['Number of characters']}<br>"
        f"Number of words: {analysis['Number of words']}<br>"
        f"Number of tokens: {analysis['Number of tokens']}</div>"
    )


iface.launch(debug=True)