import random
import time

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the text-to-shell-command model and its tokenizer from the Hugging Face Hub.
model_name = "Canstralian/text2shellcommands"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
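# Note: from_pretrained() loads the weights on CPU by default. If a GPU is
# available, moving the model (and the tokenized inputs below) to it with
# .to("cuda") should speed up generation.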


def generate_shell_command(prompt):
    """Generate a shell command from a natural-language prompt."""
    inputs = tokenizer(prompt, return_tensors="pt")
    # max_length caps prompt plus generated tokens at 50.
    outputs = model.generate(**inputs, max_length=50, num_return_sequences=1)
    command = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return command
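
# Example usage (hypothetical output; the actual text depends on the model):
#     generate_shell_command("list all files, including hidden ones")
#     -> something like "ls -la"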


def terminal_ui(prompt):
    """Build a retro-terminal-style response for the given prompt."""
    fake_typing_effect = [
        "Initializing...\n",
        "Boot sequence complete...\n",
        "Connecting to secure network...\n",
        "Accessing restricted files...\n",
        "Running diagnostics...\n",
        "Command input: " + prompt + "\n",
    ]

    # Collect the boot-sequence lines into the returned string so they show up
    # in the Gradio textbox (printing them would only reach the server console).
    # The sleeps simulate typing latency, but the full response is returned at once.
    result_output = ""
    for line in fake_typing_effect:
        time.sleep(random.uniform(0.5, 1.5))
        result_output += line
        time.sleep(0.3)

    command_response = generate_shell_command(prompt)

    result_output += f"\n[ SYSTEM STATUS: {random.choice(['OK', 'ERROR', 'WARNING'])} ]\n\n"
    result_output += f"Command executed: {command_response}\n"
    result_output += "[ End of output ]"

    return result_output
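
# Illustrative shape of the assembled output (status and command vary at runtime):
#     Initializing...
#     ...
#     Command input: <prompt>
#
#     [ SYSTEM STATUS: OK ]
#
#     Command executed: <model output>
#     [ End of output ]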


def retro_terminal_interface(prompt):
    # Thin wrapper passed to Gradio; delegates to terminal_ui.
    return terminal_ui(prompt)


# Wire the generator into a simple Gradio interface.
# Note: "compact" is a legacy theme name; recent Gradio releases may warn and
# fall back to the default theme. live=True re-runs the function on every
# input change, which is slow given the model call and the sleeps above.
iface = gr.Interface(
    fn=retro_terminal_interface,
    inputs=gr.Textbox(placeholder="Type your shell command here...", label="Enter Command:"),
    outputs=gr.Textbox(label="Terminal Output", lines=20, interactive=False),
    theme="compact",
    live=True,
)

iface.launch()