import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the text generation model and tokenizer from Hugging Face by identifier
model_identifier = "curiouscurrent/omnicode"
model = AutoModelForCausalLM.from_pretrained(model_identifier)
tokenizer = AutoTokenizer.from_pretrained(model_identifier)

def generate_response(input_prompt):
    # Tokenize the input prompt, truncating long prompts to 512 tokens
    input_ids = tokenizer.encode(input_prompt, return_tensors="pt", max_length=512, truncation=True)
    # Generate a response, limiting the newly generated text to 100 tokens
    output_ids = model.generate(input_ids, max_new_tokens=100, num_return_sequences=1)
    # Decode the generated token IDs back into text
    response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return response

# Create the Gradio interface (gr.Textbox replaces the removed gr.inputs/gr.outputs API)
input_prompt = gr.Textbox(lines=5, label="Input Prompt")
output_text = gr.Textbox(label="Response")

gr.Interface(
    fn=generate_response,
    inputs=input_prompt,
    outputs=output_text,
    title="OmniCode",
    description="Multi-language coding assistant",
    theme="default",
).launch()
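
For a quick sanity check outside the web UI, the same generation function can be called directly, for example from a Python REPL once the model and tokenizer above have loaded. This is a minimal sketch; the prompt is only illustrative, and generation runs on CPU unless the model is moved to a GPU.

# Call the generation function directly, bypassing the Gradio interface
sample_prompt = "Write a Python function that reverses a string."
print(generate_response(sample_prompt))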