import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model ONCE at startup instead of on every request —
# the original re-downloaded/re-instantiated the ~3B-parameter model inside
# the handler on each call.
# trust_remote_code=True is required for BOTH loads: replit-code-v1-3b ships
# custom model and tokenizer code in its hub repository (the original passed
# it only for the tokenizer, so the model load would fail).
tokenizer = AutoTokenizer.from_pretrained(
    'replit/replit-code-v1-3b', trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    'replit/replit-code-v1-3b', trust_remote_code=True
)


def code_v1(input_text):
    """Generate a code completion for *input_text* using replit-code-v1-3b.

    Args:
        input_text: Prompt string to complete.

    Returns:
        The decoded generation (prompt plus completion) as a string.
    """
    x = tokenizer.encode(input_text, return_tensors='pt')
    # Explicit max_new_tokens: generate() otherwise falls back to the model's
    # default max_length (20 total tokens), which truncates almost all output.
    y = model.generate(x, max_new_tokens=128)
    # clean_up_tokenization_spaces=False preserves exact whitespace, which is
    # needed for the generated code to stay syntactically correct.
    return tokenizer.decode(
        y[0], skip_special_tokens=True, clean_up_tokenization_spaces=False
    )


# Gradio 3+ component API: components live directly on the gr namespace.
# The old gr.inputs/gr.outputs modules were removed, and gr.outputs.Button
# never existed as an output component (the original run_button was also
# never passed to the Interface, so it is dropped — Interface supplies its
# own submit button).
prompt = gr.Textbox(label="Prompt")
output_prompt = gr.Textbox(label="Output")

iface = gr.Interface(fn=code_v1, inputs=prompt, outputs=output_prompt)
iface.launch()