# app.py — CodeLlama text-generation demo (Hugging Face Space)
# Source: noahabebe's Space, commit 87c5f71 ("Update app.py"), 694 bytes.
import gradio as gr
from transformers import pipeline
# Load the text-generation pipeline once at import time.
# NOTE(review): CodeLlama-70b-Instruct is a very large checkpoint — this
# download/load happens on module import and will dominate startup time;
# no device/dtype is specified, so transformers' defaults apply.
text_generation_pipeline = pipeline("text-generation", model="codellama/CodeLlama-70b-Instruct-hf")
# Define a function to generate responses based on user input
def generate_response(input_text, max_length=200):
    """Generate a text completion for *input_text* with the CodeLlama pipeline.

    Args:
        input_text: The user's prompt string.
        max_length: Maximum total token length of the generated sequence
            (prompt + completion). Defaults to 200, matching the original
            hard-coded value, but is now tunable by callers.

    Returns:
        The generated text (includes the prompt, per the pipeline's default
        ``generated_text`` field).
    """
    # The pipeline returns a list of candidate dicts; take the first one.
    generated_response = text_generation_pipeline(input_text, max_length=max_length)[0]['generated_text']
    return generated_response
# Build the Gradio UI: a single text box in, generated text out.
demo = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="CodeLlama Assistant",
    description="Ask me anything and I will respond!",
)
# Start the web server (blocks until the app is stopped).
demo.launch()