File size: 694 Bytes
214c96a
e2ed786
214c96a
87c5f71
 
e2ed786
87c5f71
 
 
 
 
e2ed786
87c5f71
 
 
 
 
 
 
 
e2ed786
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
import gradio as gr
from transformers import pipeline

# Load the text generation pipeline with the CodeLlama model.
# NOTE(review): this runs at import time — it will download/load a 70B-parameter
# model, so expect very long startup and large memory/disk use; consider a
# smaller checkpoint for local testing.
text_generation_pipeline = pipeline("text-generation", model="codellama/CodeLlama-70b-Instruct-hf")

# Define a function to generate responses based on user input
def generate_response(input_text, max_length=200):
    """Generate a model response for the given prompt.

    Parameters
    ----------
    input_text : str
        The user's prompt, passed straight to the text-generation pipeline.
    max_length : int, optional
        Maximum total token length of the generated sequence (prompt plus
        continuation). Defaults to 200, preserving the original hard-coded
        behavior.

    Returns
    -------
    str
        The generated text from the first (and only requested) candidate.
        Per the transformers pipeline contract, this includes the prompt.
    """
    # The pipeline returns a list of candidate dicts; we request the default
    # single candidate and extract its text.
    candidates = text_generation_pipeline(input_text, max_length=max_length)
    return candidates[0]['generated_text']

# Create Gradio interface.
# Building the Interface is cheap and side-effect free; launching the web
# server is not, so it is guarded below — importing this module no longer
# starts a server as an import-time side effect.
demo = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="CodeLlama Assistant",
    description="Ask me anything and I will respond!",
)

if __name__ == "__main__":
    # Start the Gradio web server only when run as a script.
    demo.launch()