import gradio as gr
from llama_cpp import Llama

# Load the quantized Alpaca 7B model from the local GGML weights file.
llm = Llama(model_path="ggml-alpaca-7b-q4.bin")

def generate_text(input_text):
    # llama_cpp returns a completion dict; extract the generated text from it.
    output = llm(input_text, max_tokens=32, stop=["Q:", "\n"], echo=True)
    return output["choices"][0]["text"]

input_text = gr.Textbox(label="Enter your input text")
output_text = gr.Textbox(label="Output text")

gr.Interface(fn=generate_text, inputs=input_text, outputs=output_text,
             title="Llama Language Model",
             description="Enter your input text to generate output text.").launch()
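
# Usage (a minimal sketch, assuming the quantized weights file sits next to this
# script and that the gradio and llama-cpp-python packages are installed):
#
#   pip install gradio llama-cpp-python
#   python app.py
#
# Gradio then serves the interface locally (by default on http://127.0.0.1:7860).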