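# Gradio chat app serving the GGUF-quantized Zephyr-7B-beta model via ctransformers.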
from ctransformers import AutoModelForCausalLM
import gradio as gr
greeting = """
Follow us on [Gathnex](https://medium.com/@gathnex), [LinkedIn](https://www.linkedin.com/company/gathnex/) and [GitHub](https://github.com/gathnexadmin) for more updates on Generative AI, LLMs, etc. A special thanks to the Gathnex team members who made a significant contribution to this project.
"""
llm = AutoModelForCausalLM.from_pretrained(
    "zephyr-7b-beta.Q4_K_S.gguf",
    model_type='mistral',
    max_new_tokens=1096,
    threads=3,
)
def stream(prompt, history):
    # Build a Zephyr-style chat prompt: the system message and the user turn,
    # each terminated with the </s> end-of-sequence token, followed by the
    # assistant tag. `history` (previous turns passed in by gr.ChatInterface)
    # is not used here.
    system_prompt = 'You are a helpful AI assistant'
    E_INST = "</s>"
    user, assistant = "<|user|>", "<|assistant|>"
    prompt = f"{system_prompt}{E_INST}\n{user}\n{prompt.strip()}{E_INST}\n{assistant}\n"
    return llm(prompt)
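
# The handler above returns the whole completion in one shot. Below is a minimal
# sketch of token-by-token streaming, assuming ctransformers' `stream=True`
# generator API and gr.ChatInterface's support for generator functions that yield
# the accumulated reply; `stream_tokens` is illustrative and not wired into the UI.
def stream_tokens(prompt, history):
    system_prompt = 'You are a helpful AI assistant'
    E_INST = "</s>"
    user, assistant = "<|user|>", "<|assistant|>"
    full_prompt = f"{system_prompt}{E_INST}\n{user}\n{prompt.strip()}{E_INST}\n{assistant}\n"
    partial = ""
    for token in llm(full_prompt, stream=True):
        partial += token
        yield partial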
css = """
h1 {
text-align: center;
}
#duplicate-button {
margin: auto;
color: white;
background: #1565c0;
border-radius: 100vh;
}
.contain {
max-width: 900px;
margin: auto;
padding-top: 1.5rem;
}
"""
chat_interface = gr.ChatInterface(
    fn=stream,
    # additional_inputs_accordion_name="Credentials",
    # additional_inputs=[
    #     gr.Textbox(label="OpenAI Key", lines=1),
    #     gr.Textbox(label="Linkedin Access Token", lines=1),
    # ],
    stop_btn=None,
    examples=[
        ["Explain large language models"],
        ["What is quantum computing?"],
    ],
)
with gr.Blocks(css=css) as demo:
    gr.HTML("<h1><center>Gathnex Free LLM Deployment Space</center></h1>")
    gr.HTML("<h3><center><a href='https://medium.com/@gathnex'>Gathnex AI</a>💬</center></h3>")
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    chat_interface.render()
    gr.Markdown(greeting)
if __name__ == "__main__":
demo.queue(max_size=10).launch() |
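# Running locally with `python app.py` serves the UI on Gradio's default port
# (http://127.0.0.1:7860); queue(max_size=10) caps the number of pending requests.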