# NOTE: the classic LangChain import paths are used here; newer releases expose
# CTransformers via langchain_community.llms instead.
from langchain.prompts import PromptTemplate
from langchain.llms import CTransformers
import gradio as gr


def getLLamaresponse(message):
    """Generate a short blog-style reply to the incoming chat message."""
    # Generation defaults; the user's message is used as the topic when provided.
    input_text = message or "home decoration"
    no_words = "100"
    blog_style = "lifestyle"

    # Load the quantized GGUF model locally via ctransformers.
    # Note: the model is re-loaded on every call; creating it once at module level
    # (see the commented sketch after this function) avoids the repeated startup cost.
    llm = CTransformers(model='TheBloke/OpenHermes-2.5-Mistral-7B-GGUF',
                        model_type='llama',
                        config={'max_new_tokens': 256,
                                'temperature': 0.01})

template=""" |
|
Write a blog for {blog_style} job profile for a topic {input_text} |
|
within {no_words} words. |
|
""" |
|
|
|
    prompt = PromptTemplate(input_variables=["blog_style", "input_text", "no_words"],
                            template=template)

    # gr.Chatbot (default pair format) expects a list of (user, assistant) pairs,
    # so wrap the single exchange accordingly.
    response = llm(prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words))
    print(response)
    return [[message, response]]


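# A minimal sketch (assumption, not part of the original script): build the model once at
# module level and reuse it inside getLLamaresponse, instead of reloading it on every call.
#
# _LLM = CTransformers(model='TheBloke/OpenHermes-2.5-Mistral-7B-GGUF',
#                      model_type='llama',
#                      config={'max_new_tokens': 256, 'temperature': 0.01})
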
# Gradio UI: a single chat tab that sends the user's message to the model.
with gr.Blocks() as demo:
    gr.Markdown("# AI Patient Chatbot")
    with gr.Group():
        with gr.Tab("Patient Chatbot"):
            chatbot = gr.Chatbot()
            message = gr.Textbox(label="Enter your message to Barry", placeholder="Type here...", lines=2)
            send_message = gr.Button("Submit")
            send_message.click(getLLamaresponse, inputs=[message], outputs=[chatbot])
            save_chatlog = gr.Button("Save Chatlog")  # no click handler is wired up yet (see the commented sketch below)
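            # Hypothetical wiring for the "Save Chatlog" button (assumption, not in the
            # original): dump the chatbot's (user, assistant) pairs to a text file.
            #
            # def save_history(history):
            #     with open("chatlog.txt", "w") as f:
            #         for user_msg, bot_msg in history or []:
            #             f.write(f"User: {user_msg}\nBot: {bot_msg}\n\n")
            #
            # save_chatlog.click(save_history, inputs=[chatbot], outputs=None)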

demo.launch(debug=True)
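# debug=True keeps the process attached and surfaces errors in the console; adding
# share=True (optional, not in the original) also prints a temporary public URL,
# e.g. demo.launch(debug=True, share=True).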