# demo-chat-gpt / app.py
import os
import gradio as gr
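
# Initialize the local instrumentation module for this app before the LangChain imports.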
import instrumentation
instrumentation.init("llm-chat-app")
from langchain.globals import set_debug
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI

import logging

# Verbose LangChain debug output plus DEBUG-level application logging.
set_debug(True)
logging.basicConfig(level=logging.DEBUG)
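
# ChatOpenAI reads the API key from the OPENAI_API_KEY environment variable;
# temperature 0.5 and a 100-token cap keep replies short.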
llm = ChatOpenAI(temperature=0.5, max_tokens=100, model="gpt-3.5-turbo")
output_parser = StrOutputParser()
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a Responsible AI assistant to the user."),
    ("user", "{input}"),
])


def handle_message(message, _history):
    # Build the prompt -> model -> string-parser chain and run it on the user message.
    chain = prompt | llm | output_parser
    return chain.invoke({"input": message})
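

# Bind address and port are configurable via GR_SERVER_NAME / GR_SERVER_PORT
# (defaults: 127.0.0.1:7860).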
server_name = os.environ.get("GR_SERVER_NAME", "127.0.0.1")
server_port = os.environ.get("GR_SERVER_PORT", "7860")
gr.ChatInterface(handle_message).launch(server_name=server_name, server_port=int(server_port))