import os

import gradio as gr
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_ai21 import ChatAI21

# Authenticate with AI21 Labs.
os.environ["AI21_API_KEY"] = "your-ai21-api-key"

# The prompt exposes a {history} slot so the conversation memory below is
# actually injected into every request.
prompt = PromptTemplate(
    input_variables=["history", "user_input"],
    template="""
You are a helpful and friendly chatbot. Respond concisely and informatively.

{history}
User: {user_input}
Chatbot:
""",
)

# Jamba Instruct with deterministic sampling.
llm = ChatAI21(model="jamba-instruct", temperature=0)
llm.streaming = True  # opt in to token streaming where the wrapper supports it

# Buffer memory stores every turn and supplies it to the prompt as {history}.
memory = ConversationBufferMemory(memory_key="history")
chat_chain = LLMChain(llm=llm, prompt=prompt, memory=memory)


def chatbot_response(user_input):
    # invoke() returns a dict; the generated reply lives under the "text" key.
    return chat_chain.invoke({"user_input": user_input})["text"]


gr.Interface(
    fn=chatbot_response,
    inputs="text",
    outputs="text",
    title="AI Chatbot with Jamba",
).launch()
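
# Optional sanity check (a minimal sketch): calling the chain directly shows
# that ConversationBufferMemory carries earlier turns into later requests.
# Run these lines before .launch(), which blocks, and expect the wording of
# the replies to vary with the model:
#
#     print(chatbot_response("Hi, my name is Dana."))
#     print(chatbot_response("What is my name?"))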