Spaces:
Sleeping
Sleeping
| from langchain_openai import ChatOpenAI | |
| from langchain.prompts import PromptTemplate | |
| from langchain.chains.question_answering import load_qa_chain | |
| from langchain.chains import LLMChain | |
| from langchain.memory import ConversationBufferMemory | |
| from langchain.chains import RetrievalQA | |
| import os | |
# OpenAI credentials are taken from the environment; None if the variable is unset.
api_key = os.environ.get("OPENAI_API_KEY")
def get_conversational_chain():
    """Build an LLMChain that answers hiring-manager questions about a resume.

    The chain plays the role of a polite HR expert: it receives the candidate's
    resume as ``context``, the hiring manager's ``question``, and the running
    ``chat_history`` (filled in automatically by the conversation memory), and
    returns a precise answer grounded only in the resume.

    Returns:
        LLMChain: a chain wired with a ChatOpenAI model, a resume-QA prompt,
        and a ConversationBufferMemory keyed on ``chat_history``.
    """
    prompt_template = """You are an expert and polite HR.
In the context, a candidate's resume will be provided to you. Given a question the hiring manager wants to know about the candidate, i want you to give the answer with the most precision. Feel free to answer in sentences or bullet points whatever you find suitable.
if there is some "\n" imagine things are written in separate lines. make your move accordingly
If the question has no answer present in the resume,
feel free to say, "try asking something else, this information is not available", don't provide the wrong answer no matter what is present in the question\n\n
Chat history:\n{chat_history}\n
Context:\n {context}?\n
Question: \n{question}\n
Answer:
"""
    model = ChatOpenAI(temperature=0.7, api_key=api_key)
    # Buffer memory simply replays past turns verbatim; it does no summarization,
    # so it needs no LLM of its own. input_key tells it which chain input is the
    # human turn; memory_key must match the {chat_history} template variable.
    memory = ConversationBufferMemory(input_key="question", memory_key="chat_history")
    # input_variables must list every placeholder the template uses; the original
    # code omitted "question" (and never consumed "chat_history"), which breaks
    # PromptTemplate validation / formatting.
    prompt = PromptTemplate(
        template=prompt_template,
        input_variables=["context", "question", "chat_history"],
    )
    chain = LLMChain(llm=model, prompt=prompt, memory=memory)
    return chain