abhinand2 commited on
Commit
5feb488
·
verified ·
1 Parent(s): dc92420

Create chain.py

Browse files
Files changed (1) hide show
  1. chain.py +50 -0
chain.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from langchain.schema.runnable import RunnableParallel
3
+ from langchain_core.runnables import RunnableLambda
4
+ from langchain_core.prompts import PromptTemplate
5
+ from langchain_huggingface import HuggingFaceEndpoint
6
+ from langchain_core.output_parsers import StrOutputParser
7
+
8
+
9
def get_chain(
    vectordb,
    repo_id="HuggingFaceH4/zephyr-7b-beta",
    task="text-generation",
    max_new_tokens=512,
    top_k=30,
    temperature=0.1,
    repetition_penalty=1.03,
    search_type="mmr",
    k=3,
    fetch_k=5,
    template="""Use the following sentences of context to answer the question at the end.
If you don't know the answer, that is if the answer is not in the context, then just say that you don't know, don't try to make up an answer.
Always say "Thanks for asking!" at the end of the answer.

{context}

Question: {question}
Helpful Answer:"""
):
    """Build a retrieval-augmented QA chain: retrieve -> prompt -> LLM -> str.

    Args:
        vectordb: A vector store exposing ``as_retriever`` (e.g. Chroma/FAISS).
        repo_id: Hugging Face Hub model id served via the Inference endpoint.
        task: Endpoint task name passed to ``HuggingFaceEndpoint``.
        max_new_tokens: Generation length cap for the LLM.
        top_k: Top-k sampling parameter for the LLM.
        temperature: Sampling temperature for the LLM.
        repetition_penalty: Repetition penalty for the LLM.
        search_type: Retriever search strategy (``"mmr"`` by default).
        k: Number of documents the retriever returns.
        fetch_k: Candidate pool size for MMR re-ranking.
        template: Prompt template with ``{context}`` and ``{question}`` slots.

    Returns:
        A runnable chain; ``chain.invoke({"question": ...})`` yields a string.
    """
    retriever = vectordb.as_retriever(
        search_type=search_type, search_kwargs={"k": k, "fetch_k": fetch_k}
    )

    def _format_docs(docs):
        # Join the retrieved documents' text. Passing the raw list of
        # Document objects into the prompt would interpolate their Python
        # repr ([Document(page_content=..., metadata=...)]) instead of
        # clean context sentences.
        return "\n\n".join(doc.page_content for doc in docs)

    # Fan out the input dict into the two prompt variables in parallel.
    retrieval = RunnableParallel(
        {
            "context": RunnableLambda(
                lambda x: _format_docs(retriever.invoke(x["question"]))
            ),
            "question": RunnableLambda(lambda x: x["question"]),
        }
    )

    prompt = PromptTemplate(
        input_variables=["context", "question"], template=template
    )

    llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        task=task,
        max_new_tokens=max_new_tokens,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
    )

    # LCEL pipeline: {context, question} -> prompt -> LLM -> plain string.
    return retrieval | prompt | llm | StrOutputParser()