Mohamed-Maher committed
Commit c37e376 (verified) · 1 parent: 1b759ee

Update app.py

Files changed (1)
  1. app.py +71 -72
app.py CHANGED
@@ -6,75 +6,74 @@ from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.vectorstores import Qdrant
 from langchain_openai import ChatOpenAI
 
-
-QDRANT_URL = os.getenv('QDRANT_URL')
-QDRANT_API_KEY = os.getenv('QDRANT_API_KEY')
-OPEN_AI_TOKEN = os.getenv('OPEN_AI_TOKEN')
-
-
-def clean_text(text):
-    text = re.sub(r'<[^>]*>', '', text)
-    text = re.sub(r'[^\w\s]', '', text)
-    text = re.sub(r'\s+', ' ', text)
-    return text.lower().strip()
-
-
-collection_name = "Rag-with-Langchain-qdrant-Hadith"
-
-client = qdrant_client.QdrantClient(
-    url=QDRANT_URL,
-    api_key=QDRANT_API_KEY
-)
-
-collection_config = qdrant_client.http.models.VectorParams(
-    size=384,
-    distance=qdrant_client.http.models.Distance.COSINE
-)
-
-embeddings = HuggingFaceEmbeddings(
-    model_name="intfloat/multilingual-e5-small"
-)
-
-vectorStore = Qdrant(
-    client=client,
-    collection_name=collection_name,
-    embeddings=embeddings
-)
-
-
-def get_relevant_docs(question, k):
-    relevant_docs = vectorStore.similarity_search_with_score(query=question, k=k)
-    return relevant_docs
-
-def extract_contexts(relevant_docs):
-    contexts = []
-    for doc in relevant_docs:
-        contexts.append(doc[0].page_content)
-    return contexts
-
-def create_template(question, k):
-    relevant_docs = get_relevant_docs(question, k)
-    contexts = extract_contexts(relevant_docs)
-    template = f"""
-    Engage in a conversation with the user, responding to their question:
-    {question}
-    within this contexts of Hadiths:
-    {contexts}
-    Encourage the model to provide informative and culturally sensitive answers, reflecting Islamic teachings. Maintain a conversational tone and aim for clarity in responses and make sure they are restricted extracted from the provided contexts and i want you to answer me in arabic."""
-    return template
-
-
-chat = ChatOpenAI(openai_api_key=OPEN_AI_TOKEN, model='gpt-3.5-turbo', temperature=0.5)
-
-def generate_answer(question):
-    cleaned_question = clean_text(question)
-    query = create_template(cleaned_question, 10)
-    response = clean_text(chat.invoke(query).content)
-    return response
-
-def greet(question):
-    answer = generate_answer(question)
-    return answer
-
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch(inline=False)
 
+class HadithChatApp:
+    def __init__(self):
+        self.QDRANT_URL = os.getenv('QDRANT_URL')
+        self.QDRANT_API_KEY = os.getenv('QDRANT_API_KEY')
+        self.OPEN_AI_TOKEN = os.getenv('OPEN_AI_TOKEN')
+
+        self.collection_name = "Rag-with-Langchain-qdrant-Hadith"
+
+        self.client = qdrant_client.QdrantClient(
+            url=self.QDRANT_URL,
+            api_key=self.QDRANT_API_KEY
+        )
+
+        self.collection_config = qdrant_client.http.models.VectorParams(
+            size=384,
+            distance=qdrant_client.http.models.Distance.COSINE
+        )
+
+        self.embeddings = HuggingFaceEmbeddings(
+            model_name="intfloat/multilingual-e5-small"
+        )
+
+        self.vectorStore = Qdrant(
+            client=self.client,
+            collection_name=self.collection_name,
+            embeddings=self.embeddings
+        )
+
+        self.chat = ChatOpenAI(openai_api_key=self.OPEN_AI_TOKEN, model='gpt-3.5-turbo', temperature=0.5)
+
+    def clean_text(self, text):
+        text = re.sub(r'<[^>]*>', '', text)
+        text = re.sub(r'[^\w\s]', '', text)
+        text = re.sub(r'\s+', ' ', text)
+        return text.lower().strip()
+
+    def get_relevant_docs(self, question, k):
+        relevant_docs = self.vectorStore.similarity_search_with_score(query=question, k=k)
+        return relevant_docs
+
+    def extract_contexts(self, relevant_docs):
+        contexts = []
+        for doc in relevant_docs:
+            contexts.append(doc[0].page_content)
+        return contexts
+
+    def create_template(self, question, k):
+        relevant_docs = self.get_relevant_docs(question, k)
+        contexts = self.extract_contexts(relevant_docs)
+        template = f"""
+        Engage in a conversation with the user, responding to their question:
+        {question}
+        within this contexts of Hadiths:
+        {contexts}
+        Encourage the model to provide informative and culturally sensitive answers, reflecting Islamic teachings. Maintain a conversational tone and aim for clarity in responses and make sure they are restricted extracted from the provided contexts and i want you to answer me in arabic."""
+        return template
+
+    def generate_answer(self, question):
+        cleaned_question = self.clean_text(question)
+        query = self.create_template(cleaned_question, 10)
+        response = self.clean_text(self.chat.invoke(query).content)
+        return response
+
+    def greet(self, question):
+        answer = self.generate_answer(question)
+        return answer
+
+if __name__ == "__main__":
+    hadith_chat_app = HadithChatApp()
+    iface = gr.Interface(fn=hadith_chat_app.greet, inputs="text", outputs="text")
+    iface.launch(inline=False)
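
For reference, a minimal usage sketch (not part of the commit) of the refactored class, assuming QDRANT_URL, QDRANT_API_KEY and OPEN_AI_TOKEN are set in the environment and that the "Rag-with-Langchain-qdrant-Hadith" collection is already populated in Qdrant. Because the Gradio launch now sits behind the if __name__ == "__main__" guard, app.py can be imported without starting the UI; the query text and k value below are illustrative only.

# Hypothetical driver script, separate from app.py
from app import HadithChatApp

app = HadithChatApp()                                    # builds the Qdrant client, embeddings and chat model once
docs = app.get_relevant_docs("fasting in Ramadan", k=3)  # (Document, score) pairs from the vector store
print(app.extract_contexts(docs))                        # the raw Hadith passages that would be sent as context
print(app.greet("What do the Hadiths say about fasting in Ramadan?"))  # end-to-end RAG answer (Arabic, per the prompt)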