Spaces:
Running
Running
Commit
·
8e9388d
1
Parent(s):
69682e3
Prompt Refined, changed schema
Browse files
agent.py
CHANGED
@@ -1,20 +1,14 @@
|
|
1 |
import os
|
2 |
from dotenv import load_dotenv
|
3 |
from langchain_community.document_loaders import TextLoader, DirectoryLoader, UnstructuredPDFLoader, UnstructuredWordDocumentLoader
|
4 |
-
from langchain_google_genai import ChatGoogleGenerativeAI
|
5 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
6 |
-
from langchain_community.vectorstores import PGVector
|
7 |
from langchain.chains import RetrievalQA
|
8 |
from langchain.prompts import PromptTemplate
|
9 |
import json
|
10 |
-
import time
|
11 |
-
# import google.generativeai as genai
|
12 |
-
from google import genai
|
13 |
from google.oauth2 import service_account
|
14 |
load_dotenv()
|
15 |
-
|
16 |
-
# # Load environment variables
|
17 |
-
# os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credential_file
|
18 |
|
19 |
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
|
20 |
if GEMINI_API_KEY is None:
|
@@ -23,11 +17,7 @@ if GEMINI_API_KEY is None:
|
|
23 |
conf = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
|
24 |
|
25 |
service_account_info = json.loads(conf)
|
26 |
-
print(service_account_info)
|
27 |
-
print(type(service_account_info))
|
28 |
service_account_info = eval(service_account_info)
|
29 |
-
print(service_account_info)
|
30 |
-
print(type(service_account_info))
|
31 |
|
32 |
credentials = service_account.Credentials.from_service_account_info(service_account_info)
|
33 |
|
@@ -66,7 +56,6 @@ def load_documents(directory):
|
|
66 |
import os
|
67 |
from dotenv import load_dotenv
|
68 |
from langchain_community.llms import HuggingFacePipeline
|
69 |
-
# from langchain_community.embeddings import HuggingFaceEmbeddings
|
70 |
from langchain_huggingface import HuggingFaceEmbeddings
|
71 |
from langchain_community.document_loaders import TextLoader, DirectoryLoader, UnstructuredPDFLoader, UnstructuredWordDocumentLoader
|
72 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
@@ -176,16 +165,27 @@ def create_health_agent(vector_store):
|
|
176 |
Use Previous_Conversation to maintain consistency in the conversation.
|
177 |
These are Previous_Conversation between you and user.
|
178 |
Previous_Conversation: \n{previous_conversation}
|
|
|
|
|
|
|
179 |
Thoroughly analyze the Context, and also use context to answer the questions, aside of your knowledge.
|
180 |
-
|
181 |
-
|
182 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
183 |
Question: {question}
|
184 |
Answer:"""
|
185 |
|
186 |
PROMPT = PromptTemplate(
|
187 |
template=prompt_template,
|
188 |
-
input_variables=["context", "question", "previous_conversation"]
|
189 |
)
|
190 |
|
191 |
if llm is None:
|
|
|
1 |
import os
|
2 |
from dotenv import load_dotenv
|
3 |
from langchain_community.document_loaders import TextLoader, DirectoryLoader, UnstructuredPDFLoader, UnstructuredWordDocumentLoader
|
4 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
5 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
|
|
6 |
from langchain.chains import RetrievalQA
|
7 |
from langchain.prompts import PromptTemplate
|
8 |
import json
|
|
|
|
|
|
|
9 |
from google.oauth2 import service_account
|
10 |
load_dotenv()
|
11 |
+
|
|
|
|
|
12 |
|
13 |
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
|
14 |
if GEMINI_API_KEY is None:
|
|
|
17 |
conf = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
|
18 |
|
19 |
service_account_info = json.loads(conf)
|
|
|
|
|
20 |
service_account_info = eval(service_account_info)
|
|
|
|
|
21 |
|
22 |
credentials = service_account.Credentials.from_service_account_info(service_account_info)
|
23 |
|
|
|
56 |
import os
|
57 |
from dotenv import load_dotenv
|
58 |
from langchain_community.llms import HuggingFacePipeline
|
|
|
59 |
from langchain_huggingface import HuggingFaceEmbeddings
|
60 |
from langchain_community.document_loaders import TextLoader, DirectoryLoader, UnstructuredPDFLoader, UnstructuredWordDocumentLoader
|
61 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
|
|
165 |
Use Previous_Conversation to maintain consistency in the conversation.
|
166 |
These are Previous_Conversation between you and user.
|
167 |
Previous_Conversation: \n{previous_conversation}
|
168 |
+
|
169 |
+
This is information about the user.
|
170 |
+
User_Data: \n{user_data}
|
171 |
Thoroughly analyze the Context, and also use context to answer the questions, aside of your knowledge.
|
172 |
+
|
173 |
+
Points to Adhere:
|
174 |
+
|
175 |
+
1. Only tell the user about schemes if they specifically ask; otherwise, don't share scheme information.
|
176 |
+
2. If the user asks about schemes, first ask which state they belong to.
|
177 |
+
3. You can act as a mental-health counselor if needed.
|
178 |
+
4. Give precautions and natural remedies for a disease if the user asks or it is needed, but only for common diseases such as the common cold, flu, etc.
|
179 |
+
5. Also use information from the Context to answer questions.
|
180 |
+
6. Ask for the user's preferred language at the start of the conversation.
|
181 |
+
|
182 |
+
Context: {context}\n
|
183 |
Question: {question}
|
184 |
Answer:"""
|
185 |
|
186 |
PROMPT = PromptTemplate(
|
187 |
template=prompt_template,
|
188 |
+
input_variables=["context", "question", "previous_conversation", "user_data"]
|
189 |
)
|
190 |
|
191 |
if llm is None:
|
fetch.py
CHANGED
@@ -5,7 +5,7 @@ import json
|
|
5 |
Contains the example code to retrieve response from the server in python-requests"""
|
6 |
|
7 |
## without previous_state
|
8 |
-
url = "
|
9 |
headers = {
|
10 |
"accept": "application/json",
|
11 |
"Content-Type": "application/json"
|
|
|
5 |
Contains the example code to retrieve response from the server in python-requests"""
|
6 |
|
7 |
## without previous_state
|
8 |
+
url = "https://arpit-bansal-healthbridge.hf.space/"
|
9 |
headers = {
|
10 |
"accept": "application/json",
|
11 |
"Content-Type": "application/json"
|
main.py
CHANGED
@@ -22,11 +22,24 @@ agent = agent_with_db()
|
|
22 |
@app.post("/retrieve", status_code=200)
|
23 |
async def retrieve(request:request):
|
24 |
prev_conv = request.previous_state
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
print(prev_conv)
|
26 |
if prev_conv is None:
|
27 |
prev_conv = "No previous conversation available, first time"
|
28 |
query = request.query
|
29 |
prev_conv = str(prev_conv)
|
30 |
-
response = agent({"query": query, "previous_conversation": prev_conv})
|
31 |
|
32 |
return {"response": response["result"]}
|
|
|
@app.post("/retrieve", status_code=200)
async def retrieve(request: request):
    """Answer a user query via the RAG agent.

    Reads the optional conversation history (``previous_state``) and optional
    user profile (``user_data``) from the request body, substitutes explicit
    placeholder text for anything missing, and returns the agent's answer.
    """
    prev_conv = request.previous_state
    user_data = request.user_data

    # Build the user-profile payload for the prompt. The original code
    # subscripted user_data even when it was None (TypeError) and then
    # unconditionally overwrote the placeholder values; guard both cases.
    if user_data is None:
        user_info = "No user data available"
    else:
        # .get() tolerates missing keys as well as explicit None values.
        user_info = {
            "state_user_belongs_to": user_data.get("state")
                or "No state information available",
            "sex_of_user": user_data.get("gender")
                or "Information not available",
        }

    if prev_conv is None:
        prev_conv = "No previous conversation available, first time"

    response = agent({
        "query": request.query,
        "previous_conversation": str(prev_conv),
        "user_data": user_info,
    })
    return {"response": response["result"]}
|
schemas.py
CHANGED
@@ -3,4 +3,5 @@ from typing import Optional, Literal, List, Dict, Any
|
|
3 |
|
4 |
class request(BaseModel):
|
5 |
previous_state: Optional[List[Dict]]=None
|
6 |
-
query: str
|
|
|
|
3 |
|
4 |
class request(BaseModel):
    """Request body for the /retrieve endpoint."""

    # Prior turns of the conversation; None on the first request.
    previous_state: Optional[List[Dict]] = None
    # The user's current question (required).
    query: str
    # Optional profile info, e.g. {"state": ..., "gender": ...}.
    user_data: Optional[Dict] = None
|