Thomas Stone committed on
Commit
f8b5cf4
·
verified ·
1 Parent(s): f45b71b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -13
app.py CHANGED
@@ -6,14 +6,14 @@ from sentence_transformers import SentenceTransformer
6
  from huggingface_hub import InferenceClient
7
 
8
  # Load embedding model
9
- model = SentenceTransformer('all-MiniLM-L6-v2')
10
 
11
  # File paths
12
  TEXT_FILE = "combined_text_documents.txt"
13
  EMBEDDINGS_FILE = "policy_embeddings.npy"
14
  INDEX_FILE = "faiss_index.bin"
15
 
16
- # Load policy text from the file
17
  if os.path.exists(TEXT_FILE):
18
  with open(TEXT_FILE, "r", encoding="utf-8") as f:
19
  POLICY_TEXT = f.read()
@@ -22,7 +22,7 @@ else:
22
  print("โŒ ERROR: combined_text_documents.txt not found! Ensure it's uploaded.")
23
  POLICY_TEXT = ""
24
 
25
- # Split text into chunks
26
  chunk_size = 500
27
  chunks = [POLICY_TEXT[i:i+chunk_size] for i in range(0, len(POLICY_TEXT), chunk_size)] if POLICY_TEXT else []
28
 
@@ -79,22 +79,26 @@ def respond(
79
  if val[1]:
80
  messages.append({"role": "assistant", "content": val[1]})
81
 
82
- # 🔹 Retrieve policy info related to the query
83
  policy_context = search_policy(message)
84
 
85
  if policy_context:
86
- # 🔹 Force the LLM to use retrieved policy text
87
- system_context = f"""
88
- You are an expert in Colorado public assistance policies.
89
- Below is relevant information retrieved from official policy documents:
 
 
90
 
91
  {policy_context}
92
 
93
- Based on this information, answer the user's question accurately and concisely.
 
94
  """
95
- messages.append({"role": "system", "content": system_context})
96
-
97
- messages.append({"role": "user", "content": message})
 
98
 
99
  response = ""
100
  for message in client.chat_completion(
@@ -108,7 +112,6 @@ def respond(
108
  response += token
109
  yield response
110
 
111
-
112
  # 🔹 Gradio Chat Interface
113
  demo = gr.ChatInterface(
114
  respond,
 
6
  from huggingface_hub import InferenceClient
7
 
8
  # Load embedding model
9
+ model = SentenceTransformer('all-MiniLM-L6-v2')
10
 
11
  # File paths
12
  TEXT_FILE = "combined_text_documents.txt"
13
  EMBEDDINGS_FILE = "policy_embeddings.npy"
14
  INDEX_FILE = "faiss_index.bin"
15
 
16
+ # Load policy text from file
17
  if os.path.exists(TEXT_FILE):
18
  with open(TEXT_FILE, "r", encoding="utf-8") as f:
19
  POLICY_TEXT = f.read()
 
22
  print("โŒ ERROR: combined_text_documents.txt not found! Ensure it's uploaded.")
23
  POLICY_TEXT = ""
24
 
25
+ # Split text into chunks for FAISS indexing
26
  chunk_size = 500
27
  chunks = [POLICY_TEXT[i:i+chunk_size] for i in range(0, len(POLICY_TEXT), chunk_size)] if POLICY_TEXT else []
28
 
 
79
  if val[1]:
80
  messages.append({"role": "assistant", "content": val[1]})
81
 
82
+ # 🔹 Retrieve relevant policy info from FAISS
83
  policy_context = search_policy(message)
84
 
85
  if policy_context:
86
+ # 🔹 Display retrieved context in chat
87
+ messages.append({"role": "assistant", "content": f"📄 **Relevant Policy Context:**\n\n{policy_context}"})
88
+
89
+ # 🔹 Force the LLM to use the retrieved policy text
90
+ user_query_with_context = f"""
91
+ The following is the most relevant policy information retrieved from the official Colorado public assistance policies:
92
 
93
  {policy_context}
94
 
95
+ Based on this information, answer the following question:
96
+ {message}
97
  """
98
+ messages.append({"role": "user", "content": user_query_with_context})
99
+ else:
100
+ # If no relevant policy info is found, use the original message
101
+ messages.append({"role": "user", "content": message})
102
 
103
  response = ""
104
  for message in client.chat_completion(
 
112
  response += token
113
  yield response
114
 
 
115
  # 🔹 Gradio Chat Interface
116
  demo = gr.ChatInterface(
117
  respond,