sabazo committed
Commit f71d212 · unverified · Parents: 7a29166 f4bba44

Merge pull request #7 from almutareb/intergrate-database
app_gui.py CHANGED
@@ -1,7 +1,9 @@
 # Import Gradio for UI, along with other necessary libraries
 import gradio as gr
 from rag_app.loading_data.load_S3_vector_stores import get_chroma_vs
+from rag_app.loading_data.load_S3_vector_stores import get_chroma_vs
 from rag_app.agents.react_agent import agent_executor
+from config import db

 get_chroma_vs()

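Reviewer note: importing `db` from the new `config` module is what wires the database in here; `config.py` instantiates `DataBaseHandler` and calls `create_all_tables()` at import time, so the SQLite tables exist before the Gradio UI starts. The merge also leaves `get_chroma_vs` imported twice; a repeated import is a no-op in Python, so this is harmless, but one of the two lines could be dropped in a follow-up.
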
config.py ADDED
@@ -0,0 +1,15 @@
+import os
+from dotenv import load_dotenv
+from rag_app.database.db_handler import DataBaseHandler
+
+load_dotenv()
+
+SQLITE_FILE_NAME = os.getenv('SOURCES_CACHE')
+PERSIST_DIRECTORY = os.getenv('VECTOR_DATABASE_LOCATION')
+EMBEDDING_MODEL = os.getenv("EMBEDDING_MODEL")
+
+
+db = DataBaseHandler()
+
+db.create_all_tables()
+
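Reviewer note: because Python caches imported modules, every `from config import db` receives the same `DataBaseHandler` instance, which makes `config` act as a lightweight singleton. A minimal usage sketch — the URL and hash values are illustrative, and a `.env` providing `SOURCES_CACHE`, `VECTOR_DATABASE_LOCATION`, and `EMBEDDING_MODEL` is assumed:

    from config import db, PERSIST_DIRECTORY, EMBEDDING_MODEL

    # Tag a cached source with the current session before storing it.
    db.add_one({
        "url": "https://example.com/policy",   # illustrative, not from this commit
        "hash_id": "abc123",                   # illustrative hash
        "session_id": db.session_id,
        "session_date_time": db.session_date_time,
    })
    print(db.read_one("abc123"))
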
rag_app/database/__init__.py CHANGED
@@ -0,0 +1 @@
+from rag_app.database.db_handler import DataBaseHandler
rag_app/database/db_handler.py CHANGED
@@ -3,112 +3,222 @@ from rag_app.database.schema import Sources
 from rag_app.utils.logger import get_console_logger
 import os
 from dotenv import load_dotenv
-
-load_dotenv()
-
-sqlite_file_name = os.getenv('SOURCES_CACHE')
-
-sqlite_url = f"sqlite:///{sqlite_file_name}"
-engine = create_engine(sqlite_url, echo=False)
-
-logger = get_console_logger("db_handler")
-
-SQLModel.metadata.create_all(engine)
-
-
-def read_one(hash_id: dict):
-    with Session(engine) as session:
-        statement = select(Sources).where(Sources.hash_id == hash_id)
-        sources = session.exec(statement).first()
-        return sources
-
-
-def add_one(data: dict):
-    with Session(engine) as session:
-        if session.exec(
-            select(Sources).where(Sources.hash_id == data.get("hash_id"))
-        ).first():
-            logger.warning(f"Item with hash_id {data.get('hash_id')} already exists")
-            return None  # or raise an exception, or handle as needed
-        sources = Sources(**data)
-        session.add(sources)
-        session.commit()
-        session.refresh(sources)
-        logger.info(f"Item with hash_id {data.get('hash_id')} added to the database")
-        return sources
-
-
-def update_one(hash_id: dict, data: dict):
-    with Session(engine) as session:
-        # Check if the item with the given hash_id exists
-        sources = session.exec(
-            select(Sources).where(Sources.hash_id == hash_id)
-        ).first()
-        if not sources:
-            logger.warning(f"No item with hash_id {hash_id} found for update")
-            return None  # or raise an exception, or handle as needed
-        for key, value in data.items():
-            setattr(sources, key, value)
-        session.commit()
-        logger.info(f"Item with hash_id {hash_id} updated in the database")
-        return sources
-
-
-def delete_one(id: int):
-    with Session(engine) as session:
-        # Check if the item with the given hash_id exists
-        sources = session.exec(
-            select(Sources).where(Sources.hash_id == id)
-        ).first()
-        if not sources:
-            logger.warning(f"No item with hash_id {id} found for deletion")
-            return None  # or raise an exception, or handle as needed
-        session.delete(sources)
-        session.commit()
-        logger.info(f"Item with hash_id {id} deleted from the database")
-
-
-def add_many(data: list):
-    with Session(engine) as session:
-        for info in data:
-            # Reuse add_one function for each item
-            result = add_one(info)
-            if result is None:
-                logger.warning(
-                    f"Item with hash_id {info.get('hash_id')} could not be added"
-                )
-            else:
-                logger.info(
-                    f"Item with hash_id {info.get('hash_id')} added to the database"
-                )
-        session.commit()  # Commit at the end of the loop
-
-
-def delete_many(ids: list):
-    with Session(engine) as session:
-        for id in ids:
-            # Reuse delete_one function for each item
-            result = delete_one(id)
-            if result is None:
-                logger.warning(f"No item with hash_id {id} found for deletion")
-            else:
-                logger.info(f"Item with hash_id {id} deleted from the database")
-        session.commit()  # Commit at the end of the loop
-
-
-def read_all(query: dict = None):
-    with Session(engine) as session:
-        statement = select(Sources)
-        if query:
-            statement = statement.where(
-                *[getattr(Sources, key) == value for key, value in query.items()]
-            )
-        sources = session.exec(statement).all()
-        return sources
-
-
-def delete_all():
-    with Session(engine) as session:
-        session.exec(Sources).delete()
-        session.commit()
-        logger.info("All items deleted from the database")
+import uuid
+from datetime import datetime
+
+
+class DataBaseHandler():
+    """
+    A class for managing the database.
+
+    Attributes:
+        sqlite_file_name (str): The SQLite file name for the database.
+        logger (Logger): The logger for logging database operations.
+        engine (Engine): The SQLAlchemy engine for the database.
+
+    Methods:
+        create_all_tables: Create all tables in the database.
+        read_one: Read a single entry from the database by its hash_id.
+        add_one: Add a single entry to the database.
+        update_one: Update a single entry in the database by its hash_id.
+        delete_one: Delete a single entry from the database by its id.
+        add_many: Add multiple entries to the database.
+        delete_many: Delete multiple entries from the database by their ids.
+        read_all: Read all entries from the database, optionally filtered by a query.
+        delete_all: Delete all entries from the database.
+    """
+
+    def __init__(
+        self,
+        sqlite_file_name = os.getenv('SOURCES_CACHE'),
+        logger = get_console_logger("db_handler"),
+        # *args,
+        # **kwargs,
+    ):
+        self.sqlite_file_name = sqlite_file_name
+        self.logger = logger
+
+        sqlite_url = f"sqlite:///{self.sqlite_file_name}"
+        self.engine = create_engine(sqlite_url, echo=False)
+
+        self.session_id = str(uuid.uuid4())
+        self.session_date_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+
+    def create_all_tables(self) -> None:
+        SQLModel.metadata.create_all(self.engine)
+
+    def create_new_session(self) -> None:
+        """Create a new session_id and session date time."""
+        self.session_id = str(uuid.uuid4())
+        self.session_date_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+
+    def read_one(self, hash_id: dict):
+        """
+        Read a single entry from the database by its hash_id.
+
+        Args:
+            hash_id (dict): Dictionary containing the hash_id to search for.
+
+        Returns:
+            Sources: The matching entry from the database, or None if no match is found.
+        """
+        with Session(self.engine) as session:
+            statement = select(Sources).where(Sources.hash_id == hash_id)
+            sources = session.exec(statement).first()
+            return sources
+
+    def add_one(self, data: dict):
+        """
+        Add a single entry to the database.
+
+        Args:
+            data (dict): Dictionary containing the data for the new entry.
+
+        Returns:
+            Sources: The added entry, or None if the entry already exists.
+        """
+        with Session(self.engine) as session:
+            if session.exec(
+                select(Sources).where(Sources.hash_id == data.get("hash_id"))
+            ).first():
+                self.logger.warning(f"Item with hash_id {data.get('hash_id')} already exists")
+                return None  # or raise an exception, or handle as needed
+            sources = Sources(**data)
+            session.add(sources)
+            session.commit()
+            session.refresh(sources)
+            self.logger.info(f"Item with hash_id {data.get('hash_id')} added to the database")
+            return sources
+
+    def update_one(self, hash_id: dict, data: dict):
+        """
+        Update a single entry in the database by its hash_id.
+
+        Args:
+            hash_id (dict): Dictionary containing the hash_id to search for.
+            data (dict): Dictionary containing the updated data for the entry.
+
+        Returns:
+            Sources: The updated entry, or None if no match is found.
+        """
+        with Session(self.engine) as session:
+            # Check if the item with the given hash_id exists
+            sources = session.exec(
+                select(Sources).where(Sources.hash_id == hash_id)
+            ).first()
+            if not sources:
+                self.logger.warning(f"No item with hash_id {hash_id} found for update")
+                return None  # or raise an exception, or handle as needed
+            for key, value in data.items():
+                setattr(sources, key, value)
+            session.commit()
+            self.logger.info(f"Item with hash_id {hash_id} updated in the database")
+            return sources
+
+    def delete_one(self, id: int):
+        """
+        Delete a single entry from the database by its id.
+
+        Args:
+            id (int): The id of the entry to delete.
+
+        Returns:
+            None
+        """
+        with Session(self.engine) as session:
+            # Check if the item with the given hash_id exists
+            sources = session.exec(
+                select(Sources).where(Sources.hash_id == id)
+            ).first()
+            if not sources:
+                self.logger.warning(f"No item with hash_id {id} found for deletion")
+                return None  # or raise an exception, or handle as needed
+            session.delete(sources)
+            session.commit()
+            self.logger.info(f"Item with hash_id {id} deleted from the database")
+
+    def add_many(self, data: list):
+        """
+        Add multiple entries to the database.
+
+        Args:
+            data (list): List of dictionaries, each containing the data for a new entry.
+
+        Returns:
+            None
+        """
+        with Session(self.engine) as session:
+            for info in data:
+                # Reuse add_one for each item
+                result = self.add_one(info)
+                if result is None:
+                    self.logger.warning(
+                        f"Item with hash_id {info.get('hash_id')} could not be added"
+                    )
+                else:
+                    self.logger.info(
+                        f"Item with hash_id {info.get('hash_id')} added to the database"
+                    )
+            session.commit()  # Commit at the end of the loop
+
+    def delete_many(self, ids: list):
+        """
+        Delete multiple entries from the database by their ids.
+
+        Args:
+            ids (list): List of ids of the entries to delete.
+
+        Returns:
+            None
+        """
+        with Session(self.engine) as session:
+            for id in ids:
+                # Reuse delete_one for each item
+                result = self.delete_one(id)
+                if result is None:
+                    self.logger.warning(f"No item with hash_id {id} found for deletion")
+                else:
+                    self.logger.info(f"Item with hash_id {id} deleted from the database")
+            session.commit()  # Commit at the end of the loop
+
+    def read_all(self, query: dict = None):
+        """
+        Read all entries from the database, optionally filtered by a query.
+
+        Args:
+            query (dict, optional): Dictionary containing the query parameters. Defaults to None.
+
+        Returns:
+            list: List of matching entries from the database.
+        """
+        with Session(self.engine) as session:
+            statement = select(Sources)
+            if query:
+                statement = statement.where(
+                    *[getattr(Sources, key) == value for key, value in query.items()]
+                )
+            sources = session.exec(statement).all()
+            return sources
+
+    def delete_all(self):
+        """
+        Delete all entries from the database.
+
+        Returns:
+            None
+        """
+        with Session(self.engine) as session:
+            session.exec(Sources).delete()
+            session.commit()
+            self.logger.info("All items deleted from the database")
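Reviewer note: the module-level helpers become methods on `DataBaseHandler`, but each method still opens its own `Session`, so the outer `session.commit()` in `add_many`/`delete_many` commits nothing that the inner calls have not already committed. Also, `delete_one` returns `None` on success as well as on failure, so `delete_many` always takes its warning branch. A minimal sketch of the intended usage — the file name and row values are illustrative:

    from rag_app.database.db_handler import DataBaseHandler

    db = DataBaseHandler(sqlite_file_name="sources.db")  # illustrative path
    db.create_all_tables()

    db.add_one({"url": "https://example.com", "hash_id": "deadbeef"})
    db.update_one("deadbeef", {"embedded": True})
    pending = db.read_all({"embedded": False})  # equality filter on any column
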
rag_app/database/schema.py CHANGED
@@ -1,9 +1,22 @@
 from sqlmodel import SQLModel, Field
 from typing import Optional
-
 import datetime

 class Sources(SQLModel, table=True):
+    """
+    Database schema for the Sources table.
+
+    Attributes:
+        id (Optional[int]): The primary key for the table.
+        url (str): The URL of the source.
+        title (Optional[str]): The title of the source.
+        hash_id (str): A unique identifier for the source.
+        created_at (float): Timestamp indicating when the entry was created.
+        summary (str): A summary of the source content.
+        embedded (bool): Flag indicating whether the source is embedded.
+        session_id (str): A unique identifier for the session when the entry was added.
+        session_date_time (str): The timestamp when the session was created.
+    """
     id: Optional[int] = Field(default=None, primary_key=True)
     url: str = Field()
     title: Optional[str] = Field(default="NA", unique=False)
@@ -11,5 +24,7 @@ class Sources(SQLModel, table=True):
     created_at: float = Field(default=datetime.datetime.now().timestamp())
     summary: str = Field(default="")
     embedded: bool = Field(default=False)
+    session_id: str = Field(default="")
+    session_date_time: str = Field(default="")

     __table_args__ = {"extend_existing": True}
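Reviewer note: the two new columns tie every cached source to the agent session that produced it. One pre-existing pitfall carries over: `created_at` uses `datetime.datetime.now().timestamp()` as a plain default, which is evaluated once when the class is defined, so every row gets the import-time timestamp; `Field(default_factory=...)` would compute it per row. A sketch of a row under the new schema, with illustrative values:

    import uuid
    from rag_app.database.schema import Sources

    row = Sources(
        url="https://example.com/policy",        # illustrative values
        hash_id="deadbeef",
        session_id=str(uuid.uuid4()),            # as generated by DataBaseHandler
        session_date_time="2024-06-01 12:00:00",
    )
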
rag_app/structured_tools/structured_tools.py CHANGED
@@ -1,7 +1,4 @@
-from langchain.tools import BaseTool, StructuredTool, tool
-from langchain_community.tools import WikipediaQueryRun
-from langchain_community.utilities import WikipediaAPIWrapper
-#from langchain.tools import Tool
+from langchain.tools import tool
 from langchain_google_community import GoogleSearchAPIWrapper
 from langchain_community.embeddings.sentence_transformer import (
     SentenceTransformerEmbeddings,
@@ -14,16 +11,11 @@ import chromadb
 from rag_app.utils.utils import (
     parse_list_to_dicts, format_search_results
 )
-from rag_app.database.db_handler import (
-    add_many
-)
-
+import chromadb
 import os
-# from innovation_pathfinder_ai.utils import create_wikipedia_urls_from_text
+from config import db, PERSIST_DIRECTORY, EMBEDDING_MODEL

-persist_directory = os.getenv('VECTOR_DATABASE_LOCATION')
-embedding_model = os.getenv("EMBEDDING_MODEL")
-if not os.path.exists(persist_directory):
+if not os.path.exists(PERSIST_DIRECTORY):
     get_chroma_vs()

 @tool
@@ -32,14 +24,14 @@ def memory_search(query:str) -> str:
     This is your primary source to start your search with checking what you already have learned from the past, before going online."""
     # Since we have more than one collections we should change the name of this tool
     client = chromadb.PersistentClient(
-        path=persist_directory,
+        path=PERSIST_DIRECTORY,
     )

     collection_name = os.getenv('CONVERSATION_COLLECTION_NAME')
     #store using envar

     embedding_function = SentenceTransformerEmbeddings(
-        model_name=embedding_model,
+        model_name=EMBEDDING_MODEL,
     )

     vector_db = Chroma(
@@ -51,8 +43,14 @@ def memory_search(query:str) -> str:
     retriever = vector_db.as_retriever()
     docs = retriever.invoke(query)

+    # add the session id to each element in `docs`
+    [i.update({"session_id": db.session_id}) for i in docs]
+    db.add_many(docs)
+
+
     return docs.__str__()

+
 @tool
 def knowledgeBase_search(query:str) -> str:
     """Suche die interne Datenbank nach passenden Versicherungsprodukten und Informationen zu den Versicherungen"""
@@ -65,7 +63,7 @@ def knowledgeBase_search(query:str) -> str:
     #store using envar

     embedding_function = SentenceTransformerEmbeddings(
-        model_name=embedding_model
+        model_name=EMBEDDING_MODEL
     )

     # vector_db = Chroma(
@@ -73,16 +71,22 @@ def knowledgeBase_search(query:str) -> str:
     # #collection_name=collection_name,
     # embedding_function=embedding_function,
     # )
-    vector_db = Chroma(persist_directory=persist_directory, embedding_function=embedding_function)
+    vector_db = Chroma(persist_directory=PERSIST_DIRECTORY, embedding_function=embedding_function)
     retriever = vector_db.as_retriever(search_type="mmr", search_kwargs={'k':5, 'fetch_k':10})
     # This is deprecated, changed to invoke
     # LangChainDeprecationWarning: The method `BaseRetriever.get_relevant_documents` was deprecated in langchain-core 0.1.46 and will be removed in 0.3.0. Use invoke instead.
     docs = retriever.invoke(query)
+
+    # add the session id to each element in `docs`
+    [i.update({"session_id": db.session_id}) for i in docs]
+    db.add_many(docs)
+
     for doc in docs:
         print(doc)

     return docs.__str__()

+
 @tool
 def google_search(query: str) -> str:
     """Verbessere die Ergebnisse durch eine Suche über die Webseite der Versicherung. Erstelle eine neue Suchanfrage, um die Erfolgschancen zu verbesseren."""
@@ -91,10 +95,15 @@ def google_search(query: str) -> str:
     search_results:dict = websearch.results(query, 3)
     print(search_results)
     if len(search_results)>1:
+        # add session id
         cleaner_sources = format_search_results(search_results)
         parsed_csources = parse_list_to_dicts(cleaner_sources)
-        add_many(parsed_csources)
+
+        # add the session id to each element in `parsed_csources`
+        [i.update({"session_id": db.session_id}) for i in parsed_csources]
+
+        db.add_many(parsed_csources)
     else:
         cleaner_sources = search_results

-    return cleaner_sources.__str__()
+    return cleaner_sources.__str__()
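Reviewer note: the `google_search` path stores dicts produced by `parse_list_to_dicts`, which matches what `db.add_many` expects. The two retriever paths, however, pass LangChain `Document` objects straight to `db.add_many`; `Document` has no dict-style `update` or `get` method and `Sources(**data)` needs a mapping, so those two blocks would likely raise at runtime. A hedged sketch of the presumable intent, converting each document to a dict first (the `source` metadata key is an assumption, not from this commit):

    from rag_app.utils.utils import generate_uuid

    payload = [
        {
            "url": doc.metadata.get("source", ""),  # assumed metadata key
            "hash_id": generate_uuid(),
            "summary": doc.page_content[:200],
            "session_id": db.session_id,
        }
        for doc in docs
    ]
    db.add_many(payload)
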
rag_app/utils/utils.py CHANGED
@@ -2,7 +2,8 @@ import hashlib
 import datetime
 import os
 import uuid
-
+from typing import Dict
+import re
 # from rag_app.utils import logger

 # logger = logger.get_console_logger("utils")
@@ -112,4 +113,27 @@ def generate_uuid() -> str:
     Returns:
         str: A UUID string.
     """
-    return str(uuid.uuid4())
+    return str(uuid.uuid4())
+
+def extract_responses(text: str) -> Dict[str, str]:
+    """
+    Extracts the user response and AI response from the provided text.
+
+    Args:
+        text (str): The input text containing user and AI responses.
+
+    Returns:
+        Dict[str, str]: A dictionary with keys 'USER' and 'AI' containing the respective responses.
+    """
+    user_pattern = re.compile(r'USER: (.*?) \n', re.DOTALL)
+    ai_pattern = re.compile(r'AI: (.*?)$', re.DOTALL)
+
+    user_match = user_pattern.search(text)
+    ai_match = ai_pattern.search(text)
+
+    responses = {
+        "USER": user_match.group(1) if user_match else "",
+        "AI": ai_match.group(1) if ai_match else ""
+    }
+
+    return responses
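Reviewer note: `extract_responses` requires a literal space before the newline that ends the user turn (`' \n'` in the pattern); without it, the `USER` key comes back empty. A quick example with made-up transcript text:

    text = "USER: Which home insurance suits me? \nAI: Here are three options ..."
    print(extract_responses(text))
    # {'USER': 'Which home insurance suits me?', 'AI': 'Here are three options ...'}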