Pijush2023 committed
Commit a167bcb · verified · 1 Parent(s): 612163b

Update app.py

Files changed (1):
  1. app.py +43 -2
app.py CHANGED
@@ -1,3 +1,4 @@
+# Main code header: library imports
 import gradio as gr
 import requests
 import os
@@ -43,10 +44,12 @@ from langchain_core.messages import AIMessage, HumanMessage
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import RunnableBranch, RunnableLambda, RunnableParallel, RunnablePassthrough
 
-# Set environment variables for CUDA
+# Set environment variables for PyTorch/CUDA debugging
 os.environ['PYTORCH_USE_CUDA_DSA'] = '1'
 os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
 
+# Hugging Face token initialization
+
 hf_token = os.getenv("HF_TOKEN")
 if hf_token is None:
     print("Please set your Hugging Face token in the environment variables.")
@@ -55,6 +58,8 @@ else:
 
 logging.basicConfig(level=logging.DEBUG)
 
+# OpenAI embeddings setup
+
 embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
 
 # Pinecone setup
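The commit pairs `OpenAIEmbeddings` with a Pinecone index. The typical wiring for that combination looks like the sketch below; the index name is a placeholder, since the real name is defined outside this diff:

```python
import os
from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore

# Sketch of the usual Pinecone + OpenAI embeddings setup; "example-index"
# is a placeholder, not a name taken from app.py.
embeddings = OpenAIEmbeddings(api_key=os.environ['OPENAI_API_KEY'])
vectorstore = PineconeVectorStore.from_existing_index(index_name="example-index", embedding=embeddings)
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
```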
@@ -115,6 +120,8 @@ In light of this, how can I assist you today? Feel free to ask any questions or
 Question: {{question}}
 Helpful Answer:"""
 
+# QA chain prompt templates
+
 QA_CHAIN_PROMPT_1 = PromptTemplate(input_variables=["context", "question"], template=template1)
 QA_CHAIN_PROMPT_2 = PromptTemplate(input_variables=["context", "question"], template=template2)
 
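Both QA prompts expose the same `context`/`question` interface, so they can be swapped without touching the chain. A quick way to sanity-check that a template renders as intended, using a stand-in for the much longer templates in app.py:

```python
from langchain_core.prompts import PromptTemplate

# Stand-in template with the same variables as template1/template2 in app.py.
template1 = """Use the context to answer the question.
{context}
Question: {question}
Helpful Answer:"""

QA_CHAIN_PROMPT_1 = PromptTemplate(input_variables=["context", "question"], template=template1)
print(QA_CHAIN_PROMPT_1.format(context="Birmingham has a vibrant food scene.",
                               question="Where should I eat tonight?"))
```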
@@ -125,6 +132,25 @@ graph = Neo4jGraph(
     password="B_sZbapCTZoQDWj1JrhwqElsNa-jm5Zq1m_mAnyPYog"
 )
 
+# Avoid pushing the graph documents to Neo4j every time:
+
+# only push the documents once, then comment the code below back out after the initial push.
+# dataset_name = "Pijush2023/birmindata07312024"
+# page_content_column = 'events_description'
+# loader = HuggingFaceDatasetLoader(dataset_name, page_content_column)
+# data = loader.load()
+
+# text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=50)
+# documents = text_splitter.split_documents(data)
+
+# llm_transformer = LLMGraphTransformer(llm=chat_model)
+# graph_documents = llm_transformer.convert_to_graph_documents(documents)
+# graph.add_graph_documents(graph_documents, baseEntityLabel=True, include_source=True)
+
+
+# Neo4j setup
+
+
 class Entities(BaseModel):
     names: list[str] = Field(..., description="All the person, organization, or business entities that appear in the text")
 
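The commented-out ingestion block relies on the developer re-commenting it by hand after the first push. A guard that checks whether the graph is already populated removes that manual step. A sketch under these assumptions: it reuses `graph` and `chat_model` from app.py, and the import paths shown are the usual ones for these classes, which may differ from what app.py uses:

```python
from langchain_community.document_loaders import HuggingFaceDatasetLoader
from langchain_experimental.graph_transformers import LLMGraphTransformer
from langchain_text_splitters import CharacterTextSplitter

# Run the one-time ingestion only while the graph is still empty, instead of
# commenting the block in and out by hand (sketch; reuses app.py's `graph`
# and `chat_model`).
if graph.query("MATCH (n) RETURN count(n) AS c")[0]["c"] == 0:
    loader = HuggingFaceDatasetLoader("Pijush2023/birmindata07312024", "events_description")
    documents = CharacterTextSplitter(chunk_size=100, chunk_overlap=50).split_documents(loader.load())
    graph_documents = LLMGraphTransformer(llm=chat_model).convert_to_graph_documents(documents)
    graph.add_graph_documents(graph_documents, baseEntityLabel=True, include_source=True)
```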
@@ -135,12 +161,17 @@ entity_prompt = ChatPromptTemplate.from_messages([
 
 entity_chain = entity_prompt | chat_model.with_structured_output(Entities)
 
+
+# Remove Lucene special characters from user input
+
 def remove_lucene_chars(input: str) -> str:
     return input.translate(str.maketrans({"\\": r"\\", "+": r"\+", "-": r"\-", "&": r"\&", "|": r"\|", "!": r"\!",
                                           "(": r"\(", ")": r"\)", "{": r"\{", "}": r"\}", "[": r"\[", "]": r"\]",
                                           "^": r"\^", "~": r"\~", "*": r"\*", "?": r"\?", ":": r"\:", '"': r'\"',
                                           ";": r"\;", " ": r"\ "}))
 
+# Full-text query generator
+
 def generate_full_text_query(input: str) -> str:
     full_text_query = ""
     words = [el for el in remove_lucene_chars(input).split() if el]
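Two quick checks for the pieces in this hunk: the structured-output chain returns an `Entities` object, and the escaper backslashes every Lucene special character, including spaces:

```python
# Assumes entity_chain and remove_lucene_chars as defined above.
result = entity_chain.invoke({"question": "Who runs the Birmingham Museum of Art?"})
print(result.names)                    # e.g. ['Birmingham Museum of Art']

print(remove_lucene_chars("Vulcan Park (Birmingham)"))
# Vulcan\ Park\ \(Birmingham\)
```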
@@ -149,6 +180,8 @@ def generate_full_text_query(input: str) -> str:
     full_text_query += f" {words[-1]}~2"
     return full_text_query.strip()
 
+# Neo4j structured retrieval
+
 def structured_retriever(question: str) -> str:
     result = ""
     entities = entity_chain.invoke({"question": question})
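The `~2` suffix makes each term fuzzy (Lucene edit distance 2), so small typos in user questions still match entity names in the graph's full-text index. For a single-word input, only the visible last-word line runs:

```python
# Assumes generate_full_text_query as defined above.
print(generate_full_text_query("Birmingam"))
# Birmingam~2   -- still matches the "Birmingham" entity despite the typo
```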
@@ -215,11 +248,12 @@ _search_query = RunnableBranch(
 # Answer:"""
 
 
-template = f"""As an expert concierge known for being helpful and a renowned guide for Birmingham, Alabama, I assist visitors in discovering the best that the city has to offer. Given today's sunny and bright weather on {current_date}, I am well-equipped to provide valuable insights and recommendations without revealing specific locations. I draw upon my extensive knowledge of the area, including perennial events and historical context.
+template = f"""As an expert concierge known for being helpful and a renowned guide for Birmingham, Alabama, I assist visitors in discovering the best that the city has to offer. Given today's sunny and bright weather on {current_date}, I am well-equipped to provide valuable insights and recommendations with specific locations. I draw upon my extensive knowledge of the area, including perennial events and historical context.
 In light of this, how can I assist you today? Feel free to ask any questions or seek recommendations for your day in Birmingham. If there's anything specific you'd like to know or experience, please share, and I'll be glad to help. Remember, keep the question concise for a quick and accurate response.
 "It was my pleasure!"
 {{context}}
 Question: {{question}}
+Use natural language and be concise.
 Helpful Answer:"""
 
 
@@ -827,6 +861,13 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
         audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy', every=0.1)
         audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="voice_query_to_text")
 
+        # API panels wired into Gradio (weather/news/events), currently disabled
+
+        # with gr.Column():
+        #     weather_output = gr.HTML(value=fetch_local_weather())
+        #     news_output = gr.HTML(value=fetch_local_news())
+        #     events_output = gr.HTML(value=fetch_local_events())
+
         with gr.Column():
             image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
             image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
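The `audio_input.stream(...)` call implies a `transcribe_function` that takes the running state plus the newest audio chunk and returns the updated state and the text for `chat_input`. A minimal sketch of that contract (the actual ASR call is stubbed out, since it is outside this diff):

```python
import numpy as np

def transcribe_function(state, new_chunk):
    # gr.Audio with type='numpy' streams (sample_rate, samples) tuples.
    sr, y = new_chunk
    y = y.astype(np.float32)
    if y.ndim > 1:
        y = y.mean(axis=1)           # downmix stereo to mono
    state = y if state is None else np.concatenate([state, y])
    text = ""                        # placeholder: run the ASR model over `state` here
    return state, text
```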
 
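The disabled `gr.HTML` panels expect `fetch_local_*` helpers that return HTML strings. One plausible shape for the weather helper; the endpoint and markup here are illustrative, not taken from this commit:

```python
import requests

def fetch_local_weather() -> str:
    # Illustrative only: wttr.in's one-line format for Birmingham; the real
    # helper in app.py may use a different provider and markup.
    try:
        r = requests.get("https://wttr.in/Birmingham?format=3", timeout=5)
        return f"<div class='weather'>{r.text}</div>"
    except requests.RequestException:
        return "<div class='weather'>Weather unavailable</div>"
```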