Commit 32f0c73 · correct env
Parent(s): 4ad5775

app.py CHANGED
@@ -244,7 +244,7 @@ class DB_Search2(BaseTool):
 
 def Text2Sound(text):
 
-    speech_config = speechsdk.SpeechConfig(subscription=os.
+    speech_config = speechsdk.SpeechConfig(subscription=os.environ['SPEECH_KEY'], region=os.environ['SPEECH_REGION'])
     audio_config = speechsdk.audio.AudioOutputConfig(use_default_speaker=True)
     speech_config.speech_synthesis_voice_name='en-US-JennyNeural'
     # speech_synthesizer = ""
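For context, here is a minimal sketch of the environment-driven speech setup this hunk introduces, assuming the azure-cognitiveservices-speech package and the SPEECH_KEY / SPEECH_REGION variables shown above; the helper name is illustrative, not part of app.py.

# Sketch only: assumes SPEECH_KEY and SPEECH_REGION are set in the environment.
import os
import azure.cognitiveservices.speech as speechsdk

def text_to_sound_sketch(text: str) -> None:
    speech_config = speechsdk.SpeechConfig(
        subscription=os.environ["SPEECH_KEY"],
        region=os.environ["SPEECH_REGION"],
    )
    speech_config.speech_synthesis_voice_name = "en-US-JennyNeural"
    audio_config = speechsdk.audio.AudioOutputConfig(use_default_speaker=True)
    synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)
    result = synthesizer.speak_text_async(text).get()
    if result.reason != speechsdk.ResultReason.SynthesizingAudioCompleted:
        print("Synthesis did not complete:", result.reason)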
@@ -380,18 +380,18 @@ python_tool2 = Tool(
 
 
 os.environ["OPENAI_API_TYPE"] = "azure"
-os.environ["OPENAI_API_KEY"] = os.
-os.environ["OPENAI_API_BASE"] = os.
-os.environ["OPENAI_API_VERSION"] = os.
+os.environ["OPENAI_API_KEY"] = os.environ["OPENAI_API_KEY"]
+os.environ["OPENAI_API_BASE"] = os.environ["OPENAI_API_BASE"]
+os.environ["OPENAI_API_VERSION"] = os.environ["OPENAI_API_VERSION"]
 # os.environ["OPENAI_API_VERSION"] = "2023-05-15"
-username = os.
-password = os.
-SysLock = os.
+username = os.environ["username1"]
+password = os.environ["password"]
+SysLock = os.environ["SysLock"]  # 0=unlock 1=lock
 
 # deployment_name="Chattester"
 
 chat = AzureChatOpenAI(
-    deployment_name=os.
+    deployment_name=os.environ["deployment_name"],
     temperature=0,
 )
 llm = chat
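A sketch of how the variables referenced in this hunk could be validated before the chat model is built; the variable names (OPENAI_API_KEY, OPENAI_API_BASE, OPENAI_API_VERSION, deployment_name, username1, password, SysLock) come from the diff, while the fail-fast check and the classic langchain import path are assumptions.

# Sketch: fail fast if any variable referenced in the diff is missing.
import os
from langchain.chat_models import AzureChatOpenAI  # classic langchain API, as used elsewhere in app.py

REQUIRED_VARS = [
    "OPENAI_API_KEY", "OPENAI_API_BASE", "OPENAI_API_VERSION",
    "deployment_name", "username1", "password", "SysLock",
]
missing = [name for name in REQUIRED_VARS if not os.environ.get(name)]
if missing:
    raise RuntimeError(f"Missing environment variables: {', '.join(missing)}")

os.environ["OPENAI_API_TYPE"] = "azure"
chat = AzureChatOpenAI(
    deployment_name=os.environ["deployment_name"],
    temperature=0,
)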
@@ -416,7 +416,7 @@ embeddings = OpenAIEmbeddings(deployment="model_embedding", chunk_size=15)
 
 
 pinecone.init(
-    api_key = os.
+    api_key = os.environ["pinecone_api_key"],
     environment='asia-southeast1-gcp-free',
     # openapi_config=openapi_config
 )
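A standalone sketch of the same initialization using the classic pinecone-client API; the pinecone_api_key variable and the environment string are taken from the diff, and the list_indexes() call is only a quick sanity check, not part of app.py.

# Sketch using the classic pinecone-client API (pinecone.init).
import os
import pinecone

pinecone.init(
    api_key=os.environ["pinecone_api_key"],
    environment="asia-southeast1-gcp-free",
)
print(pinecone.list_indexes())  # confirms the key and environment are accepted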
@@ -511,7 +511,7 @@ agent_ZEROSHOT_REACT = initialize_agent(tools2, llm,
     agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
     verbose = True,
     handle_parsing_errors = True,
-    max_iterations = int(os.
+    max_iterations = int(os.environ["max_iterations"]),
     early_stopping_method="generate",
     agent_kwargs={
         'prefix': PREFIX,
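Because int(os.environ["max_iterations"]) raises if the variable is unset or non-numeric, a defensive variant could look like the sketch below; the default of 5 is an assumption, not a value from the diff. The same pattern applies to the two AgentExecutor hunks that follow.

# Sketch: tolerate a missing or malformed max_iterations value.
import os

def env_int(name: str, default: int) -> int:
    try:
        return int(os.environ[name])
    except (KeyError, ValueError):
        return default

max_iterations = env_int("max_iterations", 5)  # default of 5 is an assumption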
@@ -540,7 +540,7 @@ agent_ZEROSHOT_AGENT = AgentExecutor.from_agent_and_tools(
     verbose=True,
     # memory=memory,
     handle_parsing_errors = True,
-    max_iterations = int(os.
+    max_iterations = int(os.environ["max_iterations"]),
     early_stopping_method="generate",
 )
 
@@ -550,7 +550,7 @@ agent_OPENAI_MULTI = AgentExecutor.from_agent_and_tools(
     verbose=True,
     # memory=memory_openai,
     handle_parsing_errors = True,
-    max_iterations = int(os.
+    max_iterations = int(os.environ["max_iterations"]),
     early_stopping_method="generate",
 )
 
@@ -559,7 +559,7 @@ agent_OPENAI_MULTI = AgentExecutor.from_agent_and_tools(
     # agent.early_stopping_method = "generate"
 
     global agent
-    Choice = os.
+    Choice = os.environ["agent_type"]
 
     if Choice =='Zero Short Agent':
         agent = agent_ZEROSHOT_AGENT
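The same env-driven selection could be expressed as a dictionary dispatch, as in the sketch below; only the 'Zero Short Agent' key and the agent_ZEROSHOT_AGENT object are confirmed by the diff, and the fallback behaviour is an assumption.

# Sketch: dictionary dispatch for agent_type, assuming the agent objects defined earlier in app.py.
import os

AGENTS = {
    "Zero Short Agent": agent_ZEROSHOT_AGENT,  # mapping confirmed by the diff
}
Choice = os.environ.get("agent_type", "Zero Short Agent")  # default is an assumption
agent = AGENTS.get(Choice, agent_ZEROSHOT_AGENT)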
@@ -654,7 +654,7 @@ def func_upload_file(files, chat_history2):
     for file in files:
         chat_history2 = chat_history2 + [((file.name,), None)]
         yield chat_history2
-    if os.
+    if os.environ["SYS_Upload_Enable"] == "1":
         UpdateDb()
     test_msg = ["Request Upload File into DB", "Operation Finished"]
     chat_history2.append(test_msg)
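A sketch of reading the SYS_Upload_Enable flag more defensively; the comparison against "1" follows the diff, while the default of "0" and the helper shape are assumptions.

# Sketch: treat SYS_Upload_Enable as an opt-in flag; anything other than "1" skips the upload path.
import os

upload_enabled = os.environ.get("SYS_Upload_Enable", "0") == "1"  # default of "0" is an assumption
if upload_enabled:
    UpdateDb()  # defined elsewhere in app.py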
@@ -992,7 +992,7 @@ def QAQuery_p(question: str):
     global vectordb_p
     # vectordb = Chroma(persist_directory='db', embedding_function=embeddings)
     retriever = vectordb_p.as_retriever()
-    retriever.search_kwargs['k'] = int(os.
+    retriever.search_kwargs['k'] = int(os.environ["search_kwargs_k"])
     # retriever.search_kwargs['fetch_k'] = 100
 
     qa = RetrievalQA.from_chain_type(llm=chat, chain_type="stuff",
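Finally, a sketch of how the env-driven top-k feeds into the retrieval chain, assuming vectordb_p and chat are the objects built earlier in app.py; the default of 3 and the classic langchain import path are assumptions.

# Sketch: env-driven top-k for retrieval with the classic langchain RetrievalQA chain.
import os
from langchain.chains import RetrievalQA

retriever = vectordb_p.as_retriever()
retriever.search_kwargs["k"] = int(os.environ.get("search_kwargs_k", "3"))  # default of 3 is an assumption
qa = RetrievalQA.from_chain_type(llm=chat, chain_type="stuff", retriever=retriever)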