from langchain import SQLDatabase, SQLDatabaseChain
from langchain.llms import OpenAI
from api_key import open_ai_key
from speech_to_text import transcribe

llm = OpenAI(temperature=0, openai_api_key=open_ai_key)


# Not sure how the data will be stored, but the idea is that when a question or prompt is asked,
# the audio is transcribed to text, which is then fed into the LLM to query the database and
# return the answer.

# Establish the question to be asked (assumes transcribe() returns the transcribed text)
question = transcribe()

# I feel like I need another step here so that the model takes the question, goes to the db,
# and knows that it needs to look for the answer to the question.
# I am wondering if I need to set up an extraction algorithm here, but then how do I link the
# extraction algorithm to the database?
# Creating link to db
# I am also wondering if there should be an API for the model to call in order to access the
# database? Thinking that might be better.
def database(question):
    # Create the link to the SQLite database
    sqlite_db_path = 'database.db'
    db = SQLDatabase.from_uri(f'sqlite:///{sqlite_db_path}')

    # Let the chain translate the natural-language question into SQL, run it, and return the result
    db_chain = SQLDatabaseChain(llm=llm, database=db)

    db_results = db_chain.run(question)
    return db_results
# After retrieving the data from the database, have the LLM summarize it and return the answer
# to the question.
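# A minimal sketch of that summarization step, assuming the existing `llm` instance can be
# called directly on a prompt string (older LangChain LLM API); the summarize() name and the
# prompt wording are placeholders, not part of the original design.
def summarize(question, db_results):
    prompt = (
        f"Question: {question}\n"
        f"Database results: {db_results}\n"
        "Summarize the results above into a short, direct answer to the question."
    )
    return llm(prompt)
# Could be wired into __main__ as, e.g.: print(summarize(question, database(question)))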

if __name__ == '__main__':
    print(database(question))