bezaime committed on
Commit 4d09653 · verified · 1 Parent(s): e4a1aa1

Update app.py

Files changed (1)
  1. app.py +24 -32
app.py CHANGED
@@ -4,28 +4,26 @@ df = pd.read_csv('./Automobile_data.csv')
 df = df.drop(columns = ['normalized-losses','symboling'], axis = 1)
 
 context_data = []
-for i in range(len(df)):  # Loop over rows
-    context = ""
-    for j in range(10):  # Loop over the first 10 columns
-        context += df.columns[j]  # Add column name
-        context += ": "
-        context += str(df.iloc[i][j])  # Convert value to string
-        context += " "
-    context_data.append(context)
+for i in range(len(df)):
+    context = ""
+    for j in range(3):
+        context += df.columns[j]
+        context += ": "
+        context += df.iloc[i][j]
+        context += " "
+    context_data.append(context)
 
-from langchain_groq import ChatGroq
-llm = ChatGroq(model ="llama-3.1-70b-versatile",api_key = "beza")
-
-qa_pair = []
-for i in range(len(df)):
-    Question = "Given the type of "+df['make'][i]+" "+df['body-style'][i][:-1]+", "" what is the price?"
-    Answer = df['price'][i]
-    input = f"Instruction:\n{Question}\n\nResponse:\n{Answer}"
-    qa_pair.append(input)
-
-# from langchain_groq import ChatGroq
-# llm = ChatGroq(model="llama-3.1-70b-versatile",api_key= "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")
+import os
+
+# Get the secret key from the environment
+groq_key = os.environ.get('groq_api_keys')
+
+## LLM used for RAG
+from langchain_groq import ChatGroq
+
+llm = ChatGroq(model="llama-3.1-70b-versatile",api_key=groq_key)
 
 ## Embedding model!
 from langchain_huggingface import HuggingFaceEmbeddings
 embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")
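
One side effect of this hunk is worth flagging: the rewritten loop drops the str() conversion, so context += df.iloc[i][j] raises a TypeError if any of the first three columns is numeric, and os.environ.get('groq_api_keys') silently returns None when the secret is missing. A minimal defensive sketch of both steps, assuming the same CSV and model as the diff (the guard and its message are illustrative additions):

    import os

    import pandas as pd
    from langchain_groq import ChatGroq

    df = pd.read_csv('./Automobile_data.csv')
    df = df.drop(columns=['normalized-losses', 'symboling'])

    # f-strings stringify every cell, so numeric columns cannot break
    # the concatenation the way a bare `+=` would.
    context_data = [
        " ".join(f"{col}: {row[col]}" for col in df.columns[:3])
        for _, row in df.iterrows()
    ]

    # Fail fast instead of passing api_key=None through to the client.
    groq_key = os.environ.get('groq_api_keys')
    if not groq_key:
        raise RuntimeError("Space secret 'groq_api_keys' is not set")

    llm = ChatGroq(model="llama-3.1-70b-versatile", api_key=groq_key)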
@@ -34,24 +32,19 @@ embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-
 from langchain_chroma import Chroma
 
 vectorstore = Chroma(
-    collection_name="car_dataset_store",
+    collection_name="medical_dataset_store",
     embedding_function=embed_model,
     persist_directory="./",
 )
 
-vectorstore.get().keys()
 # add data to the vector store
 vectorstore.add_texts(context_data)
 
-query = "What is make, number of doors and fuel type?"
-docs = vectorstore.similarity_search(query)
-print(docs[0].page_content)
-
 retriever = vectorstore.as_retriever()
 
 from langchain_core.prompts import PromptTemplate
 
-template = ("""You are a car expert.
+template = ("""You are a medical expert.
 Use the provided context to answer the question.
 If you don't know the answer, say so. Explain your answer in detail.
 Do not discuss the context in your response; just provide the answer directly.
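
This hunk also drops the ad-hoc store checks (vectorstore.get().keys() and the similarity_search print). If a quick smoke test is still useful after add_texts, the deleted pattern still works against the vectorstore defined above; the query text and k below are illustrative:

    # Sanity-check retrieval: print the closest stored contexts.
    query = "what is the possible sickness for fatigue"
    for doc in vectorstore.similarity_search(query, k=2):
        print(doc.page_content)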
@@ -74,11 +67,6 @@ rag_chain = (
     | StrOutputParser()
 )
 
-from IPython.display import display, Markdown
-
-response = rag_chain.invoke("What is Capital of Rwanda?")
-Markdown(response)
-
 import gradio as gr
 
 def rag_memory_stream(text):
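
The removed IPython block only renders inside a notebook, so it was dead weight in a deployed Space. An equivalent plain-Python check, assuming the rag_chain assembled earlier in app.py, is simply:

    # One-off sanity check of the chain outside a notebook.
    response = rag_chain.invoke("I feel dizzy")
    print(response)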
@@ -87,17 +75,21 @@ def rag_memory_stream(text):
         partial_text += new_text
         yield partial_text
 
-title = "Real-time AI App with Groq API and LangChain to Answer car questions"
+examples = ['I feel dizzy', 'what is the possible sickness for fatigue']
+
+title = "Real-time AI App with Groq API and LangChain to Answer medical questions"
 demo = gr.Interface(
     title=title,
     fn=rag_memory_stream,
     inputs="text",
     outputs="text",
+    examples=examples,
     allow_flagging="never",
 )
 
-demo.launch(share=True)
-
 if __name__ == "__main__":
     demo.launch()
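
The diff shows only the tail of rag_memory_stream. A generator consistent with those lines, assuming rag_chain.stream() yields string chunks (as an LCEL chain ending in StrOutputParser does), would be:

    def rag_memory_stream(text):
        partial_text = ""
        # Re-yield the growing answer: gr.Interface treats a generator
        # function as a streaming text output.
        for new_text in rag_chain.stream(text):
            partial_text += new_text
            yield partial_text

Gradio runs the same generator when a user clicks one of the new example prompts.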
 