bezaime committed on
Commit
07037b8
·
verified ·
1 Parent(s): 6acb117

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -21
app.py CHANGED
@@ -1,25 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
-
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
-
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "conten
23
 
24
  if __name__ == "__main__":
25
  demo.launch()
 
1
# ---- Data preparation ----
import pandas as pd  # was missing: the new file uses pd without importing it

# Read the automobile dataset and drop columns not useful as retrieval context.
df = pd.read_csv("Automobile_data.csv")
df = df.drop(columns=["normalized-losses", "symboling"])

# Serialize the first 10 columns of every row into one "col: value col: value "
# string so each row can be embedded into the vector store.
# (The original comment said "first 8 columns" but the loop reads 10.)
context_data = []
for row_idx in range(len(df)):
    pieces = [f"{df.columns[col_idx]}: {df.iloc[row_idx, col_idx]}" for col_idx in range(10)]
    # Trailing space kept to match the original concatenation output exactly.
    context_data.append(" ".join(pieces) + " ")
14
+
15
import os

from langchain_groq import ChatGroq

# SECURITY: never hard-code API keys in source. Read the Groq key from the
# environment (set GROQ_API_KEY in the host's secrets) — the previous
# placeholder string would fail authentication anyway.
llm = ChatGroq(
    model="llama-3.1-70b-versatile",
    api_key=os.environ.get("GROQ_API_KEY"),
)

# Build instruction/response pairs from the dataset.
# NOTE(review): qa_pair is constructed but never used downstream — confirm
# whether it is needed (e.g. for fine-tuning) or can be dropped.
qa_pair = []
for i in range(len(df)):
    # NOTE(review): the original sliced body-style with [:-1], dropping its
    # last character — this looks unintentional; confirm against the data.
    question = (
        "Given the type of " + df["make"][i] + " " + df["body-style"][i][:-1]
        + ", what is the price?"
    )
    answer = df["price"][i]
    # renamed from `input`, which shadowed the builtin
    qa_pair.append(f"Instruction:\n{question}\n\nResponse:\n{answer}")
24
+
25
# (Removed a commented-out duplicate ChatGroq initialization that contained a
# hard-coded, now-leaked API key. Any key committed to source control must be
# revoked and supplied via environment variables instead.)

## Embedding model
from langchain_huggingface import HuggingFaceEmbeddings

embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")

# Create the vector store, persisted in the current working directory.
from langchain_chroma import Chroma

vectorstore = Chroma(
    collection_name="car_dataset_store",
    embedding_function=embed_model,
    persist_directory="./",
)

# Add the serialized dataset rows to the vector store.
# (Removed a dead `vectorstore.get().keys()` statement whose result was unused.)
vectorstore.add_texts(context_data)

# Smoke test: print the closest stored row for a sample query.
query = "What is make, number of doors and fuel type?"
docs = vectorstore.similarity_search(query)
print(docs[0].page_content)
48
+
49
retriever = vectorstore.as_retriever()

from langchain_core.prompts import PromptTemplate

# Prompt: answer strictly from the retrieved context.
template = ("""You are a car expert.
Use the provided context to answer the question.
If you don't know the answer, say so. Explain your answer in detail.
Do not discuss the context in your response; just provide the answer directly.

Context: {context}

Question: {question}

Answer:""")

rag_prompt = PromptTemplate.from_template(template)

from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

# RAG pipeline: retrieve context for the question, fill the prompt,
# call the LLM, and parse the reply down to a plain string.
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)

# Removed notebook-only leftovers (`from IPython.display import display,
# Markdown` plus an unconditional rag_chain.invoke("What is Capital of
# Rwanda?")): they made a blocking LLM call on every app start, and IPython
# is not guaranteed to be installed in the serving environment.
80
+
81
import gradio as gr


def rag_memory_stream(text):
    """Stream the RAG chain's answer for *text*, yielding the growing reply.

    Gradio renders each yielded value, so the UI shows the answer token by
    token as `rag_chain.stream` produces it.
    """
    partial_text = ""
    for new_text in rag_chain.stream(text):
        partial_text += new_text
        yield partial_text


title = "Real-time AI App with Groq API and LangChain to Answer car questions"
demo = gr.Interface(
    title=title,
    fn=rag_memory_stream,
    inputs="text",
    outputs="text",
    # NOTE(review): allow_flagging is deprecated in Gradio 4.x in favor of
    # flagging_mode; kept for compatibility with the pinned version.
    allow_flagging="never",
)

# Launch exactly once, only when run as a script. The original launched the
# app twice: unconditionally with share=True, then again under this guard.
if __name__ == "__main__":
    demo.launch(share=True)