rajsecrets0 committed
Commit 040f3b6 · verified · 1 Parent(s): 5897be4

Update app.py

Files changed (1)
  1. app.py +27 -17
app.py CHANGED
@@ -1,15 +1,23 @@
+import os
 import streamlit as st
 import torch
 from transformers import BitsAndBytesConfig
 
-# Import llama-index and langchain modules
+# Import necessary modules from llama-index and langchain
 from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings, PromptTemplate
 from llama_index.llms.huggingface import HuggingFaceLLM
-from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from langchain.embeddings import HuggingFaceEmbeddings
 from llama_index.embeddings.langchain import LangchainEmbedding
 
+# ---------------------------
+# Retrieve Hugging Face Token from Environment Variables
+# ---------------------------
+hf_token = os.getenv("HF_TOKEN")
+if hf_token is None:
+    st.error("Missing Hugging Face token. Please set HF_TOKEN in your Space secrets.")
+    st.stop()
+
 # ---------------------------
 # Configure your LLM and embeddings
 # ---------------------------
@@ -25,7 +33,7 @@ quantization_config = BitsAndBytesConfig(
     bnb_4bit_compute_dtype=torch.float16
 )
 
-# Initialize the HuggingFaceLLM with your model settings
+# Initialize the HuggingFaceLLM with your model settings and authentication token
 llm = HuggingFaceLLM(
     context_window=4096,
     max_new_tokens=256,
@@ -37,7 +45,8 @@ llm = HuggingFaceLLM(
     device_map="auto",
     model_kwargs={
         "torch_dtype": torch.float16,
-        "quantization_config": quantization_config
+        "quantization_config": quantization_config,
+        "use_auth_token": hf_token # Pass the HF token for gated access
     }
 )
 
@@ -53,8 +62,8 @@ Settings.chunk_size = 1024
 # ---------------------------
 # Load documents from repository
 # ---------------------------
-# The "data" folder should be part of your repository with your documents.
-DATA_DIR = "data" # Ensure this folder exists and contains your documents.
+DATA_DIR = "data" # Ensure this folder exists in your repository and contains your documents
+
 try:
     documents = SimpleDirectoryReader(DATA_DIR).load_data()
 except Exception as e:
@@ -63,20 +72,21 @@ except Exception as e:
 
 if not documents:
     st.warning("No documents found in the data folder. Please add your documents and redeploy.")
+    st.stop()
 else:
-    # Create the vector store index
+    # Create the vector store index and query engine
    index = VectorStoreIndex.from_documents(documents)
    query_engine = index.as_query_engine()
 
-    # ---------------------------
-    # Streamlit Interface
-    # ---------------------------
-    st.title("LLama Index Q&A Assistant")
+    # ---------------------------
+    # Streamlit Interface
+    # ---------------------------
+    st.title("LLama Index Q&A Assistant")
 
-    user_query = st.text_input("Enter your question:")
+    user_query = st.text_input("Enter your question:")
 
-    if user_query:
-        with st.spinner("Querying..."):
-            response = query_engine.query(user_query)
-            st.markdown("### Response:")
-            st.write(response)
+    if user_query:
+        with st.spinner("Querying..."):
+            response = query_engine.query(user_query)
+            st.markdown("### Response:")
+            st.write(response)
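
The new code path assumes the HF_TOKEN secret is configured for the Space and that the token actually has read access to the gated model repository referenced in app.py. A minimal pre-flight check in that spirit is sketched below; it is not part of this commit, and GATED_REPO is a placeholder for whichever gated model app.py loads.

# check_hf_access.py -- hypothetical helper, not part of this commit
import os
import sys

from huggingface_hub import HfApi

GATED_REPO = "your-org/your-gated-model"  # placeholder: substitute the model used in app.py

def main() -> None:
    # Same environment variable the updated app.py reads
    token = os.getenv("HF_TOKEN")
    if token is None:
        sys.exit("HF_TOKEN is not set; add it under the Space's Settings -> Secrets.")

    api = HfApi(token=token)
    user = api.whoami()  # raises if the token is invalid
    print(f"Token belongs to: {user.get('name', 'unknown')}")

    # Raises an authorization error if the token cannot see the gated repo
    api.model_info(GATED_REPO)
    print(f"Token can access {GATED_REPO}")

if __name__ == "__main__":
    main()

Running a check like this locally with HF_TOKEN exported (or once inside the Space) separates credential problems from failures in the quantized model load itself.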