Update app.py
app.py
CHANGED
@@ -7,11 +7,6 @@ from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.vectorstores import Chroma
 import os
 
-# Check for GPU availability
-if not torch.cuda.is_available():
-    st.error("No GPU available. This application requires a GPU to run.")
-    st.stop()
-
 # Initialize session state for storing the vector database and tenant
 if 'vectordb' not in st.session_state:
     st.session_state.vectordb = {}
@@ -88,7 +83,6 @@ def load_model(model_path):
         torch_dtype=torch.float16,
         low_cpu_mem_usage=True,
     )
-    model.to('cuda') # Move the model to GPU
     model.eval()
     return model, tokenizer
 
@@ -120,7 +114,10 @@ Answer:"""
 
 # Main app logic
 def main():
-
+    if torch.cuda.is_available():
+        st.sidebar.success("GPU is available!")
+    else:
+        st.sidebar.warning("GPU is not available. This app may run slowly on CPU.")
 
     # Model path input
     model_path = st.sidebar.text_input("Enter the path to your model:",
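
For context, here is a minimal sketch of how load_model could pick its device at runtime now that the hard GPU guard and the unconditional model.to('cuda') call are removed. Only model_path, the torch_dtype/low_cpu_mem_usage arguments, and model.eval() come from the diff; the AutoModelForCausalLM/AutoTokenizer loading and the device/dtype fallback are assumptions about the surrounding code, not the Space author's implementation.

# Sketch only: assumes the app loads a causal LM via transformers' Auto classes,
# which this diff does not show.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_model(model_path):
    # Choose the device at runtime instead of requiring CUDA up front.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        # float16 halves memory on GPU; fall back to float32 on CPU,
        # where half precision is slow or unsupported for many ops.
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
        low_cpu_mem_usage=True,
    )
    model.to(device)
    model.eval()
    return model, tokenizer

With a pattern like this, the sidebar check added in main() stays purely informational: the app degrades to CPU instead of calling st.stop().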