Update app.py
app.py
CHANGED
@@ -1,4 +1,5 @@
 # app.py
+
 import os
 from pathlib import Path
 
@@ -6,10 +7,10 @@ import gradio as gr
 from PIL import Image
 from huggingface_hub import InferenceClient
 
-# ✅
+# ✅ Community imports to avoid deprecation
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_community.vectorstores import FAISS
-from langchain_community.llms import
+from langchain_community.llms import HuggingFaceEndpoint
 
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.chains import RetrievalQA
@@ -24,13 +25,17 @@ FIG_DIR = Path("figures")
 PDF_DIR.mkdir(exist_ok=True)
 FIG_DIR.mkdir(exist_ok=True)
 
+# ───── Read your HF_TOKEN secret ─────
+hf_token = os.environ["HF_TOKEN"]
+
 # ───── Embeddings & LLM Setup ─────
 embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
 
-# LLM via
-llm =
-
-
+# LLM via HF Inference API endpoint
+llm = HuggingFaceEndpoint(
+    endpoint_url="https://api-inference.huggingface.co/models/google/flan-t5-base",
+    huggingfacehub_api_token=hf_token,
+    model_kwargs={"temperature": 0.5, "max_length": 512},
 )
 
 # Prompt
@@ -43,9 +48,9 @@ Answer (up to 3 sentences):
 prompt = PromptTemplate(template=TEMPLATE, input_variables=["context", "question"])
 
 # Inference client for image captioning
-vision_client = InferenceClient("Salesforce/blip-image-captioning-base")
+vision_client = InferenceClient("Salesforce/blip-image-captioning-base", token=hf_token)
 
-# Globals (will
+# Globals (will initialize after processing)
 vector_store = None
 qa_chain = None
 
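For context, below is a minimal sketch of how the pieces touched by this commit (the HF_TOKEN secret, the HuggingFaceEndpoint LLM, the prompt, FAISS, and RetrievalQA) are typically wired together once a PDF's text has been extracted. The parts above build_qa_chain mirror the committed code; the TEMPLATE body, the build_qa_chain helper name, the chunk sizes, and the retriever's k=3 are assumptions for illustration, not taken from the Space's app.py.

# Sketch only: assumed wiring of the components this commit configures.
import os

from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Same token handling and LLM setup as the diff above.
hf_token = os.environ["HF_TOKEN"]

embedding_model = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2"
)

llm = HuggingFaceEndpoint(
    endpoint_url="https://api-inference.huggingface.co/models/google/flan-t5-base",
    huggingfacehub_api_token=hf_token,
    model_kwargs={"temperature": 0.5, "max_length": 512},
)

# Assumed prompt layout; the diff only shows that the real TEMPLATE ends with
# "Answer (up to 3 sentences):".
TEMPLATE = """Use the context to answer the question.
Context: {context}
Question: {question}
Answer (up to 3 sentences):"""
prompt = PromptTemplate(template=TEMPLATE, input_variables=["context", "question"])


def build_qa_chain(raw_text: str) -> RetrievalQA:
    """Split extracted PDF text, index it in FAISS, and wrap it in a RetrievalQA chain."""
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_text(raw_text)
    vector_store = FAISS.from_texts(chunks, embedding_model)
    return RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=vector_store.as_retriever(search_kwargs={"k": 3}),
        chain_type_kwargs={"prompt": prompt},
    )


if __name__ == "__main__":
    qa_chain = build_qa_chain("Example extracted PDF text goes here.")
    print(qa_chain.invoke({"query": "What is this document about?"}))

One design note: os.environ["HF_TOKEN"] makes the Space fail fast at startup if the secret is not set; using os.environ.get("HF_TOKEN") and raising a descriptive error is a gentler alternative when the missing-secret message should be surfaced in the app itself.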