Muzammil6376 committed
Commit ecaa05c · verified · 1 Parent(s): 24fdc2f

Update app.py

Files changed (1):
  app.py +13 -8
app.py CHANGED
@@ -1,4 +1,5 @@
 # app.py
+
 import os
 from pathlib import Path
 
@@ -6,10 +7,10 @@ import gradio as gr
 from PIL import Image
 from huggingface_hub import InferenceClient
 
-# ✅ Use community packages to avoid deprecation warnings
+# ✅ Community imports to avoid deprecation
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_community.vectorstores import FAISS
-from langchain_community.llms import HuggingFaceHub
+from langchain_community.llms import HuggingFaceEndpoint
 
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.chains import RetrievalQA
@@ -24,13 +25,17 @@ FIG_DIR = Path("figures")
 PDF_DIR.mkdir(exist_ok=True)
 FIG_DIR.mkdir(exist_ok=True)
 
+# ————— Read your HF_TOKEN secret —————
+hf_token = os.environ["HF_TOKEN"]
+
 # ————— Embeddings & LLM Setup —————
 embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
 
-# LLM via Hugging Face Inference API
-llm = HuggingFaceHub(
-    repo_id="google/flan-t5-base",
-    model_kwargs={"temperature": 0.5, "max_length": 512}
+# LLM via HF Inference API endpoint
+llm = HuggingFaceEndpoint(
+    endpoint_url="https://api-inference.huggingface.co/models/google/flan-t5-base",
+    huggingfacehub_api_token=hf_token,
+    model_kwargs={"temperature": 0.5, "max_length": 512},
 )
 
 # Prompt
@@ -43,9 +48,9 @@ Answer (up to 3 sentences):
 prompt = PromptTemplate(template=TEMPLATE, input_variables=["context", "question"])
 
 # Inference client for image captioning
-vision_client = InferenceClient("Salesforce/blip-image-captioning-base")
+vision_client = InferenceClient("Salesforce/blip-image-captioning-base", token=hf_token)
 
-# Globals (will set after processing)
+# Globals (will initialize after processing)
 vector_store = None
 qa_chain = None
56