Rivalcoder committed on
Commit
0d10b91
·
1 Parent(s): 22c777f

Update Prompt

Browse files
Files changed (19) hide show
  1. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/.no_exist/4ca70771034acceecb2e72475f72050fcdde4ddc/adapter_config.json +0 -0
  2. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/.no_exist/4ca70771034acceecb2e72475f72050fcdde4ddc/added_tokens.json +0 -0
  3. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/.no_exist/4ca70771034acceecb2e72475f72050fcdde4ddc/chat_template.jinja +0 -0
  4. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/refs/main +1 -0
  5. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/1_Pooling/config.json +7 -0
  6. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/README.md +114 -0
  7. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/config.json +24 -0
  8. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/config_sentence_transformers.json +7 -0
  9. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/model.safetensors +3 -0
  10. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/modules.json +14 -0
  11. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/sentence_bert_config.json +4 -0
  12. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/special_tokens_map.json +1 -0
  13. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/tokenizer.json +0 -0
  14. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/tokenizer_config.json +1 -0
  15. .cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/vocab.txt +0 -0
  16. app.py +1 -1
  17. main.py +1 -1
  18. parser.py +0 -29
  19. pdf_parser.py +43 -0
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/.no_exist/4ca70771034acceecb2e72475f72050fcdde4ddc/adapter_config.json ADDED
File without changes
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/.no_exist/4ca70771034acceecb2e72475f72050fcdde4ddc/added_tokens.json ADDED
File without changes
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/.no_exist/4ca70771034acceecb2e72475f72050fcdde4ddc/chat_template.jinja ADDED
File without changes
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/refs/main ADDED
@@ -0,0 +1 @@
 
 
1
+ 4ca70771034acceecb2e72475f72050fcdde4ddc
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "word_embedding_dimension": 384,
3
+ "pooling_mode_cls_token": false,
4
+ "pooling_mode_mean_tokens": true,
5
+ "pooling_mode_max_tokens": false,
6
+ "pooling_mode_mean_sqrt_len_tokens": false
7
+ }
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/README.md ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ library_name: sentence-transformers
4
+ tags:
5
+ - sentence-transformers
6
+ - feature-extraction
7
+ - sentence-similarity
8
+ - transformers
9
+ datasets:
10
+ - flax-sentence-embeddings/stackexchange_xml
11
+ - s2orc
12
+ - ms_marco
13
+ - wiki_atomic_edits
14
+ - snli
15
+ - multi_nli
16
+ - embedding-data/altlex
17
+ - embedding-data/simple-wiki
18
+ - embedding-data/flickr30k-captions
19
+ - embedding-data/coco_captions
20
+ - embedding-data/sentence-compression
21
+ - embedding-data/QQP
22
+ - yahoo_answers_topics
23
+ pipeline_tag: sentence-similarity
24
+ ---
25
+
26
+ # sentence-transformers/paraphrase-MiniLM-L3-v2
27
+
28
+ This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search.
29
+
30
+
31
+
32
+ ## Usage (Sentence-Transformers)
33
+
34
+ Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
35
+
36
+ ```
37
+ pip install -U sentence-transformers
38
+ ```
39
+
40
+ Then you can use the model like this:
41
+
42
+ ```python
43
+ from sentence_transformers import SentenceTransformer
44
+ sentences = ["This is an example sentence", "Each sentence is converted"]
45
+
46
+ model = SentenceTransformer('sentence-transformers/paraphrase-MiniLM-L3-v2')
47
+ embeddings = model.encode(sentences)
48
+ print(embeddings)
49
+ ```
50
+
51
+
52
+
53
+ ## Usage (HuggingFace Transformers)
54
+ Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
55
+
56
+ ```python
57
+ from transformers import AutoTokenizer, AutoModel
58
+ import torch
59
+
60
+
61
+ #Mean Pooling - Take attention mask into account for correct averaging
62
+ def mean_pooling(model_output, attention_mask):
63
+ token_embeddings = model_output[0] #First element of model_output contains all token embeddings
64
+ input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
65
+ return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
66
+
67
+
68
+ # Sentences we want sentence embeddings for
69
+ sentences = ['This is an example sentence', 'Each sentence is converted']
70
+
71
+ # Load model from HuggingFace Hub
72
+ tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/paraphrase-MiniLM-L3-v2')
73
+ model = AutoModel.from_pretrained('sentence-transformers/paraphrase-MiniLM-L3-v2')
74
+
75
+ # Tokenize sentences
76
+ encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
77
+
78
+ # Compute token embeddings
79
+ with torch.no_grad():
80
+ model_output = model(**encoded_input)
81
+
82
+ # Perform pooling. In this case, mean pooling.
83
+ sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
84
+
85
+ print("Sentence embeddings:")
86
+ print(sentence_embeddings)
87
+ ```
88
+
89
+
90
+
91
+ ## Full Model Architecture
92
+ ```
93
+ SentenceTransformer(
94
+ (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
95
+ (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
96
+ )
97
+ ```
98
+
99
+ ## Citing & Authors
100
+
101
+ This model was trained by [sentence-transformers](https://www.sbert.net/).
102
+
103
+ If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084):
104
+ ```bibtex
105
+ @inproceedings{reimers-2019-sentence-bert,
106
+ title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
107
+ author = "Reimers, Nils and Gurevych, Iryna",
108
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
109
+ month = "11",
110
+ year = "2019",
111
+ publisher = "Association for Computational Linguistics",
112
+ url = "http://arxiv.org/abs/1908.10084",
113
+ }
114
+ ```
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/config.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "old_models/paraphrase-MiniLM-L3-v2/0_Transformer",
3
+ "architectures": [
4
+ "BertModel"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "gradient_checkpointing": false,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.1,
10
+ "hidden_size": 384,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 1536,
13
+ "layer_norm_eps": 1e-12,
14
+ "max_position_embeddings": 512,
15
+ "model_type": "bert",
16
+ "num_attention_heads": 12,
17
+ "num_hidden_layers": 3,
18
+ "pad_token_id": 0,
19
+ "position_embedding_type": "absolute",
20
+ "transformers_version": "4.7.0",
21
+ "type_vocab_size": 2,
22
+ "use_cache": true,
23
+ "vocab_size": 30522
24
+ }
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "__version__": {
3
+ "sentence_transformers": "2.0.0",
4
+ "transformers": "4.7.0",
5
+ "pytorch": "1.9.0+cu102"
6
+ }
7
+ }
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf1e4e2d420c664973037c3c73125d7a8fc69952495093ef8f50596f8943a433
3
+ size 69569488
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/modules.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "idx": 0,
4
+ "name": "0",
5
+ "path": "",
6
+ "type": "sentence_transformers.models.Transformer"
7
+ },
8
+ {
9
+ "idx": 1,
10
+ "name": "1",
11
+ "path": "1_Pooling",
12
+ "type": "sentence_transformers.models.Pooling"
13
+ }
14
+ ]
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "max_seq_length": 128,
3
+ "do_lower_case": false
4
+ }
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "name_or_path": "nreimers/MiniLM-L3-H384-uncased", "do_basic_tokenize": true, "never_split": null, "model_max_length": 512}
.cache/models--sentence-transformers--paraphrase-MiniLM-L3-v2/snapshots/4ca70771034acceecb2e72475f72050fcdde4ddc/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
app.py CHANGED
@@ -23,7 +23,7 @@ logging.getLogger('tensorflow').setLevel(logging.ERROR)
23
  from fastapi import FastAPI, Request, HTTPException, Depends, Header
24
  from fastapi.middleware.cors import CORSMiddleware
25
  from pydantic import BaseModel
26
- from parser import parse_pdf_from_url, parse_pdf_from_file
27
  from embedder import build_faiss_index, preload_model
28
  from retriever import retrieve_chunks
29
  from llm import query_gemini
 
23
  from fastapi import FastAPI, Request, HTTPException, Depends, Header
24
  from fastapi.middleware.cors import CORSMiddleware
25
  from pydantic import BaseModel
26
+ from pdf_parser import parse_pdf_from_url_multithreaded as parse_pdf_from_url, parse_pdf_from_file_multithreaded as parse_pdf_from_file
27
  from embedder import build_faiss_index, preload_model
28
  from retriever import retrieve_chunks
29
  from llm import query_gemini
main.py CHANGED
@@ -17,7 +17,7 @@ logging.getLogger('tensorflow').setLevel(logging.ERROR)
17
  from fastapi import FastAPI, Request, HTTPException, Depends, Header
18
  from fastapi.middleware.cors import CORSMiddleware
19
  from pydantic import BaseModel
20
- from parser import parse_pdf_from_url, parse_pdf_from_file
21
  from embedder import build_faiss_index, preload_model
22
  from retriever import retrieve_chunks
23
  from llm import query_gemini
 
17
  from fastapi import FastAPI, Request, HTTPException, Depends, Header
18
  from fastapi.middleware.cors import CORSMiddleware
19
  from pydantic import BaseModel
20
+ from pdf_parser import parse_pdf_from_url, parse_pdf_from_file
21
  from embedder import build_faiss_index, preload_model
22
  from retriever import retrieve_chunks
23
  from llm import query_gemini
parser.py DELETED
@@ -1,29 +0,0 @@
1
- import fitz # PyMuPDF
2
- import requests
3
- from io import BytesIO
4
- import time
5
-
6
- def parse_pdf_from_url(url):
7
- res = requests.get(url)
8
- doc = fitz.open(stream=BytesIO(res.content), filetype="pdf")
9
- chunks = []
10
- for page in doc:
11
- text = page.get_text()
12
- if text.strip():
13
- chunks.append(text)
14
- doc.close()
15
- return chunks
16
-
17
- def parse_pdf_from_file(file_path):
18
- """Parse a local PDF file and extract text chunks"""
19
- try:
20
- doc = fitz.open(file_path)
21
- chunks = []
22
- for page in doc:
23
- text = page.get_text()
24
- if text.strip():
25
- chunks.append(text)
26
- doc.close()
27
- return chunks
28
- except Exception as e:
29
- raise Exception(f"Error parsing PDF file {file_path}: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
pdf_parser.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import fitz # PyMuPDF
2
+ import requests
3
+ from io import BytesIO
4
+ from concurrent.futures import ThreadPoolExecutor
5
+ import os
6
+
7
+ def extract_page_text(page):
8
+ text = page.get_text()
9
+ return text if text.strip() else None
10
+
11
+ def parse_pdf_from_url_multithreaded(url, max_workers=None):
12
+ # Automatically detect and use all available CPU cores if max_workers not set
13
+ if max_workers is None:
14
+ max_workers = os.cpu_count() or 8
15
+
16
+ res = requests.get(url)
17
+ doc = fitz.open(stream=BytesIO(res.content), filetype="pdf")
18
+ pages = [page for page in doc]
19
+ chunks = [None] * len(pages)
20
+
21
+ # Process pages in parallel, preserving page order
22
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
23
+ results = list(executor.map(extract_page_text, pages))
24
+
25
+ # Keep only non-empty page results, preserving order
26
+ doc.close()
27
+ return [r for r in results if r]
28
+
29
+ def parse_pdf_from_file_multithreaded(file_path, max_workers=None):
30
+ if max_workers is None:
31
+ max_workers = os.cpu_count() or 8
32
+
33
+ try:
34
+ doc = fitz.open(file_path)
35
+ pages = [page for page in doc]
36
+ chunks = [None] * len(pages)
37
+
38
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
39
+ results = list(executor.map(extract_page_text, pages))
40
+ doc.close()
41
+ return [r for r in results if r]
42
+ except Exception as e:
43
+ raise Exception(f"Error parsing PDF file {file_path}: {str(e)}")