Add new SentenceTransformer model
- 1_Pooling/config.json +10 -0
- README.md +613 -0
- config.json +25 -0
- config_sentence_transformers.json +12 -0
- model.safetensors +3 -0
- modules.json +20 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +63 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 1024,
  "pooling_mode_cls_token": true,
  "pooling_mode_mean_tokens": false,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
README.md
ADDED
@@ -0,0 +1,613 @@
---
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:400
- loss:MatryoshkaLoss
- loss:MultipleNegativesRankingLoss
base_model: Snowflake/snowflake-arctic-embed-l
widget:
- source_sentence: What actions did Mr and Mrs Harris take that led to the revelation
    of the facts in the case?
  sentences:
  - "Perplexity’s marketing activities include promoting on its Instagram account\
    \ a massive billboard \nin Times Square from September 2024 which read “Congratulations\
    \ Perplexity on 250 million \nquestions answered last month.”5 \n \n4 Discover\
    \ New York with Perplexity, Perplexity AI (last visited Oct. 17, 2024), \nhttps://www.perplexity.ai/encyclopedia/discovernewyork.\
    \ \n5 @perplexity.ai, Instagram (Sept. 4, 2024), \nhttps://www.instagram.com/perplexity.ai/p/C_g2TonSHC5.\
    \ \nCase 1:24-cv-07984 Document 1 Filed 10/21/24 Page 8 of 42"
  - "31 \n \nstatus. It was not until Mr. and Mrs. Harris retained counsel, served\
    \ a demand letter on May 22, \n2024, met with the then Assistant Superintendent\
    \ and a lengthy “bulling investigation” that these \nfacts came to light. \n\
    The Defendant’s actions and conduct, by definition, was arbitrary and capricious\
    \ as was \nthe imposition of discipline that was a gross abuse of discretion\
    \ when it served as a catalyst for \nthis action. Similarly, the Defendants exceeded\
    \ their authority by repeatedly doubling down on \ntheir acts and conduct when\
    \ given the opportunity to reverse course. The adverse action taken was \nnot\
    \ based on sound, objective, adopted and approved policies and procedures regarding\
    \ the use of"
  - "website users, and licensing is transacted with individuals and entities residing\
    \ in this State and \nDistrict. As such, the injuries alleged herein from Perplexity’s\
    \ infringement and other unlawful \nconduct foreseeably occurred in this State\
    \ and District. In addition, Perplexity or its agents reside \nin this District\
    \ and may be found in this State and District. \n23. \nDefendant Perplexity is\
    \ subject to the jurisdiction of this Court pursuant to N.Y. \nC.P.L.R. § 302(a)(1)\
    \ and (3) as it has purposefully directed its activities at New York and has \n\
    Case 1:24-cv-07984 Document 1 Filed 10/21/24 Page 7 of 42"
- source_sentence: How did the Plaintiffs demonstrate that the discipline and sanctions
    imposed by Hingham were arbitrary and capricious?
  sentences:
  - "27 \n \nin the adoption and execution of policies and practices that in their\
    \ judgment are needed to preserve \ninternal order and discipline and to maintain\
    \ institutional security\"), such deference is not without \nlimitation. The\
    \ propriety of and deference afforded to the decision making is a rebuttable \n\
    presumption that may only be undone by a showing that the action taken was arbitrary\
    \ and \ncapricious. See Doe v. Supt. Of Schools of Stoughton, 437 Mass. 1, 5\
    \ (2002). The Plaintiffs are \nlikely to succeed on the merits because they have\
    \ shown through Hingham’s own investigation \nmaterials that the discipline and\
    \ sanctions imposed were arbitrary, capricious and an abuse of \ndiscretion under\
    \ the circumstances."
  - "7 \n \nhighly competitive curriculum with by and large top grades, a 36 ACT (highest\
    \ score possible) \nand a varied solid resume. In order for RNH to apply to Stanford\
    \ by November 1, which means \nsubmitting no later than October 25th, his transcript\
    \ issue must be resolved by early October so \nthat when RNH requests his transcripts,\
    \ they reflect grades commensurate with his achievement \nand not marred by the\
    \ incident that gave rise to this case. \nLetter grades of “C” in this type of\
    \ admissions environment typically lead to the applicant \nbeing excluded from\
    \ consideration. Additionally, transcripts and information regarding any \ndisciplinary\
    \ infraction, especially one regarding an academic integrity infraction, are a\
    \ substantial"
  - "as one’s own. Id. at ¶106-107. During the project, RNH and his classmate did\
    \ not take someone \nelse’s work or ideas and pass them off as their own. Id.\
    \ at ¶108. RNH and his classmate used AI, \nwhich generates and synthesizes new\
    \ information, and did not pass off another’s work as their \nown. Id. at ¶109.\
    \ Despite having this information, the Defendants exceeded the authority granted\
    \ \nto them in an abuse of authority, discretion, and unfettered state action\
    \ by unfairly and unjustly \nacting as investigator, judge, jury, and executioner\
    \ in determining the extreme and outrageous \nsanctions imposed upon these Students.\
    \ Id. at ¶110. \nAfter being unfairly and unjustly accused of cheating, plagiarism,\
    \ and academic"
- source_sentence: How many students with academic infractions were inducted into
    the NHS, and what was one of the reasons for their infractions?
  sentences:
  - "companies that want to utilize popular, high-quality, human-created journalism\
    \ for use by the \ncompanies’ AI applications. Revenue received from legitimately-run\
    \ AI companies supports the \ncosts of news gathering. This revenue also establishes\
    \ that there is a market for the licensing of \nhuman-generated content for lawful\
    \ use in AI technologies. \n42. \nPlaintiffs’ content is highly valued in this\
    \ market. \n \n \nCase 1:24-cv-07984 Document 1 Filed 10/21/24 Page\
    \ 12 of 42"
  - "18 \n \nupon in affirming the decision through an appeal to exclude RNH and his\
    \ classmate from the NHS. \nId. at ¶145. At that time, Defendant Swanson and\
    \ other Defendants knew or should have known \nthat the District inducted at least\
    \ seven students into NHS, who had academic infractions on their \nrecord, one\
    \ of which was because of the prior use of AI. Id. at ¶146. \nThe “committee”\
    \ that adjudicated selection for NHS this year did not include teachers who \n\
    know and are familiar with RNH and his classmate. Id. at ¶147. This is due to\
    \ the then escalating \ncontract conflict with the Hingham Educators Association\
    \ (“HEA”) where HEA engaged in an"
  - "is plainly and solely for a commercial purpose. Moreover, upon information and\
    \ belief, it copies \ninto its index every single word of Plaintiffs’ copyrighted\
    \ works that it can get its hands on. \nAdditionally, the use to which it puts\
    \ these copies is to create a commercial substitute for Plaintiffs’ \nprotected\
    \ works – in Perplexity’s own words, to allow and encourage users to “Skip the\
    \ Links” to \nPlaintiffs’ original works. Such substitution causes substantial\
    \ harm to Plaintiffs’ traditional \nadvertising and subscription revenues. Perplexity’s\
    \ conduct also harms Plaintiffs’ additional, \nestablished revenue stream from\
    \ licensing to more scrupulous AI companies. Nor is Perplexity’s"
- source_sentence: How many pages does the document filed in case 1:24-cv-12437-WGY
    contain?
  sentences:
  - Case 1:24-cv-12437-WGY Document 8 Filed 10/08/24 Page 26 of 42
  - "more specialized in conducting a specific task, responding to prompts specific\
    \ to a subject area, or \nrecognizing nuances in particular questions. \n48. \n\
    Fine-tuning might also ensure that the LLM responds to certain prompts by \nmimicking\
    \ a certain linguistic style. For example, outputting a cooking recipe requires\
    \ a distinct \noutput style from recounting the statistics of the Allies’ landing\
    \ at Normandy’s beaches on D-Day, \nor from writing a poem about the summer wind.\
    \ A medical treatise uses a distinct linguistic style \nfrom a sports recap. \n\
    Case 1:24-cv-07984 Document 1 Filed 10/21/24 Page 13 of 42"
  - "D. The Balancing Of The Irreparable Harm Heavily Favors The Plaintiffs \nThe\
    \ balance of harms in this case clearly favors granting an injunction. If the\
    \ injunction is \nnot granted, RNH will suffer irreparable harm that cannot be\
    \ adequately remedied by any future \ncourt decision or monetary compensation.\
    \ RNH’s academic and professional future is at stake, as \na delayed resolution\
    \ of the investigation into academic sanctions could result in missed deadlines\
    \ \nfor college applications, exclusion from consideration at elite universities,\
    \ and a permanent stain \non his academic record. The reputational damage and\
    \ uncertainty caused will undermine RNH’s \nability to compete fairly with other\
    \ applicants, affecting not only his immediate educational"
- source_sentence: What challenges do professional journalists and publishers face
    that may impact their ability to enforce their intellectual property rights?
  sentences:
  - "ban or prohibition on the use of AI by students. The Defendants were not trained\
    \ on any policies \nor procedures for use of AI alone, never mind what they were\
    \ “able to do” to students who used \nit. The entire purpose behind having\
    \ such policies and procedures in place is to ensure notice, \nequity, fairness\
    \ and to be sure: a level playing field for all. Making matters worse, there\
    \ exists \nno adequate procedures and policies for the induction of an applicant\
    \ into NHS when compared to \nother members who are inducted despite the same\
    \ or similar infractions. This is a denial of student \nrights of the highest\
    \ order. \n \nIn the case here, RNH was disciplined on an ad hoc and on-going\
    \ basis over more than six"
  - "19 \nrespect. They feel very good about it. And in our user interface, even though\
    \ we give the answer, \nwe do show the user exactly where the answer is coming\
    \ from.”16 \n68. \nAs Srinivas surely knows or should know, academic standards\
    \ for avoiding \nplagiarism are wholly independent from copyright law.17 Dow Jones\
    \ and NYP Holdings editors \nand journalists are not graduate students working\
    \ out of a library or lab, eager to have someone \nacknowledge and utilize their\
    \ research. They are professional journalists and publishers – working \nunder\
    \ high-pressure deadlines, sometimes in dangerous places – whose livelihoods depend\
    \ on the \nenforcement and monetization of their intellectual property rights.\
    \ \n69."
  - "example the school committee under Mass. G.L. c. 71, § 37, may punish a student\
    \ offender without \na prior rule specifically forbidding the offending conduct;\
    \ however, surely such authority cannot \nbe limitless. Moreover, this court believes\
    \ that the imposition of a severe penalty without a \nspecific promulgated rule\
    \ might be constitutionally deficient under certain circumstances. \nId. (emphasis\
    \ supplied). “What those circumstances are can only be left to the development\
    \ of the \ncase law in the area.” Id. There has been no case law developed\
    \ in the area of school discipline \nCase 1:24-cv-12437-WGY Document 8 Filed\
    \ 10/08/24 Page 28 of 42"
pipeline_tag: sentence-similarity
library_name: sentence-transformers
metrics:
- cosine_accuracy@1
- cosine_accuracy@3
- cosine_accuracy@5
- cosine_accuracy@10
- cosine_precision@1
- cosine_precision@3
- cosine_precision@5
- cosine_precision@10
- cosine_recall@1
- cosine_recall@3
- cosine_recall@5
- cosine_recall@10
- cosine_ndcg@10
- cosine_mrr@10
- cosine_map@100
model-index:
- name: SentenceTransformer based on Snowflake/snowflake-arctic-embed-l
  results:
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: Unknown
      type: unknown
    metrics:
    - type: cosine_accuracy@1
      value: 0.7291666666666666
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.8541666666666666
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.9375
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 1.0
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.7291666666666666
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.28472222222222215
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.1875
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.09999999999999999
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.7291666666666666
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.8541666666666666
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.9375
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 1.0
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.8575788154610162
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.8125248015873017
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.8125248015873016
      name: Cosine Map@100
---

# SentenceTransformer based on Snowflake/snowflake-arctic-embed-l

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [Snowflake/snowflake-arctic-embed-l](https://huggingface.co/Snowflake/snowflake-arctic-embed-l). It maps sentences & paragraphs to a 1024-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [Snowflake/snowflake-arctic-embed-l](https://huggingface.co/Snowflake/snowflake-arctic-embed-l) <!-- at revision d8fb21ca8d905d2832ee8b96c894d3298964346b -->
- **Maximum Sequence Length:** 512 tokens
- **Output Dimensionality:** 1024 dimensions
- **Similarity Function:** Cosine Similarity
<!-- - **Training Dataset:** Unknown -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
  (2): Normalize()
)
```
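This stack is exactly the three modules declared in `modules.json`: a BERT encoder, CLS-token pooling (per `1_Pooling/config.json`), and L2 normalization. As a minimal sketch, the same pipeline can be assembled by hand with the public `sentence_transformers.models` API; the base-model checkpoint below is a stand-in for this repository's fine-tuned weights:

```python
from sentence_transformers import SentenceTransformer, models

# Build the same Transformer -> CLS Pooling -> Normalize stack explicitly.
# "Snowflake/snowflake-arctic-embed-l" stands in for this repo's weights.
transformer = models.Transformer("Snowflake/snowflake-arctic-embed-l", max_seq_length=512)
pooling = models.Pooling(
    transformer.get_word_embedding_dimension(),  # 1024
    pooling_mode="cls",
)
model = SentenceTransformer(modules=[transformer, pooling, models.Normalize()])
```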

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("llm-wizard/legal-ft")
# Run inference
sentences = [
    'What challenges do professional journalists and publishers face that may impact their ability to enforce their intellectual property rights?',
    '19 \nrespect. They feel very good about it. And in our user interface, even though we give the answer, \nwe do show the user exactly where the answer is coming from.”16 \n68. \nAs Srinivas surely knows or should know, academic standards for avoiding \nplagiarism are wholly independent from copyright law.17 Dow Jones and NYP Holdings editors \nand journalists are not graduate students working out of a library or lab, eager to have someone \nacknowledge and utilize their research. They are professional journalists and publishers – working \nunder high-pressure deadlines, sometimes in dangerous places – whose livelihoods depend on the \nenforcement and monetization of their intellectual property rights. \n69.',
    'ban or prohibition on the use of AI by students. The Defendants were not trained on any policies \nor procedures for use of AI alone, never mind what they were “able to do” to students who used \nit. The entire purpose behind having such policies and procedures in place is to ensure notice, \nequity, fairness and to be sure: a level playing field for all. Making matters worse, there exists \nno adequate procedures and policies for the induction of an applicant into NHS when compared to \nother members who are inducted despite the same or similar infractions. This is a denial of student \nrights of the highest order. \n \nIn the case here, RNH was disciplined on an ad hoc and on-going basis over more than six',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 1024]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
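For retrieval, note that this repository ships a stored query prompt ("Represent this sentence for searching relevant passages: ", see `config_sentence_transformers.json`): queries should be encoded with that prompt while passages are encoded as-is. A minimal sketch, with illustrative example texts:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("llm-wizard/legal-ft")

# Queries go through the stored "query" prompt; passages are encoded without a prompt.
query_embedding = model.encode(
    ["How is plagiarism defined in the context provided?"],
    prompt_name="query",
)
passage_embeddings = model.encode([
    "plagiarism is defined as the practice of taking someone else’s work or ideas and passing them off",
    "Case 1:24-cv-12437-WGY Document 8 Filed 10/08/24 Page 26 of 42",
])
scores = model.similarity(query_embedding, passage_embeddings)
print(scores)  # the first passage should score noticeably higher
```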

<!--
### Direct Usage (Transformers)

<details><summary>Click to see the direct usage in Transformers</summary>

</details>
-->

<!--
### Downstream Usage (Sentence Transformers)

You can finetune this model on your own dataset.

<details><summary>Click to expand</summary>

</details>
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

## Evaluation

### Metrics

#### Information Retrieval

* Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)

| Metric              | Value      |
|:--------------------|:-----------|
| cosine_accuracy@1   | 0.7292     |
| cosine_accuracy@3   | 0.8542     |
| cosine_accuracy@5   | 0.9375     |
| cosine_accuracy@10  | 1.0        |
| cosine_precision@1  | 0.7292     |
| cosine_precision@3  | 0.2847     |
| cosine_precision@5  | 0.1875     |
| cosine_precision@10 | 0.1        |
| cosine_recall@1     | 0.7292     |
| cosine_recall@3     | 0.8542     |
| cosine_recall@5     | 0.9375     |
| cosine_recall@10    | 1.0        |
| **cosine_ndcg@10**  | **0.8576** |
| cosine_mrr@10       | 0.8125     |
| cosine_map@100      | 0.8125     |

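The evaluator pairs each held-out question with its source passage and scores ranked retrieval over the corpus. A minimal sketch of such a run; the queries, corpus, and relevance judgments below are illustrative stand-ins for the actual evaluation split:

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator

model = SentenceTransformer("llm-wizard/legal-ft")

# Illustrative stand-ins for the held-out split behind the table above.
queries = {"q1": "How is plagiarism defined in the context provided?"}
corpus = {
    "d1": "plagiarism is defined as the practice of taking someone else’s work or ideas and passing them off",
    "d2": "Case 1:24-cv-07984 Document 1 Filed 10/21/24 Page 12 of 42",
}
relevant_docs = {"q1": {"d1"}}  # query id -> set of relevant corpus ids

evaluator = InformationRetrievalEvaluator(queries, corpus, relevant_docs, name="legal-demo")
results = evaluator(model)
print(results["legal-demo_cosine_ndcg@10"])
```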
<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Dataset

#### Unnamed Dataset

* Size: 400 training samples
* Columns: <code>sentence_0</code> and <code>sentence_1</code>
* Approximate statistics based on the first 400 samples:
  |         | sentence_0                                                                          | sentence_1                                                                            |
  |:--------|:------------------------------------------------------------------------------------|:----------------------------------------------------------------------------------------|
  | type    | string                                                                              | string                                                                                |
  | details | <ul><li>min: 10 tokens</li><li>mean: 20.93 tokens</li><li>max: 35 tokens</li></ul> | <ul><li>min: 25 tokens</li><li>mean: 140.37 tokens</li><li>max: 260 tokens</li></ul> |
* Samples:
  | sentence_0 | sentence_1 |
  |:-----------|:-----------|
  | <code>What provisions of the 2023-2024 Handbook were referenced regarding the use of AI and academic integrity?</code> | <code>13 <br> <br>procedure, expectation, conduct, discipline, sanction or consequence for the use of AI. Id. at ¶102. <br>Under these circumstances, the use of AI was not a violation of the then existing “Academic <br>Integrity: Cheating and Plagiarism” provisions of the 2023-2024 Handbook. Id. at ¶104. As such, <br>accusations of cheating, plagiarism, and academic misconduct or dishonesty were not supported <br>by the record evidence which, at all times relevant, the Defendants have had in their care, custody <br>and control. Id. at ¶105. <br>While there is much dispute as to whether the use of generative AI constitutes plagiarism, <br>plagiarism is defined as the practice of taking someone else’s work or ideas and passing them off</code> |
  | <code>How is plagiarism defined in the context provided?</code> | <code>13 <br> <br>procedure, expectation, conduct, discipline, sanction or consequence for the use of AI. Id. at ¶102. <br>Under these circumstances, the use of AI was not a violation of the then existing “Academic <br>Integrity: Cheating and Plagiarism” provisions of the 2023-2024 Handbook. Id. at ¶104. As such, <br>accusations of cheating, plagiarism, and academic misconduct or dishonesty were not supported <br>by the record evidence which, at all times relevant, the Defendants have had in their care, custody <br>and control. Id. at ¶105. <br>While there is much dispute as to whether the use of generative AI constitutes plagiarism, <br>plagiarism is defined as the practice of taking someone else’s work or ideas and passing them off</code> |
  | <code>What is the case number associated with the document filed on 10/21/24?</code> | <code>program-ad-revenue-sharing-ai-time-fortune-der-spiegel. <br>Case 1:24-cv-07984 Document 1 Filed 10/21/24 Page 21 of 42</code> |
* Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters:
  ```json
  {
      "loss": "MultipleNegativesRankingLoss",
      "matryoshka_dims": [
          768,
          512,
          256,
          128,
          64
      ],
      "matryoshka_weights": [
          1,
          1,
          1,
          1,
          1
      ],
      "n_dims_per_step": -1
  }
  ```
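Because MatryoshkaLoss also supervises the leading sub-vectors at each of these dimensions, embeddings can be truncated at inference time to trade a little accuracy for speed and storage. A minimal sketch using `truncate_dim`, which is available in recent Sentence Transformers releases:

```python
from sentence_transformers import SentenceTransformer

# MatryoshkaLoss trains the leading dimensions to stand on their own,
# so the 1024-d output can be truncated to one of the listed sizes.
model = SentenceTransformer("llm-wizard/legal-ft", truncate_dim=256)
embeddings = model.encode(["a smaller, cheaper embedding of the same passage"])
print(embeddings.shape)
# (1, 256)
```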

### Training Hyperparameters
#### Non-Default Hyperparameters

- `eval_strategy`: steps
- `per_device_train_batch_size`: 10
- `per_device_eval_batch_size`: 10
- `num_train_epochs`: 10
- `multi_dataset_batch_sampler`: round_robin

#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: steps
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 10
- `per_device_eval_batch_size`: 10
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 5e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1
- `num_train_epochs`: 10
- `max_steps`: -1
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.0
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: None
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `include_for_metrics`: []
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `use_liger_kernel`: False
- `eval_use_gather_object`: False
- `average_tokens_across_devices`: False
- `prompts`: None
- `batch_sampler`: batch_sampler
- `multi_dataset_batch_sampler`: round_robin

</details>
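Put together, this corresponds to a standard Sentence Transformers v3 training loop. A minimal sketch under the hyperparameters above; the single training pair and the output path are illustrative stand-ins for the 400-row dataset and the real run:

```python
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import MatryoshkaLoss, MultipleNegativesRankingLoss

model = SentenceTransformer("Snowflake/snowflake-arctic-embed-l")

# Stand-in for the 400 (sentence_0, sentence_1) question/passage pairs.
train_dataset = Dataset.from_dict({
    "sentence_0": ["How is plagiarism defined in the context provided?"],
    "sentence_1": ["plagiarism is defined as the practice of taking someone else’s work or ideas and passing them off"],
})

# In-batch negatives, wrapped so the loss is also applied at each Matryoshka dimension.
inner_loss = MultipleNegativesRankingLoss(model)
loss = MatryoshkaLoss(model, inner_loss, matryoshka_dims=[768, 512, 256, 128, 64])

args = SentenceTransformerTrainingArguments(
    output_dir="legal-ft",  # illustrative path
    num_train_epochs=10,
    per_device_train_batch_size=10,
)
trainer = SentenceTransformerTrainer(
    model=model, args=args, train_dataset=train_dataset, loss=loss
)
trainer.train()
```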

### Training Logs
| Epoch | Step | cosine_ndcg@10 |
|:-----:|:----:|:--------------:|
| 1.0   | 40   | 0.8182         |
| 1.25  | 50   | 0.8172         |
| 2.0   | 80   | 0.8112         |
| 2.5   | 100  | 0.8414         |
| 3.0   | 120  | 0.8236         |
| 3.75  | 150  | 0.7962         |
| 4.0   | 160  | 0.7930         |
| 5.0   | 200  | 0.8536         |
| 6.0   | 240  | 0.8263         |
| 6.25  | 250  | 0.8257         |
| 7.0   | 280  | 0.8475         |
| 7.5   | 300  | 0.8505         |
| 8.0   | 320  | 0.8499         |
| 8.75  | 350  | 0.8582         |
| 9.0   | 360  | 0.8576         |
| 10.0  | 400  | 0.8576         |


### Framework Versions
- Python: 3.11.11
- Sentence Transformers: 3.4.1
- Transformers: 4.48.2
- PyTorch: 2.5.1+cu124
- Accelerate: 1.3.0
- Datasets: 3.2.0
- Tokenizers: 0.21.0

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### MatryoshkaLoss
```bibtex
@misc{kusupati2024matryoshka,
    title={Matryoshka Representation Learning},
    author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi},
    year={2024},
    eprint={2205.13147},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```

#### MultipleNegativesRankingLoss
```bibtex
@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json
ADDED
@@ -0,0 +1,25 @@
{
  "_name_or_path": "Snowflake/snowflake-arctic-embed-l",
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.48.2",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 30522
}
config_sentence_transformers.json
ADDED
@@ -0,0 +1,12 @@
{
  "__version__": {
    "sentence_transformers": "3.4.1",
    "transformers": "4.48.2",
    "pytorch": "2.5.1+cu124"
  },
  "prompts": {
    "query": "Represent this sentence for searching relevant passages: "
  },
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:007afa097a0e16fe9d16b2a3c72b85a55d3e55e96c83773daaa93d600731967e
size 1336413848
modules.json
ADDED
@@ -0,0 +1,20 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": false
}
special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
{
  "cls_token": {
    "content": "[CLS]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "[MASK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "[SEP]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,63 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_lower_case": true,
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "max_length": 512,
  "model_max_length": 512,
  "pad_to_multiple_of": null,
  "pad_token": "[PAD]",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "sep_token": "[SEP]",
  "stride": 0,
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "truncation_side": "right",
  "truncation_strategy": "longest_first",
  "unk_token": "[UNK]"
}
vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff