Sakil committed
Commit aeca7d5 · 1 Parent(s): 568b6f7

Update README.md

Files changed (1)
  1. README.md +46 -1
README.md CHANGED
@@ -25,4 +25,49 @@ pipeline_tag: sentence-similarity
 
 # Application:
 * This model is useful for semantic search, sentence similarity, and recommendation systems.
- * You can fine-tune this model for your particular use cases.
+ * You can fine-tune this model for your particular use cases.
+
+ # Model Implementation
+
+ # pip install -U sentence-transformers
+
+ from sentence_transformers import SentenceTransformer, util
+
+ model_name = "Sakil/sentence_similarity_semantic_search"
+ model = SentenceTransformer(model_name)  # load the model from the Hugging Face Hub
+
+ sentences = ['A man is eating food.',
+              'A man is eating a piece of bread.',
+              'The girl is carrying a baby.',
+              'A man is riding a horse.',
+              'A woman is playing violin.',
+              'Two men pushed carts through the woods.',
+              'A man is riding a white horse on an enclosed ground.',
+              'A monkey is playing drums.',
+              'Someone in a gorilla costume is playing a set of drums.'
+              ]
+
+ # Encode all sentences
+ embeddings = model.encode(sentences)
+
+ # Compute cosine similarity between all pairs
+ cos_sim = util.cos_sim(embeddings, embeddings)
+
+ # Add all pairs to a list with their cosine similarity score
+ all_sentence_combinations = []
+ for i in range(len(cos_sim) - 1):
+     for j in range(i + 1, len(cos_sim)):
+         all_sentence_combinations.append([cos_sim[i][j], i, j])
+
+ # Sort the list by the highest cosine similarity score
+ all_sentence_combinations = sorted(all_sentence_combinations, key=lambda x: x[0], reverse=True)
+
+ print("Top-5 most similar pairs:")
+ for score, i, j in all_sentence_combinations[0:5]:
+     print("{} \t {} \t {:.4f}".format(sentences[i], sentences[j], cos_sim[i][j]))
+
+ # GitHub: [Sakil Ansari](https://github.com/Sakil786/hate_speech_detection_pretrained_model)