yogeshjat commited on
Commit
ccab347
·
1 Parent(s): fddb930
Files changed (4) hide show
  1. .github/workflows/deploy.yml +28 -0
  2. app.py +7 -0
  3. model.py +32 -0
  4. requirements.txt +5 -0
.github/workflows/deploy.yml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
name: Deploy AI Model

on:
  push:
    branches:
      - main  # deploy whenever the default branch is updated

jobs:
  deploy:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        # v4 — v3 runs on the deprecated Node 16 runtime
        uses: actions/checkout@v4

      - name: Set up Python
        # v5 — v2 is deprecated and emits runner warnings
        uses: actions/setup-python@v5
        with:
          python-version: '3.9'  # keep in sync with the version the model was tested on

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Run the model deployment script
        # NOTE(review): model.py only defines prediction() and has no
        # `if __name__ == "__main__"` entry point, so this step currently
        # just imports the module — confirm the intended deploy command.
        run: |
          python model.py
app.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
import gradio as gr

from model import prediction


def predict(files, text):
    """Rank the uploaded files by semantic similarity to *text*.

    Bug fix: ``prediction`` takes TWO arguments ``(file_paths, uploaded_text)``;
    the previous ``prediction(text)`` call raised TypeError on every request.

    Parameters
    ----------
    files : list[str] | None
        File paths delivered by the Gradio File component
        (``type="filepath"`` yields plain path strings).
    text : str
        The query text to compare the files against.

    Returns
    -------
    str
        One "path: score" line per file, best match first.
    """
    file_paths = files or []  # Gradio passes None when nothing is uploaded
    results = prediction(file_paths, text)
    return "\n".join(f"{path}: {score:.4f}" for path, score in results)


demo = gr.Interface(
    fn=predict,
    inputs=[
        gr.File(file_count="multiple", type="filepath", label="Files to rank"),
        gr.Textbox(label="Query text"),
    ],
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()
model.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

# The encoder is loaded lazily and cached at module level so repeated calls
# (e.g. one per web request) don't re-instantiate / re-download the model.
_MODEL_NAME = 'sentence-transformers/bert-base-nli-mean-tokens'
_model = None


def _get_model():
    """Return the cached SentenceTransformer, creating it on first use."""
    global _model
    if _model is None:
        _model = SentenceTransformer(_MODEL_NAME)
    return _model


def prediction(file_paths, uploaded_text):
    """Rank *file_paths* by cosine similarity of their contents to *uploaded_text*.

    Parameters
    ----------
    file_paths : list[str]
        Paths of text files whose contents are compared against the query.
    uploaded_text : str
        The query text.

    Returns
    -------
    list[tuple[str, float]]
        ``(path, similarity)`` pairs sorted from most to least similar.
        Empty list when *file_paths* is empty.
    """
    # Guard: with no candidates, cosine_similarity would be called on an
    # empty matrix and fail — return an empty ranking instead.
    if not file_paths:
        return []

    sentences = []
    for file_path in file_paths:
        # Explicit utf-8: the platform default encoding is not reliable.
        with open(file_path, 'r', encoding='utf-8') as file:
            sentences.append(file.read())

    # The query goes last so it can be split off after a single encode pass.
    sentences.append(uploaded_text)

    embeddings = _get_model().encode(sentences)

    query_embedding = embeddings[-1]        # the uploaded text
    candidate_embeddings = embeddings[:-1]  # one row per input file
    scores = cosine_similarity([query_embedding], candidate_embeddings)[0]

    # Pair each file with its similarity, best match first.
    return sorted(zip(file_paths, scores), key=lambda pair: pair[1], reverse=True)
+
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ transformers==4.47.1
2
+ torch==2.5.1
3
+ sentence-transformers==3.3.1
4
+ scikit-learn==1.6.0
5
+ gradio==4.26.0