import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report
import joblib

# Load the dataset from the txt file
data_path = 'trainingdata.txt'
data = []
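# Expected format of each line in trainingdata.txt (an assumption inferred from
# the parsing below; adjust if your file differs). The pair shown is a made-up
# placeholder:
#   "How do I merge two PDF files?", "pdf_merger"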

# Read the file and parse each line into a (question, tool) pair
with open(data_path, 'r') as file:
    for line in file:
        # Split at the last ', "' delimiter separating the quoted question from the tool label
        parts = line.rsplit(', "', 1)
        if len(parts) == 2:
            question = parts[0].strip().strip('"')
            tool = parts[1].strip().strip('",')
            data.append((question, tool))

# Create a DataFrame
df = pd.DataFrame(data, columns=['question', 'tool'])

# Split the data
X_train, X_test, y_train, y_test = train_test_split(df['question'], df['tool'], test_size=0.2, random_state=42)

# Vectorize the text data
vectorizer = TfidfVectorizer()
X_train_vectorized = vectorizer.fit_transform(X_train)
X_test_vectorized = vectorizer.transform(X_test)

# Train a Naive Bayes classifier
clf = MultinomialNB()
clf.fit(X_train_vectorized, y_train)

# Make predictions
y_pred = clf.predict(X_test_vectorized)

# Print the classification report
print(classification_report(y_test, y_pred))

# Save the model and vectorizer
joblib.dump(clf, 'findtool_model.pkl')
joblib.dump(vectorizer, 'vectorizer.pkl')
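
# Example usage (a sketch, not part of the training run): load the saved
# artifacts later and classify a new question. The sample question is a
# made-up placeholder.
#
#   clf = joblib.load('findtool_model.pkl')
#   vectorizer = joblib.load('vectorizer.pkl')
#   features = vectorizer.transform(["How do I convert a PDF to plain text?"])
#   print(clf.predict(features)[0])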