Upload 6 files
- bot.py +58 -0
- chatbotmodel.h5 +3 -0
- classes.pkl +3 -0
- intents.json +109 -0
- train.py +87 -0
- words.pkl +3 -0
bot.py
ADDED
@@ -0,0 +1,58 @@
import nltk
import random
import numpy as np
import json
import pickle
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import load_model

lemmatizer = WordNetLemmatizer()

# Load the intents file and the artifacts produced by train.py
with open('intents.json') as json_file:
    intents = json.load(json_file)

words = pickle.load(open('words.pkl', 'rb'))
classes = pickle.load(open('classes.pkl', 'rb'))
model = load_model('chatbotmodel.h5')

def clean_up_sentence(sentence):
    # Tokenize and lemmatize the input so it matches the training vocabulary
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
    return sentence_words

def bag_of_words(sentence):
    # Build a binary bag-of-words vector over the training vocabulary
    sentence_words = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for w in sentence_words:
        for i, word in enumerate(words):
            if word == w:
                bag[i] = 1
    return np.array(bag)

def predict_class(sentence):
    # Run the model and keep only predictions above the confidence threshold
    bow = bag_of_words(sentence)
    res = model.predict(np.array([bow]))[0]
    ERROR_THRESHOLD = 0.25
    results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]

    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append({'intent': classes[r[0]], 'probability': str(r[1])})
    return return_list

def get_response(intents_list, intents_json):
    # Pick a random response for the top-ranked intent
    tag = intents_list[0]['intent']
    list_of_intents = intents_json['intents']
    result = ''  # fallback so the function never returns an undefined name
    for i in list_of_intents:
        if i['tag'] == tag:
            result = random.choice(i['responses'])
            break
    return result

def chat(text):
    ints = predict_class(text)
    res = get_response(ints, intents)
    return res

print("GO! BOT IS RUNNING")
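
Note: as uploaded, bot.py only prints the startup message and exposes chat(); it does not start an interactive loop. A minimal sketch of how chat() could be driven from the console (the loop below is illustrative and not part of the uploaded file):

    if __name__ == '__main__':
        while True:
            message = input("You: ")
            if message.strip().lower() in ('quit', 'exit'):
                break
            print("Medbot:", chat(message))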
chatbotmodel.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0703f08768ffca2457cf3690e9a8f18ea2dec3127d985af5f45b4d9eb5fa5ebc
size 242776
classes.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0e1f79d3640efc82efba391235ce3272f75910e2864507eed926d6093c10ac2c
size 276
intents.json
ADDED
@@ -0,0 +1,109 @@
{"intents":
 [
  {
   "tag": "greetings",
   "patterns": ["hello", "hey", "hi", "good day", "greetings", "what's up?", "how is it going"],
   "responses": ["hello", "hey!", "what can i do for you?"]
  },
  {
   "tag": "goodbye",
   "patterns": ["cya", "see you later", "goodbye", "have a good day", "bye", "cao", "see ya"],
   "responses": ["have a nice day", "goodbye"]
  },
  {
   "tag": "age",
   "patterns": ["how old", "how old are you?", "what is your age", "how old are you", "age?"],
   "responses": ["I get reborn after every compilation", "hey!", "my owners are on average 20 years old!"]
  },
  {
   "tag": "name",
   "patterns": ["what is your name", "what should i call you", "what's your name?", "who are you?", "can you tell me your name"],
   "responses": ["you can call me Medbot!", "i am Medbot!", "i am Medbot, your medical assistant"]
  },
  {
   "tag": "common cold symptoms",
   "patterns": ["Runny or stuffy nose",
                "Sore throat",
                "Cough",
                "Congestion",
                "Slight body aches or a mild headache",
                "Sneezing",
                "Low-grade fever",
                "Generally feeling unwell (malaise)"],
   "responses": ["It seems that you are suffering from a common cold"]
  },
  {
   "tag": "fever symptoms",
   "patterns": ["Sweating",
                "Chills and shivering",
                "Headache",
                "Muscle aches",
                "Loss of appetite",
                "Irritability",
                "Dehydration",
                "General weakness"],
   "responses": ["It seems that you are suffering from a fever"]
  },
  {
   "tag": "Diabetes symptoms",
   "patterns": ["increased hunger",
                "increased thirst",
                "weight loss",
                "frequent urination",
                "blurry vision",
                "extreme fatigue"],
   "responses": ["It seems that you are suffering from diabetes"]
  },
  {
   "tag": "Depression symptoms",
   "patterns": ["Hopeless outlook",
                "Lost interest",
                "Increased fatigue",
                "sleep problems",
                "Anxiety",
                "change in weight",
                "thoughts of death"],
   "responses": ["It seems that you are suffering from depression"]
  },
  {
   "tag": "Asthma symptoms",
   "patterns": ["coughing",
                "tightness in the chest",
                "shortness of breath",
                "difficulty talking",
                "panic",
                "fatigue"],
   "responses": ["It seems that you are suffering from asthma"]
  },
  {
   "tag": "common cold prevention",
   "patterns": ["What medicines can I buy to help me with my common cold?", "tell me some prevention methods for the common cold", "What should I eat or drink if I am suffering from a common cold?", "How can I keep from getting a cold or the flu?"],
   "responses": ["Medicines you can take: Dextromethorphan, a decongestant, Diphenhydramine, Crocin Cold & Flu Max. Precautions to follow: wash your hands, avoid touching your face, clean frequently used surfaces, use hand sanitizers. Suggested foods: garlic, vitamin C–containing fruits, leafy greens, broccoli, oatmeal, spices, chicken soup"]
  },
  {
   "tag": "fever prevention",
   "patterns": ["What medicines can I buy to help me with my fever?", "tell me some prevention methods for fever", "What should I eat or drink if I am suffering from a fever?", "How can I keep from getting a fever?"],
   "responses": ["Medicines you can take: acetaminophen, ibuprofen, aspirin, Crocin Cold & Flu Max. Precautions to follow: wash your hands, cover your mouth when you cough and your nose when you sneeze, clean frequently used surfaces, avoid sharing cups, glasses, and eating utensils with other people. Suggested foods: garlic, vitamin C–containing fruits, leafy greens, broccoli, oatmeal, spices, chicken soup"]
  },
  {
   "tag": "diabetes prevention",
   "patterns": ["What medicines can I buy to help me with my diabetes?", "tell me some prevention methods for diabetes", "What should I eat or drink if I am suffering from diabetes?", "How can I keep from getting diabetes?"],
   "responses": ["Medicines you can take: insulin, an amylinomimetic drug, a dipeptidyl peptidase-4 (DPP-4) inhibitor. Precautions to follow: cut sugar and refined carbs from your diet, work out regularly, drink water as your primary beverage, lose weight if you’re overweight or obese, quit smoking, follow a very-low-carb diet, watch portion sizes. Suggested foods: leafy greens, avocados, eggs"]
  },
  {
   "tag": "depression prevention",
   "patterns": ["What medicines can I buy to help me with my depression?", "tell me some prevention methods for depression", "What should I eat or drink if I am suffering from depression?", "How can I keep from getting depression?"],
   "responses": ["Medicines you can take: brexpiprazole, quetiapine, olanzapine. Precautions to follow: exercise regularly, cut back on social media time, drink water as your primary beverage, build strong relationships, minimize your daily choices, follow a very-low-carb diet. Suggested foods: get enough vitamin D, include omega-3 fatty acids, eat beans and legumes"]
  },
  {
   "tag": "asthma prevention",
   "patterns": ["What medicines can I buy to help me with my asthma?", "tell me some prevention methods for asthma", "What should I eat or drink if I am suffering from asthma?", "How can I keep from getting asthma?"],
   "responses": ["Medicines you can take: epinephrine, an anticholinergic, ProAir HFA. Precautions to follow: identify your asthma triggers, stay away from allergens, avoid smoke of any type. Suggested foods: carrots, juice, eggs, broccoli, cantaloupe, milk"]
  },
  {
   "tag": "Consultation",
   "patterns": ["who should i contact for consultation?", "is there any doctor available?", "can you give me some suggestions for doctor consultations?", "can you set up a meeting with a doctor for consultation?", "is there any doctor available for consultation"],
   "responses": ["You can contact various doctors here for any kind of consultation: 1. https://www.1mg.com/online-doctor-consultation, 2. https://www.tatahealth.com/online-doctor-consultation/general-physician, 3. https://www.doconline.com/, or you can pay a visit to your local area doctor or family doctor."]
  }
 ]}
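
Every entry in intents.json is expected to provide "tag", "patterns", and "responses"; train.py and bot.py index these keys directly and will raise a KeyError otherwise. An optional sanity check along these lines (not part of the upload) can catch malformed entries before training:

    import json

    with open('intents.json') as f:
        data = json.load(f)

    for intent in data['intents']:
        missing = {'tag', 'patterns', 'responses'} - set(intent)
        if missing:
            print(f"intent {intent.get('tag', '<unnamed>')} is missing {missing}")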
train.py
ADDED
@@ -0,0 +1,87 @@
import nltk
import random
import numpy as np
import json
import pickle
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import SGD

lemmatizer = WordNetLemmatizer()

# Load the intents file
with open('intents.json') as json_file:
    intents = json.load(json_file)

# Initialize lists
words = []
classes = []
documents = []
ignore_words = ['?', '!']

# Process the intents: collect the vocabulary, the class labels,
# and (tokenized pattern, tag) pairs
for intent in intents['intents']:
    for pattern in intent['patterns']:
        word_list = nltk.word_tokenize(pattern)
        words.extend(word_list)
        documents.append((word_list, intent['tag']))
        if intent['tag'] not in classes:
            classes.append(intent['tag'])

words = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words]
words = sorted(list(set(words)))
classes = sorted(list(set(classes)))

pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(classes, open('classes.pkl', 'wb'))

training = []
output_empty = [0] * len(classes)

# Debugging: print lengths of words and classes
print(f'Number of words: {len(words)}')
print(f'Number of classes: {len(classes)}')

# Build one (bag-of-words, one-hot label) pair per document
for doc in documents:
    bag = []
    pattern_words = doc[0]
    pattern_words = [lemmatizer.lemmatize(word.lower()) for word in pattern_words]
    for word in words:
        bag.append(1 if word in pattern_words else 0)
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    training.append([bag, output_row])

# Debugging: check for inconsistencies in training data
for i, t in enumerate(training):
    if len(t[0]) != len(words):
        print(f'Inconsistent length in training data at index {i}: {len(t[0])} != {len(words)}')

random.shuffle(training)
training = np.array(training, dtype=object)

# Debugging: print shape of training data
print(f'Training data shape: {training.shape}')

train_x = list(training[:, 0])
train_y = list(training[:, 1])

# Debugging: print shapes of train_x and train_y
print(f'Shape of train_x: {np.array(train_x).shape}')
print(f'Shape of train_y: {np.array(train_y).shape}')

# Simple feed-forward classifier over the bag-of-words input
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))

sgd = SGD(learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbotmodel.h5')

print("Model trained and saved.")
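
Both train.py and bot.py depend on NLTK's tokenizer and WordNet lemmatizer, so the corresponding corpora must be available before either script is run. Assuming a standard NLTK installation, a one-time download along these lines should suffice:

    import nltk

    nltk.download('punkt')    # tokenizer models used by nltk.word_tokenize
    nltk.download('wordnet')  # data for WordNetLemmatizer
    nltk.download('omw-1.4')  # extra lemma mappings needed on newer NLTK releases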
words.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3bf55e8b53f87caf9396d47a0c70a9bc1deab9ec687bcb269950314a75194747
size 1126