sebastiancgeorge committed on
Commit
330e2c5
·
verified ·
1 Parent(s): 3f71956

Upload 12 files

Browse files
.idea/.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Default ignored files
2
+ /shelf/
3
+ /workspace.xml
.idea/T24.iml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <module type="PYTHON_MODULE" version="4">
3
+ <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$" />
5
+ <orderEntry type="inheritedJdk" />
6
+ <orderEntry type="sourceFolder" forTests="false" />
7
+ </component>
8
+ </module>
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <profile version="1.0">
3
+ <option name="myName" value="Project Default" />
4
+ <inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
5
+ <option name="ignoredErrors">
6
+ <list>
7
+ <option value="N803" />
8
+ <option value="N802" />
9
+ <option value="N806" />
10
+ <option value="N801" />
11
+ </list>
12
+ </option>
13
+ </inspection_tool>
14
+ </profile>
15
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <settings>
3
+ <option name="USE_PROJECT_PROFILE" value="false" />
4
+ <version value="1.0" />
5
+ </settings>
6
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11" project-jdk-type="Python SDK" />
4
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectModuleManager">
4
+ <modules>
5
+ <module fileurl="file://$PROJECT_DIR$/.idea/T24.iml" filepath="$PROJECT_DIR$/.idea/T24.iml" />
6
+ </modules>
7
+ </component>
8
+ </project>
.idea/workspace.xml ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="AutoImportSettings">
4
+ <option name="autoReloadType" value="SELECTIVE" />
5
+ </component>
6
+ <component name="ChangeListManager">
7
+ <list default="true" id="15e16f15-f7a0-47bd-8da7-9e3e2bf12e47" name="Changes" comment="" />
8
+ <option name="SHOW_DIALOG" value="false" />
9
+ <option name="HIGHLIGHT_CONFLICTS" value="true" />
10
+ <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
11
+ <option name="LAST_RESOLUTION" value="IGNORE" />
12
+ </component>
13
+ <component name="MarkdownSettingsMigration">
14
+ <option name="stateVersion" value="1" />
15
+ </component>
16
+ <component name="ProjectId" id="2l1D3W1d2Nr45X1uRVePz8Qnu8H" />
17
+ <component name="ProjectViewState">
18
+ <option name="hideEmptyMiddlePackages" value="true" />
19
+ <option name="showLibraryContents" value="true" />
20
+ </component>
21
+ <component name="PropertiesComponent">{
22
+ &quot;keyToString&quot;: {
23
+ &quot;RunOnceActivity.OpenProjectViewOnStart&quot;: &quot;true&quot;,
24
+ &quot;RunOnceActivity.ShowReadmeOnStart&quot;: &quot;true&quot;,
25
+ &quot;last_opened_file_path&quot;: &quot;D:/SEBASTIAN GEORGE/S5/f/EEg&quot;,
26
+ &quot;settings.editor.selected.configurable&quot;: &quot;com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable&quot;
27
+ }
28
+ }</component>
29
+ <component name="RunManager">
30
+ <configuration name="dataCollection" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true">
31
+ <module name="T24" />
32
+ <option name="INTERPRETER_OPTIONS" value="" />
33
+ <option name="PARENT_ENVS" value="true" />
34
+ <envs>
35
+ <env name="PYTHONUNBUFFERED" value="1" />
36
+ </envs>
37
+ <option name="SDK_HOME" value="" />
38
+ <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
39
+ <option name="IS_MODULE_SDK" value="true" />
40
+ <option name="ADD_CONTENT_ROOTS" value="true" />
41
+ <option name="ADD_SOURCE_ROOTS" value="true" />
42
+ <option name="SCRIPT_NAME" value="D:\SEBASTIAN GEORGE\S5\f\T24\dataCollection.py" />
43
+ <option name="PARAMETERS" value="" />
44
+ <option name="SHOW_COMMAND_LINE" value="false" />
45
+ <option name="EMULATE_TERMINAL" value="false" />
46
+ <option name="MODULE_MODE" value="false" />
47
+ <option name="REDIRECT_INPUT" value="false" />
48
+ <option name="INPUT_FILE" value="" />
49
+ <method v="2" />
50
+ </configuration>
51
+ </component>
52
+ <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
53
+ <component name="SvnConfiguration">
54
+ <configuration>C:\Users\user\AppData\Roaming\Subversion</configuration>
55
+ </component>
56
+ <component name="TaskManager">
57
+ <task active="true" id="Default" summary="Default task">
58
+ <changelist id="15e16f15-f7a0-47bd-8da7-9e3e2bf12e47" name="Changes" comment="" />
59
+ <created>1724338491973</created>
60
+ <option name="number" value="Default" />
61
+ <option name="presentableId" value="Default" />
62
+ <updated>1724338491973</updated>
63
+ </task>
64
+ <servers />
65
+ </component>
66
+ </project>
.ipynb_checkpoints/test-checkpoint.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cv2
from cvzone.HandTrackingModule import HandDetector
from cvzone.ClassificationModule import Classifier
import numpy as np
import math
import pyttsx3
from collections import deque

# Initialize the text-to-speech engine used to speak recognized letters.
engine = pyttsx3.init()
voices = engine.getProperty('voices')
# NOTE(review): assumes at least two voices are installed on this machine —
# voices[1] raises IndexError otherwise; confirm on the deployment target.
engine.setProperty('voice', voices[1].id)
engine.setProperty('rate', 125)

cap = cv2.VideoCapture(0)
detector = HandDetector(maxHands=1)
classifier = Classifier("Model/keras_model.h5", "Model/labels.txt")
offset = 20      # padding (px) added around the detected hand bounding box
imgSize = 300    # classifier input is a square imgSize x imgSize canvas
labels = ["A", "B", "C"]

# Last few predicted labels; a letter is only spoken when the most recent 3
# predictions agree, which debounces classifier jitter between frames.
prediction_history = deque(maxlen=3)
last_prediction = None

while True:
    try:
        success, img = cap.read()
        if not success:
            print("Failed to capture image")
            continue

        imgOutput = img.copy()
        hands, img = detector.findHands(img)
        if hands:
            hand = hands[0]
            x, y, w, h = hand['bbox']
            imgWhite = np.ones((imgSize, imgSize, 3), np.uint8) * 255
            # BUGFIX: initialize so the drawing/imshow code below cannot hit a
            # NameError when the crop or prediction fails on this frame
            # (previously that NameError was silently swallowed by the outer
            # blanket except).
            index = None
            imgCrop = None
            try:
                imgCrop = img[y - offset:y + h + offset, x - offset:x + w + offset]
                if imgCrop.size == 0:
                    raise ValueError("Empty image crop detected")

                aspectRatio = h / w

                # Scale the crop so its longer side fills the canvas, then
                # center it on the white square (preserves aspect ratio).
                if aspectRatio > 1:
                    k = imgSize / h
                    wCal = math.ceil(k * w)
                    imgResize = cv2.resize(imgCrop, (wCal, imgSize))
                    wGap = math.ceil((imgSize - wCal) / 2)
                    imgWhite[:, wGap:wCal + wGap] = imgResize
                else:
                    k = imgSize / w
                    hCal = math.ceil(k * h)
                    imgResize = cv2.resize(imgCrop, (imgSize, hCal))
                    hGap = math.ceil((imgSize - hCal) / 2)
                    imgWhite[hGap:hCal + hGap, :] = imgResize

                try:
                    prediction, index = classifier.getPrediction(imgWhite, draw=False)
                    print(prediction, index)

                    prediction_history.append(labels[index])

                    # Speak only when 3 consecutive predictions agree and the
                    # letter differs from the one spoken most recently.
                    if len(prediction_history) == 3 and len(set(prediction_history)) == 1 and prediction_history[1] != last_prediction:
                        last_prediction = labels[index]
                        engine.say(labels[index])
                        engine.runAndWait()
                        prediction_history.clear()  # reuse the deque instead of reallocating

                except Exception as e:
                    print("Error in classifier prediction:", e)

            except cv2.error as e:
                print("OpenCV error:", e)
            except ValueError as e:
                print(e)

            # BUGFIX: only draw the label and show the crop when this frame
            # actually produced them; `index`/`imgCrop` were previously
            # referenced even after a failed crop/prediction.
            if index is not None:
                cv2.rectangle(imgOutput, (x - offset, y - offset - 50),
                              (x - offset + 90, y - offset), (255, 0, 255), cv2.FILLED)
                cv2.putText(imgOutput, labels[index], (x, y - 26), cv2.FONT_HERSHEY_COMPLEX, 1.7, (255, 255, 255), 2)
                cv2.rectangle(imgOutput, (x - offset, y - offset),
                              (x + w + offset, y + h + offset), (255, 0, 255), 4)
            if imgCrop is not None and imgCrop.size:
                cv2.imshow("ImageCrop", imgCrop)
            cv2.imshow("ImageWhite", imgWhite)

        cv2.imshow("Image", imgOutput)
        # BUGFIX: the key code was discarded, leaving no way to quit the loop.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    except Exception as e:
        print("Unexpected error:", e)

# Release the camera and close all windows on exit.
cap.release()
cv2.destroyAllWindows()
Model/keras_model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69f7c8e1dcb153a962e7b6048597b74f2fe8b559f2bb9b970b2c50bccc92312a
3
+ size 2453432
Model/labels.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ 0 A
2
+ 1 B
3
+ 2 C
dataCollection.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cv2
from cvzone.HandTrackingModule import HandDetector
import numpy as np
import math
import time

cap = cv2.VideoCapture(0)
detector = HandDetector(maxHands=1)
offset = 20      # padding (px) added around the detected hand bounding box
imgSize = 300    # saved sample is a square imgSize x imgSize canvas
folder = "Data/Hi"  # destination directory for captured samples (must already exist)
counter = 0      # number of images saved this session

while True:
    success, img = cap.read()
    if not success:
        print("Failed to capture image")
        continue

    # BUGFIX: reset per frame so the save path below can never reference a
    # stale canvas from an earlier frame, nor an undefined `imgCrop` when no
    # hand has been detected yet (previously a NameError on pressing "s").
    imgWhite = None
    hands, img = detector.findHands(img)
    if hands:
        hand = hands[0]
        x, y, w, h = hand['bbox']
        canvas = np.ones((imgSize, imgSize, 3), np.uint8) * 255
        try:
            imgCrop = img[y - offset:y + h + offset, x - offset:x + w + offset]
            if imgCrop.size == 0:
                raise ValueError("Empty image crop detected")

            aspectRatio = h / w
            # Scale the crop so its longer side fills the canvas, then
            # center it on the white square (preserves aspect ratio).
            if aspectRatio > 1:
                k = imgSize / h
                wCal = math.ceil(k * w)
                imgResize = cv2.resize(imgCrop, (wCal, imgSize))
                wGap = math.ceil((imgSize - wCal) / 2)
                canvas[:, wGap:wCal + wGap] = imgResize
            else:
                k = imgSize / w
                hCal = math.ceil(k * h)
                imgResize = cv2.resize(imgCrop, (imgSize, hCal))
                hGap = math.ceil((imgSize - hCal) / 2)
                canvas[hGap:hCal + hGap, :] = imgResize

            imgWhite = canvas  # mark valid only after a successful resize
            cv2.imshow("ImageCrop", imgCrop)
            cv2.imshow("ImageWhite", imgWhite)

        except cv2.error as e:
            print("OpenCV error:", e)
        except ValueError as e:
            print(e)

    cv2.imshow("Image", img)
    key = cv2.waitKey(1)
    if key == ord("s"):
        # Save only when the current frame produced a valid canvas.
        if imgWhite is None:
            continue
        counter += 1
        cv2.imwrite(f'{folder}/Image_{time.time()}.jpg', imgWhite)
        print(counter)
    elif key == ord("q"):
        # BUGFIX: the original loop had no exit condition.
        break

# Release the camera and close all windows on exit.
cap.release()
cv2.destroyAllWindows()
test.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cv2
from cvzone.HandTrackingModule import HandDetector
from cvzone.ClassificationModule import Classifier
import numpy as np
import math
import pyttsx3
from collections import deque

# Initialize the text-to-speech engine used to speak recognized letters.
engine = pyttsx3.init()
voices = engine.getProperty('voices')
# NOTE(review): assumes at least two voices are installed on this machine —
# voices[1] raises IndexError otherwise; confirm on the deployment target.
engine.setProperty('voice', voices[1].id)
engine.setProperty('rate', 125)

cap = cv2.VideoCapture(0)
detector = HandDetector(maxHands=1)
classifier = Classifier("Model/keras_model.h5", "Model/labels.txt")
offset = 20      # padding (px) added around the detected hand bounding box
imgSize = 300    # classifier input is a square imgSize x imgSize canvas
labels = ["A", "B", "C"]

# Last few predicted labels; a letter is only spoken when the most recent 3
# predictions agree, which debounces classifier jitter between frames.
prediction_history = deque(maxlen=3)
last_prediction = None

while True:
    try:
        success, img = cap.read()
        if not success:
            print("Failed to capture image")
            continue

        imgOutput = img.copy()
        hands, img = detector.findHands(img)
        if hands:
            hand = hands[0]
            x, y, w, h = hand['bbox']
            imgWhite = np.ones((imgSize, imgSize, 3), np.uint8) * 255
            # BUGFIX: initialize so the drawing/imshow code below cannot hit a
            # NameError when the crop or prediction fails on this frame
            # (previously that NameError was silently swallowed by the outer
            # blanket except).
            index = None
            imgCrop = None
            try:
                imgCrop = img[y - offset:y + h + offset, x - offset:x + w + offset]
                if imgCrop.size == 0:
                    raise ValueError("Empty image crop detected")

                aspectRatio = h / w

                # Scale the crop so its longer side fills the canvas, then
                # center it on the white square (preserves aspect ratio).
                if aspectRatio > 1:
                    k = imgSize / h
                    wCal = math.ceil(k * w)
                    imgResize = cv2.resize(imgCrop, (wCal, imgSize))
                    wGap = math.ceil((imgSize - wCal) / 2)
                    imgWhite[:, wGap:wCal + wGap] = imgResize
                else:
                    k = imgSize / w
                    hCal = math.ceil(k * h)
                    imgResize = cv2.resize(imgCrop, (imgSize, hCal))
                    hGap = math.ceil((imgSize - hCal) / 2)
                    imgWhite[hGap:hCal + hGap, :] = imgResize

                try:
                    prediction, index = classifier.getPrediction(imgWhite, draw=False)
                    print(prediction, index)

                    prediction_history.append(labels[index])

                    # Speak only when 3 consecutive predictions agree and the
                    # letter differs from the one spoken most recently.
                    if len(prediction_history) == 3 and len(set(prediction_history)) == 1 and prediction_history[1] != last_prediction:
                        last_prediction = labels[index]
                        engine.say(labels[index])
                        engine.runAndWait()
                        prediction_history.clear()  # reuse the deque instead of reallocating

                except Exception as e:
                    print("Error in classifier prediction:", e)

            except cv2.error as e:
                print("OpenCV error:", e)
            except ValueError as e:
                print(e)

            # BUGFIX: only draw the label and show the crop when this frame
            # actually produced them; `index`/`imgCrop` were previously
            # referenced even after a failed crop/prediction.
            if index is not None:
                cv2.rectangle(imgOutput, (x - offset, y - offset - 50),
                              (x - offset + 90, y - offset), (255, 0, 255), cv2.FILLED)
                cv2.putText(imgOutput, labels[index], (x, y - 26), cv2.FONT_HERSHEY_COMPLEX, 1.7, (255, 255, 255), 2)
                cv2.rectangle(imgOutput, (x - offset, y - offset),
                              (x + w + offset, y + h + offset), (255, 0, 255), 4)
            if imgCrop is not None and imgCrop.size:
                cv2.imshow("ImageCrop", imgCrop)
            cv2.imshow("ImageWhite", imgWhite)

        cv2.imshow("Image", imgOutput)
        # BUGFIX: the key code was discarded, leaving no way to quit the loop.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    except Exception as e:
        print("Unexpected error:", e)

# Release the camera and close all windows on exit.
cap.release()
cv2.destroyAllWindows()