# import the necessary packages
from imutils.video import VideoStream
import datetime
import argparse
import imutils
import time
import cv2
import math
import numpy as np
import RPi.GPIO as GPIO
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=-1,
    help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())
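# example invocation (assumed, based on the flag above): pass "--picamera 1" to use
# the Raspberry Pi camera module; the default of -1 falls back to a USB webcam
# through imutils' VideoStream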
# initialize the video stream and allow the camera sensor to warm up
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)
# set up the GPIO output pins driven by the gesture actions (BCM numbering)
GPIO.setmode(GPIO.BCM)
for pin in (18, 22, 9, 5):
    GPIO.setup(pin, GPIO.OUT)
while True:
    # grab the frame from the video stream, resize it and flip it
    frame = vs.read()
    img = imutils.resize(frame, width=1000)
    img = cv2.flip(img, -1)
    # get hand data from the rectangular sub-window on the screen
    cv2.rectangle(img, (300, 300), (100, 100), (0, 255, 0))
    crop_img = img[100:300, 100:300]
    # convert to grayscale
    grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
    # applying gaussian blur
    value = (35, 35)
    blurred = cv2.GaussianBlur(grey, value, 0)
    # thresholding: Otsu's binarization method
    _, thresh1 = cv2.threshold(blurred, 127, 255,
                               cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # show thresholded image
    #cv2.imshow('Thresholded', thresh1)
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x but only
    # (contours, hierarchy) in OpenCV 4.x; taking the last two values covers both
    contours, hierarchy = cv2.findContours(thresh1.copy(),
                                           cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
    # find the contour with max area (skip the rest of the frame if none was found)
    if contours:
        cnt = max(contours, key=cv2.contourArea)
        # create a bounding rectangle around the contour (can skip below two lines)
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(crop_img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        # finding convex hull
        hull = cv2.convexHull(cnt)
        # drawing contours
        drawing = np.zeros(crop_img.shape, np.uint8)
        cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 0)
        cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 0)
        # finding the convex hull again, this time as point indices, for convexityDefects
        hull = cv2.convexHull(cnt, returnPoints=False)
        # finding convexity defects
        defects = cv2.convexityDefects(cnt, hull)
        count_defects = 0
        cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)
        # apply the cosine rule to find the angle of each defect (the gap between
        # fingers); defects with an angle greater than 90 degrees are ignored
        if defects is not None:
            for i in range(defects.shape[0]):
                s, e, f, d = defects[i, 0]
                start = tuple(cnt[s][0])
                end = tuple(cnt[e][0])
                far = tuple(cnt[f][0])
                # find the length of all three sides of the triangle
                a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
                b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
                c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
                # apply the cosine rule and convert the angle to degrees
                angle = math.degrees(math.acos((b**2 + c**2 - a**2) / (2 * b * c)))
                # ignore angles > 90 degrees and highlight the rest with red dots
                if angle <= 90:
                    count_defects += 1
                    cv2.circle(crop_img, far, 1, [0, 0, 255], -1)
                #dist = cv2.pointPolygonTest(cnt,far,True)
                # draw a line from start to end i.e. the convex points (finger tips)
                # (can skip this part)
                cv2.line(crop_img, start, end, [0, 255, 0], 2)
                #cv2.circle(crop_img,far,5,[0,0,255],-1)
        # define the action mapped to each defect count
        if count_defects == 1:
            cv2.putText(img, "1st function", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)
            GPIO.output(18, GPIO.HIGH)
            time.sleep(1)
            GPIO.output(18, GPIO.LOW)
        elif count_defects == 2:
            cv2.putText(img, "2nd function", (5, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)
            GPIO.output(22, GPIO.HIGH)
            time.sleep(1)
            GPIO.output(22, GPIO.LOW)
        elif count_defects == 3:
            cv2.putText(img, "3rd function", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)
            GPIO.output(9, GPIO.HIGH)
            time.sleep(1)
            GPIO.output(9, GPIO.LOW)
        elif count_defects == 4:
            cv2.putText(img, "4th function", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)
            GPIO.output(5, GPIO.HIGH)
            time.sleep(1)
            GPIO.output(5, GPIO.LOW)
        else:
            cv2.putText(img, "Welcome", (50, 50),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)
        # stack the contour drawing beside the cropped hand region
        all_img = np.hstack((drawing, crop_img))
        #cv2.imshow('Contours', all_img)
    # show the annotated frame
    cv2.imshow('Gesture', img)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break
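# assumed teardown: close the display windows, stop the stream and free the GPIO pins
cv2.destroyAllWindows()
vs.stop()
GPIO.cleanup()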