Reputation: 91
I've recently been learning computer vision with Python, and while making a hand detector project I encountered this error:
Traceback (most recent call last):
  File "c:\Users\idhant\OneDrive - 007lakshya\Idhant\Programming\Projects\MY MACHINE LEARNING PROJECTS\Hand Tracking Module.py", line 64, in <module>
    main()
  File "c:\Users\idhant\OneDrive - 007lakshya\Idhant\Programming\Projects\MY MACHINE LEARNING PROJECTS\Hand Tracking Module.py", line 41, in main
    detector = handDetector()
  File "c:\Users\idhant\OneDrive - 007lakshya\Idhant\Programming\Projects\MY MACHINE LEARNING PROJECTS\Hand Tracking Module.py", line 13, in __init__
    self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon, self.trackCon)
  File "C:\Users\idhant\AppData\Roaming\Python\Python39\site-packages\mediapipe\python\solutions\hands.py", line 114, in __init__
    super().__init__(
  File "C:\Users\idhant\AppData\Roaming\Python\Python39\site-packages\mediapipe\python\solution_base.py", line 258, in __init__
    self._input_side_packets = {
  File "C:\Users\idhant\AppData\Roaming\Python\Python39\site-packages\mediapipe\python\solution_base.py", line 259, in <dictcomp>
    name: self._make_packet(self._side_input_type_info[name], data)
  File "C:\Users\idhant\AppData\Roaming\Python\Python39\site-packages\mediapipe\python\solution_base.py", line 513, in _make_packet
    return getattr(packet_creator, 'create_' + packet_data_type.value)(data)
TypeError: create_int(): incompatible function arguments. The following argument types are supported:
    1. (arg0: int) -> mediapipe.python._framework_bindings.packet.Packet
Invoked with: 0.5
[ WARN:0] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
I've tried a lot of debugging but haven't been successful, so please help me. Here's the code I've written:
import cv2
import mediapipe as mp
import time

class handDetector():
    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon,
                                        self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = self.hands.process(imgRGB)
        # print(results.multi_hand_landmarks)
        if results.multi_hand_landmarks:
            for handLms in results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
        return img
        # for id, lm in enumerate(handLms.landmark):
        #     # print(id, lm)
        #     h, w, c = img.shape
        #     cx, cy = int(lm.x*w), int(lm.y*h)
        #     print(id, cx, cy)
        #     # if id == 4:
        #     cv2.circle(img, (cx, cy), 15, (255,0,255), cv2.FILLED)

def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(0)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        cTime = time.time()
        fps = 1/(cTime-pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_COMPLEX, 3, (255,0,255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)

if __name__ == "__main__":
    main()
I've tried making a hand detector class, which does the same hand detection but can also be reused from other files. That's why I wrote this code, and that's where I'm encountering this issue.
Upvotes: 9
Views: 19295
Reputation: 21
Solution:
def __init__(self, mode=False, maxHands = 2, detectionCon=0.5, trackCon = 0.5):
The problem is with line 6 of the code. The library has been updated, and you have to pass an integer value for "detectionCon". For best results, initialize detectionCon = 1 and trackCon = 0.5.
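Another way to sidestep the positional mismatch is to pass keyword arguments, so each value lands on the parameter it was meant for; a minimal sketch using the parameter names from mediapipe's Hands constructor:
import mediapipe as mp

mpHands = mp.solutions.hands
# Keyword arguments keep 0.5 from landing in an int-typed slot
hands = mpHands.Hands(static_image_mode=False,
                      max_num_hands=2,
                      min_detection_confidence=0.5,
                      min_tracking_confidence=0.5)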
If this solves your problem, please upvote so others can find it easily.
Upvotes: 2
Reputation:
It's a problem of indentation: findHands() needs to be part of the handDetector() class. So just indent findHands() and it should work.
Upvotes: 1
Reputation: 560
I was facing the same issue. Just add model_complexity to your __init__ function and you are good to go:
def __init__(self, mode=False, model_complexity=1, upBody=False, smooth=True, detectionCon=0.5, trackCon=0.5):
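For completeness, a minimal sketch of how that value could then be stored and forwarded to Hands(), assuming it goes in as the third positional argument (as in newer mediapipe versions); the attribute name is just an example:
self.modelComplexity = model_complexity
# the other attributes are set as in the question's __init__
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplexity,
                                self.detectionCon, self.trackCon)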
Upvotes: 4
Reputation: 65
I don't know if it helps by now, but for future reference: this is a known issue with the latest version of mediapipe. Reverting to version 0.8.8 will solve the problem.
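For reference, pinning the older version with pip would look like this (assuming pip manages the environment the script runs in):
pip install mediapipe==0.8.8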
Upvotes: 1
Reputation: 321
In def __init__(), at the line:
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon, self.trackCon)
try adding the model complexity as the third parameter in Hands(), as below:
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex, self.detectionCon, self.trackCon)
So there are a total of five parameters in self.mpHands.Hands().
Here is my full code that works for me:
class handDetector():
    def __init__(self, mode=False, maxHands=1, modelComplexity=1, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.modelComplex = modelComplexity
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex,
                                        self.detectionCon, self.trackCon)
Upvotes: 22
Reputation: 314
You need to add one more parameter in the __init__ method of the handDetector() class. Your complete code may look like this:
import cv2
import mediapipe as mp
import time

# class creation
class handDetector():
    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, modelComplexity=1, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.modelComplex = modelComplexity
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.modelComplex,
                                        self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils  # draws the small dots on the hand, 21 landmark points in total

    def findHands(self, img, draw=True):
        # Send the RGB image to hands
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)  # process the frame
        # print(results.multi_hand_landmarks)

        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    # Draw the dots and connect them
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        """Lists the position/type of the landmarks.
        We store the id and position of each landmark in a list.
        The list has all the landmark positions."""
        lmlist = []

        # check whether any landmark was detected
        if self.results.multi_hand_landmarks:
            # Which hand are we talking about
            myHand = self.results.multi_hand_landmarks[handNo]
            # Get id number and landmark information
            for id, lm in enumerate(myHand.landmark):
                # id gives the index number of the landmark
                # height, width and channels
                h, w, c = img.shape
                # find the position
                cx, cy = int(lm.x*w), int(lm.y*h)  # center
                # print(id, cx, cy)
                lmlist.append([id, cx, cy])

                # Draw a circle for each landmark
                if draw:
                    cv2.circle(img, (cx, cy), 15, (255,0,255), cv2.FILLED)

        return lmlist

def main():
    # Frame rates
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(0)
    detector = handDetector()

    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList = detector.findPosition(img)
        if len(lmList) != 0:
            print(lmList[4])

        cTime = time.time()
        fps = 1/(cTime-pTime)
        pTime = cTime

        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255,0,255), 3)
        cv2.imshow("Video", img)
        if cv2.waitKey(1) == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
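Since the goal was to reuse the detector from other files, here is a minimal sketch of importing the class elsewhere, assuming the code above is saved as HandTrackingModule.py next to the new script (the file name is just an example):
import cv2
from HandTrackingModule import handDetector

cap = cv2.VideoCapture(0)
detector = handDetector()
while True:
    success, img = cap.read()
    img = detector.findHands(img)
    lmList = detector.findPosition(img, draw=False)
    if lmList:
        print(lmList[8])  # landmark 8 is the tip of the index finger
    cv2.imshow("Image", img)
    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()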
Upvotes: 4