In this lesson we show how to interact with the GPIO pins on the NVIDIA Jetson Nano. The GPIO pins on the Jetson Nano have very limited current capability, so you must learn to use a PN2222 BJT transistor in order to control things like LEDs or other components. In this lesson we show how the Jetson Nano can be used to control a standard LED.
Tag Archives: Jetson Nano
AI on the Jetson Nano LESSON 40: Training Facial Recognition Models in OpenCV
In this lesson we learn to make training our AI Facial Recognition system simpler and more automated.
# Build a gallery of known-face encodings from demoImages/known, then scan
# every image in demoImages/unknown, label any recognized faces, and show the
# annotated picture. Press any key to advance to the next image, 'q' to quit.
import face_recognition
import cv2
import os

print(cv2.__version__)

Encodings = []
Names = []

# Pass 1: encode each known face. The file name (minus extension) is the label.
image_dir = '/home/pjm/Desktop/pyPro/faceRecognizer/demoImages/known'
for root, dirs, files in os.walk(image_dir):
    print(files)
    for file in files:
        path = os.path.join(root, file)
        print(path)
        name = os.path.splitext(file)[0]
        print(name)
        person = face_recognition.load_image_file(path)
        encodings = face_recognition.face_encodings(person)
        # BUG FIX: the original indexed [0] unconditionally and crashed with
        # IndexError when no face could be detected in a training image.
        if not encodings:
            print('WARNING: no face found in', path, '- skipping')
            continue
        Encodings.append(encodings[0])
        Names.append(name)
print(Names)

font = cv2.FONT_HERSHEY_SIMPLEX

# Pass 2: locate and identify faces in the unknown images.
image_dir = '/home/pjm/Desktop/pyPro/faceRecognizer/demoImages/unknown'
quitRequested = False
for root, dirs, files in os.walk(image_dir):
    for file in files:
        print(root)
        print(file)
        testImagePath = os.path.join(root, file)
        testImage = face_recognition.load_image_file(testImagePath)
        facePositions = face_recognition.face_locations(testImage)
        allEncodings = face_recognition.face_encodings(testImage, facePositions)
        # face_recognition works in RGB; OpenCV displays BGR, so convert once.
        testImage = cv2.cvtColor(testImage, cv2.COLOR_RGB2BGR)
        for (top, right, bottom, left), face_encoding in zip(facePositions, allEncodings):
            name = 'Unknown Person'
            matches = face_recognition.compare_faces(Encodings, face_encoding)
            if True in matches:
                # Use the first gallery face that matched.
                name = Names[matches.index(True)]
            cv2.rectangle(testImage, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.putText(testImage, name, (left, top - 6), font, .75, (0, 255, 255), 2)
        cv2.imshow('Picture', testImage)
        cv2.moveWindow('Picture', 0, 0)
        if cv2.waitKey(0) == ord('q'):
            quitRequested = True
            break
    if quitRequested:
        break
# BUG FIX: the original only destroyed the windows when 'q' happened to be
# pressed; always clean up the GUI on exit.
cv2.destroyAllWindows()
AI ON THE JETSON NANO LESSON 28: Tracking Objects in OpenCV using Contours
In Lesson 27 we learned how to isolate an object of interest by masking it, and separating it from the background. In this lesson we learn how to track the object of interest by creating OpenCV contours, and then drawing a contour or box around the object of interest in the original frame. This allows us to track objects of interest in real time on live videos. The video above takes you through the lesson step-by-step, and below is the demo code we developed during the lesson.
# Track a colored object on the Jetson Nano camera feed: threshold the frame
# in HSV using two tunable hue ranges (two ranges let you select colors such
# as red that wrap around the hue axis), combine the masks, then find the
# contours in the mask and draw a bounding box around each sizable blob.
# Press 'q' in a video window to quit.
import cv2
print(cv2.__version__)
import numpy as np

def nothing(x):
    # Dummy callback; createTrackbar requires one even if unused.
    pass

# Trackbar window for live HSV threshold tuning.
cv2.namedWindow('Trackbars')
cv2.moveWindow('Trackbars', 1320, 0)
cv2.createTrackbar('hueLower', 'Trackbars', 50, 179, nothing)
cv2.createTrackbar('hueUpper', 'Trackbars', 100, 179, nothing)
cv2.createTrackbar('hue2Lower', 'Trackbars', 50, 179, nothing)
cv2.createTrackbar('hue2Upper', 'Trackbars', 100, 179, nothing)
cv2.createTrackbar('satLow', 'Trackbars', 100, 255, nothing)
cv2.createTrackbar('satHigh', 'Trackbars', 255, 255, nothing)
cv2.createTrackbar('valLow', 'Trackbars', 100, 255, nothing)
cv2.createTrackbar('valHigh', 'Trackbars', 255, 255, nothing)

dispW = 640
dispH = 480
flip = 2
#Uncomment These next Two Line for Pi Camera
camSet='nvarguscamerasrc ! video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'
cam = cv2.VideoCapture(camSet)
#Or, if you have a WEB cam, uncomment the next line
#(If it does not work, try setting to '1' instead of '0')
#cam=cv2.VideoCapture(0)

while True:
    ret, frame = cam.read()
    # BUG FIX: the original ignored `ret`; a failed grab left frame=None and
    # crashed cvtColor with an opaque error. Bail out cleanly instead.
    if not ret:
        print('Camera read failed - exiting')
        break
    #frame=cv2.imread('smarties.png')
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Read the current threshold settings from the trackbars.
    hueLow = cv2.getTrackbarPos('hueLower', 'Trackbars')
    hueUp = cv2.getTrackbarPos('hueUpper', 'Trackbars')
    hue2Low = cv2.getTrackbarPos('hue2Lower', 'Trackbars')
    hue2Up = cv2.getTrackbarPos('hue2Upper', 'Trackbars')
    Ls = cv2.getTrackbarPos('satLow', 'Trackbars')
    Us = cv2.getTrackbarPos('satHigh', 'Trackbars')
    Lv = cv2.getTrackbarPos('valLow', 'Trackbars')
    Uv = cv2.getTrackbarPos('valHigh', 'Trackbars')

    l_b = np.array([hueLow, Ls, Lv])
    u_b = np.array([hueUp, Us, Uv])
    l_b2 = np.array([hue2Low, Ls, Lv])
    u_b2 = np.array([hue2Up, Us, Uv])

    # Composite foreground mask = union of the two hue-range masks.
    FGmask = cv2.inRange(hsv, l_b, u_b)
    FGmask2 = cv2.inRange(hsv, l_b2, u_b2)
    FGmaskComp = cv2.add(FGmask, FGmask2)
    cv2.imshow('FGmaskComp', FGmaskComp)
    cv2.moveWindow('FGmaskComp', 0, 530)

    # BUG FIX: the original used the OpenCV 3 three-value unpack
    # `_,contours,_=cv2.findContours(...)`, which raises ValueError on
    # OpenCV 4 (it returns two values). Taking [-2] works on both versions.
    contours = cv2.findContours(FGmaskComp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # Largest blobs first, so the object of interest is drawn before noise.
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        (x, y, w, h) = cv2.boundingRect(cnt)
        if area >= 50:  # ignore tiny noise blobs
            #cv2.drawContours(frame,[cnt],0,(255,0,0),3)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3)
    cv2.imshow('nanoCam', frame)
    cv2.moveWindow('nanoCam', 0, 0)
    if cv2.waitKey(1) == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()
AI ON THE JETSON NANO LESSON 29: Gear to Create a Pan Tilt Camera Platform for Real Time Tracking
Now that we are able to isolate and track objects using the Jetson Nano and OpenCV, we can create a pan tilt platform for the camera, so the camera can actually track and follow the object of interest in the scene. We will do this by using two servos and some mounting brackets. Below, I show the gear you will need to order to continue.
First we will need to be able to mount our camera on the pan/tilt bracket. If you have the Logitech WEB cam, then it can be affixed to the bracket in any number of different ways. If you are using the Raspberry Pi camera, you will need to either pick up the Logitech WEB cam, or you will need a longer cable for the Raspberry Pi camera. For this project, I prefer the Logitech WEB cam. But if price is a barrier, the simplest solution is to order the set of longer cables. You can get this gear from these links:
Raspberry Pi Long Camera Cable Set
Now you will need a couple of good servos to drive the pan tilt brackets. I found this set works well, and the price is good, and you will end up with a couple of spares, which is always useful.
You will also need a pan/tilt bracket for the servos and cameras, and the one below is the set I am using in this project.
And finally you will need a PCA9685 I2C Servo Control Board. You can pick up what you need here:
The link above is for a pair of the boards, and you only need one. I suggest getting the pair so you have a spare if needed, or it can be used for other projects. And that is all you will need!
AI on the Jetson Nano LESSON 27: Tracking Objects in OpenCV Using HSV Color Space
In this lesson we show how to track items in OpenCV based on color using the HSV color space. Below is the code we developed in this lesson.
# Isolate a colored object on the Jetson Nano camera feed using the HSV color
# space. Two tunable hue ranges (so wrap-around colors like red can be picked)
# plus shared saturation/value limits produce a foreground mask; the script
# then shows the mask, the masked foreground, the inverted background mask,
# and a final composite with the background forced to white.
# Press 'q' in a video window to quit.
import cv2
print(cv2.__version__)
import numpy as np

def nothing(x):
    # Dummy callback; createTrackbar requires one even if unused.
    pass

# Trackbar window for live HSV threshold tuning.
cv2.namedWindow('Trackbars')
cv2.moveWindow('Trackbars', 1320, 0)
cv2.createTrackbar('hueLower', 'Trackbars', 50, 179, nothing)
cv2.createTrackbar('hueUpper', 'Trackbars', 100, 179, nothing)
cv2.createTrackbar('hue2Lower', 'Trackbars', 50, 179, nothing)
cv2.createTrackbar('hue2Upper', 'Trackbars', 100, 179, nothing)
cv2.createTrackbar('satLow', 'Trackbars', 100, 255, nothing)
cv2.createTrackbar('satHigh', 'Trackbars', 255, 255, nothing)
cv2.createTrackbar('valLow', 'Trackbars', 100, 255, nothing)
cv2.createTrackbar('valHigh', 'Trackbars', 255, 255, nothing)

dispW = 640
dispH = 480
flip = 2
#Uncomment These next Two Line for Pi Camera
camSet='nvarguscamerasrc ! video/x-raw(memory:NVMM), width=3264, height=2464, format=NV12, framerate=21/1 ! nvvidconv flip-method='+str(flip)+' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'
cam = cv2.VideoCapture(camSet)
#Or, if you have a WEB cam, uncomment the next line
#(If it does not work, try setting to '1' instead of '0')
#cam=cv2.VideoCapture(0)

while True:
    ret, frame = cam.read()
    # BUG FIX: the original ignored `ret`; a failed grab left frame=None and
    # crashed cvtColor with an opaque error. Bail out cleanly instead.
    if not ret:
        print('Camera read failed - exiting')
        break
    #frame=cv2.imread('smarties.png')
    cv2.imshow('nanoCam', frame)
    cv2.moveWindow('nanoCam', 0, 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Read the current threshold settings from the trackbars.
    hueLow = cv2.getTrackbarPos('hueLower', 'Trackbars')
    hueUp = cv2.getTrackbarPos('hueUpper', 'Trackbars')
    hue2Low = cv2.getTrackbarPos('hue2Lower', 'Trackbars')
    hue2Up = cv2.getTrackbarPos('hue2Upper', 'Trackbars')
    Ls = cv2.getTrackbarPos('satLow', 'Trackbars')
    Us = cv2.getTrackbarPos('satHigh', 'Trackbars')
    Lv = cv2.getTrackbarPos('valLow', 'Trackbars')
    Uv = cv2.getTrackbarPos('valHigh', 'Trackbars')

    l_b = np.array([hueLow, Ls, Lv])
    u_b = np.array([hueUp, Us, Uv])
    l_b2 = np.array([hue2Low, Ls, Lv])
    u_b2 = np.array([hue2Up, Us, Uv])

    # Composite foreground mask = union of the two hue-range masks.
    FGmask = cv2.inRange(hsv, l_b, u_b)
    FGmask2 = cv2.inRange(hsv, l_b2, u_b2)
    FGmaskComp = cv2.add(FGmask, FGmask2)
    cv2.imshow('FGmaskComp', FGmaskComp)
    cv2.moveWindow('FGmaskComp', 0, 530)

    # Foreground: original pixels where the mask is set, black elsewhere.
    FG = cv2.bitwise_and(frame, frame, mask=FGmaskComp)
    cv2.imshow('FG', FG)
    cv2.moveWindow('FG', 700, 0)

    # Background mask is the inverse of the foreground mask.
    bgMask = cv2.bitwise_not(FGmaskComp)
    cv2.imshow('bgMask', bgMask)
    cv2.moveWindow('bgMask', 700, 530)

    # Final composite: foreground object on a white background.
    BG = cv2.cvtColor(bgMask, cv2.COLOR_GRAY2BGR)
    final = cv2.add(FG, BG)
    cv2.imshow('final', final)
    cv2.moveWindow('final', 1400, 0)

    if cv2.waitKey(1) == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()