This lesson describes how to add RFID Tag capability to the Raspberry Pi. We will be using the MFRC522 RFID Module. The schematic below shows how to connect the device up to the Raspberry Pi through a standard breakout board.
LCD1602 Display Library for Micropython and the Raspberry Pi Pico W
This is some demonstration sample code showing use of the LCD1602 as an LCD display for the Raspberry Pi Pico W. The code is explained in the video above. It will prompt a user for their name, and then display a greeting on the LCD.
1 2 3 4 5 6 7 8 9 10 11 |
from lcd1602 import LCD
import utime as time

# Demonstration: greet the user on a 16x2 LCD1602 display.
lcd = LCD()

while True:
    # Ask on the serial console, then show a two-line greeting on the LCD.
    myName = input('What is Your Name? ')
    lcd.clear()
    line1 = 'Hello ' + myName
    line2 = 'Welcome to My Pi'
    lcd.write(0, 0, line1)   # column 0, row 0
    lcd.write(0, 1, line2)   # column 0, row 1
Below is the library for the Sunfounder Kepler Kit LCD1602 display. It allows the LCD display to operate with the Raspberry Pi Pico W. The code should be copied and pasted into Thonny, and then saved to your Raspberry Pi Pico W, in the same folder that contains your Python code. It MUST be saved with the file name lcd1602.py
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 |
import machine
import time


class LCD():
    """Driver for an I2C LCD1602 character display on a PCF8574 backpack.

    The HD44780 controller is driven in 4-bit mode over I2C: every command
    or data byte is sent as two nibbles, with the backpack control bits
    (RS / RW / EN / backlight) OR-ed into the low nibble of each transfer.
    """

    def __init__(self, addr=None, blen=1):
        # I2C1 on GP6 (SDA) / GP7 (SCL) at 400 kHz -- the Sunfounder
        # Kepler Kit wiring for the Raspberry Pi Pico W.
        sda = machine.Pin(6)
        scl = machine.Pin(7)
        self.bus = machine.I2C(1, sda=sda, scl=scl, freq=400000)
        self.addr = self.scanAddress(addr)
        self.blen = blen  # backlight enable flag (1 = on)
        # Standard HD44780 power-on sequence: force 8-bit mode, switch to
        # 4-bit mode, then configure geometry / display / clear.
        self.send_command(0x33)  # Must initialize to 8-line mode at first
        time.sleep(0.005)
        self.send_command(0x32)  # Then initialize to 4-line mode
        time.sleep(0.005)
        self.send_command(0x28)  # 2 Lines & 5*7 dots
        time.sleep(0.005)
        self.send_command(0x0C)  # Enable display without cursor
        time.sleep(0.005)
        self.send_command(0x01)  # Clear Screen
        self.bus.writeto(self.addr, bytearray([0x08]))  # backlight on

    def scanAddress(self, addr):
        """Return the I2C address of the LCD backpack.

        If ``addr`` is given it is validated against the bus scan;
        otherwise the two common backpack addresses (0x27, 0x3F) are
        tried in order.  Raises ``Exception`` when no display is found.
        """
        devices = self.bus.scan()
        if len(devices) == 0:
            raise Exception("No LCD found")
        if addr is not None:
            if addr in devices:
                return addr
            else:
                # FIX: zero-pad the hex address (":02X"); the original
                # ":2X" padded with a space, printing e.g. "0x 3F".
                raise Exception(f"LCD at 0x{addr:02X} not found")
        elif 0x27 in devices:
            return 0x27
        elif 0x3F in devices:
            return 0x3F
        else:
            raise Exception("No LCD found")

    def write_word(self, data):
        """Write one raw byte to the backpack, merging in the backlight bit."""
        temp = data
        if self.blen == 1:
            temp |= 0x08   # backlight bit on
        else:
            temp &= 0xF7   # backlight bit off
        self.bus.writeto(self.addr, bytearray([temp]))

    def send_command(self, cmd):
        """Send an HD44780 command byte (RS=0) as two 4-bit transfers."""
        # Send bit7-4 firstly
        buf = cmd & 0xF0
        buf |= 0x04  # RS = 0, RW = 0, EN = 1
        self.write_word(buf)
        time.sleep(0.002)
        buf &= 0xFB  # Make EN = 0 to latch the nibble
        self.write_word(buf)
        # Send bit3-0 secondly
        buf = (cmd & 0x0F) << 4
        buf |= 0x04  # RS = 0, RW = 0, EN = 1
        self.write_word(buf)
        time.sleep(0.002)
        buf &= 0xFB  # Make EN = 0
        self.write_word(buf)

    def send_data(self, data):
        """Send one character byte (RS=1) as two 4-bit transfers."""
        # Send bit7-4 firstly
        buf = data & 0xF0
        buf |= 0x05  # RS = 1, RW = 0, EN = 1
        self.write_word(buf)
        time.sleep(0.002)
        buf &= 0xFB  # Make EN = 0 to latch the nibble
        self.write_word(buf)
        # Send bit3-0 secondly
        buf = (data & 0x0F) << 4
        buf |= 0x05  # RS = 1, RW = 0, EN = 1
        self.write_word(buf)
        time.sleep(0.002)
        buf &= 0xFB  # Make EN = 0
        self.write_word(buf)

    def clear(self):
        """Clear the display and home the cursor."""
        self.send_command(0x01)  # Clear Screen

    def openlight(self):
        """Enable the backlight without touching the display contents."""
        self.bus.writeto(self.addr, bytearray([0x08]))
        # self.bus.close()

    def write(self, x, y, str):
        # NOTE: parameter name 'str' shadows the builtin; kept for
        # backward compatibility with existing keyword callers.
        """Write text starting at column ``x`` (0-15), row ``y`` (0-1)."""
        if x < 0:
            x = 0
        if x > 15:
            x = 15
        if y < 0:
            y = 0
        if y > 1:
            y = 1
        # Move cursor: DDRAM address = 0x80 base + 0x40 per row + column.
        addr = 0x80 + 0x40 * y + x
        self.send_command(addr)
        # FIX: loop variable renamed from 'chr' (shadowed the builtin).
        for ch in str:
            self.send_data(ord(ch))

    def message(self, text):
        """Write ``text`` at the current cursor; '\\n' jumps to row 2."""
        # print("message: %s"%text)
        for char in text:
            if char == '\n':
                self.send_command(0xC0)  # next line
            else:
                self.send_data(ord(char))
Raspberry Pi LESSON 63: Object Detection on Raspberry Pi Using Tensorflow Lite
In this lesson I show you how to do object detection on the Raspberry Pi using Tensorflow Lite. We will write our first program and by the end of the lesson you will have your Pi detecting objects, boxing them and labeling them in OpenCV.
The video demonstrates step-by-step how to install the tensorflow libraries.
For your convenience, I have included below the code we develop in this lesson.
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 |
import cv2
import time
from picamera2 import Picamera2
from tflite_support.task import core
from tflite_support.task import processor
from tflite_support.task import vision
import utils

# Object detection on the Raspberry Pi with TensorFlow Lite.
# Model file and CPU thread count for the interpreter.
model = 'efficientdet_lite0.tflite'
num_threads = 4

# Display resolution.
dispW = 1280
dispH = 720

# The Pi camera is configured but the loop reads the USB webcam below;
# uncomment the capture_array() line in the loop to switch back to it.
picam2 = Picamera2()
picam2.preview_configuration.main.size = (dispW, dispH)
picam2.preview_configuration.main.format = 'RGB888'
picam2.preview_configuration.align()
picam2.configure("preview")
picam2.start()

# USB webcam source.
webCam = '/dev/video2'
cam = cv2.VideoCapture(webCam)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, dispW)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, dispH)
cam.set(cv2.CAP_PROP_FPS, 30)

# On-screen FPS readout style.
pos = (20, 60)
font = cv2.FONT_HERSHEY_SIMPLEX
height = 1.5   # font scale
weight = 3     # stroke thickness
myColor = (255, 0, 0)
fps = 0

# Build the TFLite object detector (CPU, multi-threaded, top-3 results).
base_options = core.BaseOptions(file_name=model, use_coral=False, num_threads=num_threads)
detection_options = processor.DetectionOptions(max_results=3, score_threshold=.3)
options = vision.ObjectDetectorOptions(base_options=base_options, detection_options=detection_options)
detector = vision.ObjectDetector.create_from_options(options)

tStart = time.time()
while True:
    ret, im = cam.read()
    # FIX: bail out cleanly if the webcam stops delivering frames
    # (the original crashed inside cvtColor on a None frame).
    if not ret:
        break
    #im=picam2.capture_array()
    #im=cv2.flip(im,-1)
    # The detector expects RGB; OpenCV frames are BGR.
    imRGB = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    imTensor = vision.TensorImage.create_from_array(imRGB)
    detections = detector.detect(imTensor)
    # FIX: keep the annotated frame returned by utils.visualize();
    # the original assigned it to an unused variable 'image'.
    im = utils.visualize(im, detections)
    cv2.putText(im, str(int(fps)) + ' FPS', pos, font, height, myColor, weight)
    cv2.imshow('Camera', im)
    if cv2.waitKey(1) == ord('q'):
        break
    # Exponentially smoothed FPS estimate over the loop time.
    tEnd = time.time()
    loopTime = tEnd - tStart
    fps = .9 * fps + .1 * 1 / loopTime
    tStart = time.time()
cv2.destroyAllWindows()
Raspberry Pi LESSON 61: Finding and Tracking Faces and Eyes In OpenCV
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 |
import cv2
from picamera2 import Picamera2
import time

# Finding and tracking faces and eyes in OpenCV with Haar cascades.
picam2 = Picamera2()
dispW = 1280
dispH = 720
picam2.preview_configuration.main.size = (dispW, dispH)
picam2.preview_configuration.main.format = "RGB888"
picam2.preview_configuration.controls.FrameRate = 30
picam2.preview_configuration.align()
picam2.configure("preview")
picam2.start()

# On-screen FPS readout style.
fps = 0
pos = (30, 60)
font = cv2.FONT_HERSHEY_SIMPLEX
height = 1.5   # font scale
weight = 3     # stroke thickness
myColor = (0, 0, 255)

# Haar cascade classifiers for frontal faces and eyes.
faceCascade = cv2.CascadeClassifier('./haar/haarcascade_frontalface_default.xml')
eyeCascade = cv2.CascadeClassifier('./haar/haarcascade_eye.xml')

while True:
    tStart = time.time()
    frame = picam2.capture_array()
    frame = cv2.flip(frame, -1)
    # Cascades operate on grayscale.
    frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(frameGray, 1.3, 5)
    for face in faces:
        x, y, w, h = face
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3)
        # Search for eyes only inside the face region of interest.
        roiGray = frameGray[y:y + h, x:x + w]
        roiColor = frame[y:y + h, x:x + w]
        eyes = eyeCascade.detectMultiScale(roiGray, 1.3, 5)
        for eye in eyes:
            # FIX: use distinct names for the eye rectangle; the original
            # reused x,y,w,h and clobbered the face coordinates.
            ex, ey, ew, eh = eye
            cv2.rectangle(roiColor, (ex, ey), (ex + ew, ey + eh), (255, 0, 0), 3)
    cv2.putText(frame, str(int(fps)) + ' FPS', pos, font, height, myColor, weight)
    cv2.imshow("Camera", frame)
    if cv2.waitKey(1) == ord('q'):
        break
    # Exponentially smoothed FPS estimate.
    tEnd = time.time()
    loopTime = tEnd - tStart
    fps = .9 * fps + .1 * (1 / loopTime)
cv2.destroyAllWindows()
Raspberry Pi LESSON 59: Improved Pan/Tilt Tracking Control Algorithm
In this Video Lesson we show an improved control algorithm for tracking an Object of Interest in OpenCV. We develop a simple example of Proportional control, where the correction signal is proportional to the error signal. We show this is a much improved algorithm over our earlier one, which simply applied 1 degree corrections independent of the size of the error. The code we develop in this lesson is included below for your convenience.
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 |
import cv2
from picamera2 import Picamera2
import time
import numpy as np
from servo import Servo

# Improved pan/tilt tracking: proportional control, where the servo
# correction is proportional to the pixel error from frame center.
picam2 = Picamera2()
pan = Servo(pin=13)
tilt = Servo(pin=12)
panAngle = 0
tiltAngle = 0
pan.set_angle(panAngle)
tilt.set_angle(tiltAngle)

dispW = 1280
dispH = 720
picam2.preview_configuration.main.size = (dispW, dispH)
picam2.preview_configuration.main.format = "RGB888"
picam2.preview_configuration.controls.FrameRate = 30
picam2.preview_configuration.align()
picam2.configure("preview")
picam2.start()

# On-screen FPS readout style.
fps = 0
pos = (30, 60)
font = cv2.FONT_HERSHEY_SIMPLEX
height = 1.5   # font scale
weight = 3     # stroke thickness
myColor = (0, 0, 255)
track = 0  # 0 = train (tune HSV bounds), 1 = track (drive servos)

# FIX: seed the HSV bounds with the trackbar defaults. OpenCV does not
# guarantee a callback fires when a trackbar is created, so without these
# the first frame could raise NameError in np.array(...) below.
hueLow = 10
hueHigh = 20
satLow = 100
satHigh = 255
valLow = 100
valHigh = 255

# Trackbar callbacks: each stores its slider value in a global.
def onTrack1(val):
    global hueLow
    hueLow = val
    print('Hue Low', hueLow)

def onTrack2(val):
    global hueHigh
    hueHigh = val
    print('Hue High', hueHigh)

def onTrack3(val):
    global satLow
    satLow = val
    print('Sat Low', satLow)

def onTrack4(val):
    global satHigh
    satHigh = val
    print('Sat High', satHigh)

def onTrack5(val):
    global valLow
    valLow = val
    print('Val Low', valLow)

def onTrack6(val):
    global valHigh
    valHigh = val
    print('Val High', valHigh)

def onTrack7(val):
    global track
    track = val
    print('Track Value', track)

cv2.namedWindow('myTracker')
cv2.createTrackbar('Hue Low', 'myTracker', 10, 179, onTrack1)
cv2.createTrackbar('Hue High', 'myTracker', 20, 179, onTrack2)
cv2.createTrackbar('Sat Low', 'myTracker', 100, 255, onTrack3)
cv2.createTrackbar('Sat High', 'myTracker', 255, 255, onTrack4)
cv2.createTrackbar('Val Low', 'myTracker', 100, 255, onTrack5)
cv2.createTrackbar('Val High', 'myTracker', 255, 255, onTrack6)
cv2.createTrackbar('Train-0 Track-1', 'myTracker', 0, 1, onTrack7)

while True:
    tStart = time.time()
    frame = picam2.capture_array()
    frame = cv2.flip(frame, -1)
    frameHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    cv2.putText(frame, str(int(fps)) + ' FPS', pos, font, height, myColor, weight)
    # Threshold the frame to the currently selected HSV color band.
    lowerBound = np.array([hueLow, satLow, valLow])
    upperBound = np.array([hueHigh, satHigh, valHigh])
    myMask = cv2.inRange(frameHSV, lowerBound, upperBound)
    myMaskSmall = cv2.resize(myMask, (int(dispW / 2), int(dispH / 2)))
    myObject = cv2.bitwise_and(frame, frame, mask=myMask)
    myObjectSmall = cv2.resize(myObject, (int(dispW / 2), int(dispH / 2)))
    contours, junk = cv2.findContours(myMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        # Track the largest contour only.
        contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
        #cv2.drawContours(frame,contours,-1,(255,0,0),3)
        contour = contours[0]
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 3)
        if track == 1:
            # Proportional control: correction scales with the pixel
            # error between the object center and the frame center.
            error = (x + w / 2) - dispW / 2
            panAngle = panAngle - error / 75
            if panAngle > 90:
                panAngle = 90
            if panAngle < -90:
                panAngle = -90
            # Deadband: ignore errors under 35 px to avoid servo jitter.
            if abs(error) > 35:
                pan.set_angle(panAngle)
            tiltError = (y + h / 2) - dispH / 2
            tiltAngle = tiltAngle + tiltError / 75
            if tiltAngle > 40:
                tiltAngle = 40
            if tiltAngle < -90:
                tiltAngle = -90
            if abs(tiltError) > 35:
                tilt.set_angle(tiltAngle)
    cv2.imshow('Camera', frame)
    cv2.imshow('Mask', myMaskSmall)
    cv2.imshow('My Object', myObjectSmall)
    if cv2.waitKey(1) == ord('q'):
        break
    # Exponentially smoothed FPS estimate.
    tEnd = time.time()
    loopTime = tEnd - tStart
    fps = .9 * fps + .1 * (1 / loopTime)
cv2.destroyAllWindows()