文章目录
目录
一、材料
1.树莓派(我用的是树莓派4b)
2.摄像头(我用的是csi摄像头,因为csi摄像头比普通usb摄像头占用cpu资源少)
3.THB6128步进电机驱动
4.两相四线步进电机
二、环境配置
1.树莓派烧录系统
这里建议烧录NOOBS或Raspbian等官方推荐的系统,而不是Ubuntu、Windows等。我之前就是烧录了Ubuntu的系统,然后就一堆bug,大多数是系统引起的,因为树莓派官方系统里面有很多适用于树莓派的库和一些硬件驱动。
2.安装相关库
树莓派官方推荐的系统是自带python环境的,如果没有可以自己再下载。
然后安装百度api的库
pip install baidu-aip
这里我们用的是百度AI开放平台,进行在线手势识别
三、申请百度AI开放平台的id和密钥
1.创建应用
2.获取自己的ID以及密钥
用自己的APP_ID、API_KEY、SECRET_KEY替换代码里对应的值。
百度AI开放平台每个月会有免费调用的次数便于开发者开发(足够开发使用),我这里就不分享我的APP_ID了哈。
""" 你的 APPID AK SK """
APP_ID = '*********'
API_KEY = '*************'
SECRET_KEY = '*******************'
''' 调用'''
四、接线
1.步进电机与步进电机驱动接线以及电机供电
2.树莓派GPIO口与步进电机驱动接线
和上篇博客差不多,只不过这里我把openmv换成了树莓派,这里我将CP+-分别连接树莓派的pin16(GPIO23)、pin18(GPIO24),DIR+-分别接pin13(GPIO27)、pin15(GPIO22)。
五、树莓派python代码
最后上代码啦。
import os
import cv2
from aip import AipBodyAnalysis
from aip import AipSpeech
from threading import Thread
import time
from playsound import playsound
import serial
import RPi.GPIO as GPIO
""" 你的 APPID AK SK """
APP_ID = '23*****2'
API_KEY = 'mdSOHU1Vq******0zDsiuUMY'
SECRET_KEY = 'yeKL9o3TPWNrrl******qPuhLZ116Hwc'
''' 调用'''
IN1=18
IN2=16
IN3=15
IN4=13
hand = {'One': '数字1', 'Five': '数字5', 'Fist': '拳头', 'Ok': 'OK',
'Prayer': '祈祷', 'Congratulation': '作揖', 'Honour': '作别',
'Heart_single': '比心心', 'Thumb_up': '点赞', 'Thumb_down': 'Diss',
'ILY': '我爱你', 'Palm_up': '掌心向上', 'Heart_1': '双手比心1',
'Heart_2': '双手比心2', 'Heart_3': '双手比心3', 'Two': '数字2',
'Three': '数字3', 'Four': '数字4', 'Six': '数字6', 'Seven': '数字7',
'Eight': '数字8', 'Nine': '数字9', 'Rock': 'Rock', 'Insult': '竖中指', 'Face': '脸'}
# 语音合成
client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
# 手势识别
gesture_client = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
capture = cv2.VideoCapture(0) # 0为默认摄像头
def camera():
    """Continuously display the mirrored camera preview until 'q' is pressed.

    Runs on its own thread so the blocking gesture-recognition loop does not
    freeze the preview window.
    """
    while True:
        ret, frame = capture.read()
        if not ret:
            # Failed grab: frame is None and cv2.flip would raise — skip it.
            continue
        # Mirror horizontally so the preview behaves like a mirror.
        frame = cv2.flip(frame, 1)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) == ord('q'):
            break
Thread(target=camera).start()  # preview thread, so recognition doesn't block the UI
def gesture_recognition():
    """Recognize hand gestures and drive the stepper 'lift' to the chosen floor.

    Gestures One..Four select a target floor. The motor moves one segment of
    1000 step-cycles per floor travelled, pausing 3 s (stopped) between
    segments. ``floor`` starts at 0, the ground position before the first
    command; moving from 0 to floor n takes n segments, then travel between
    floors k and n takes |n - k| segments, exactly as the original
    copy-pasted transition table did.
    """
    # Gesture class name -> target floor number.
    targets = {'One': 1, 'Two': 2, 'Three': 3, 'Four': 4}
    floor = 0
    while True:
        try:
            # ret: whether a frame was grabbed; frame: the image itself.
            ret, frame = capture.read()
            # Encode the frame as JPEG bytes for the Baidu gesture API.
            image = cv2.imencode('.jpeg', frame)[1]
            gesture = gesture_client.gesture(image)  # AipBodyAnalysis call
            words = gesture['result'][0]['classname']
            voice(hand[words])
            print(hand[words])
            print(words)
            print(floor)
            target = targets.get(words)
            if target is not None and target != floor:
                # Direction depends on whether we go up or down; one motor
                # segment per floor, with a 3 s stopped pause between segments.
                move = forward if target > floor else backward
                for i in range(abs(target - floor)):
                    if i:
                        stop()
                        time.sleep(3)
                    move(0.0001, 1000)
                floor = target
        except Exception:
            # The original bare `except:` also swallowed KeyboardInterrupt,
            # which prevented the Ctrl-C cleanup path in __main__ from ever
            # running. Catch Exception only.
            voice('识别失败')
        if cv2.waitKey(1) == ord('q'):
            break
def voice(words):
    """Synthesize *words* with Baidu TTS and play the resulting MP3.

    On success ``client.synthesis`` returns raw MP3 bytes; on failure it
    returns an error dict, in which case nothing is written or played.
    """
    result = client.synthesis(words, 'zh', 1, {
        'vol': 5,
    })
    if not isinstance(result, dict):
        # `with` closes the file automatically — the original's explicit
        # f.close() inside the block was redundant.
        with open('./res.mp3', 'wb') as f:
            f.write(result)
        playsound('./res.mp3')
def setStep(W1, W2, W3, W4):
    """Drive the four driver-input pins to the given logic levels (one per coil line)."""
    for pin, level in ((IN1, W1), (IN2, W2), (IN3, W3), (IN4, W4)):
        GPIO.output(pin, level)
def stop():
    """Pull every driver input low, halting the motor."""
    setStep(*(0, 0, 0, 0))
def forward(delay, steps):
    """Rotate the motor forward for `steps` full-step cycles.

    Each cycle walks the four-phase sequence once, sleeping `delay`
    seconds after every phase.
    """
    # Full-step excitation sequence, forward order.
    phases = ((1, 0, 1, 0), (0, 1, 1, 0), (0, 1, 0, 1), (1, 0, 0, 1))
    for _ in range(steps):
        for phase in phases:
            setStep(*phase)
            time.sleep(delay)
def backward(delay, steps):
    """Rotate the motor backward for `steps` full-step cycles.

    Same four-phase excitation as forward(), walked in reverse order,
    sleeping `delay` seconds after every phase.
    """
    # Full-step excitation sequence, reverse order.
    phases = ((1, 0, 0, 1), (0, 1, 0, 1), (0, 1, 1, 0), (1, 0, 1, 0))
    for _ in range(steps):
        for phase in phases:
            setStep(*phase)
            time.sleep(delay)
def setup():
    """Initialize GPIO: BOARD pin numbering, all four driver pins as outputs."""
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BOARD)
    for pin in (IN1, IN2, IN3, IN4):
        GPIO.setup(pin, GPIO.OUT)
def destroy():
    """Release all GPIO pins claimed by setup()."""
    GPIO.cleanup()
# Backward-compatible alias: the original code defined this function under
# the misspelling `destory` while the __main__ block called `destroy()`,
# which would raise NameError on Ctrl-C. Keep both names valid.
destory = destroy
if __name__ == '__main__':
    setup()
    try:
        gesture_recognition()
    except KeyboardInterrupt:
        # The original called the undefined name `destroy()` here, raising
        # NameError on Ctrl-C instead of cleaning up; the cleanup helper in
        # this file is spelled `destory`.
        destory()
总结
为了方便观察现象,我采用了电机分段转动的形式,即0楼去4楼电机正向转动4次,4楼回1楼电机反向转动3次。(每次转动角度一样)