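# Demo: spawn a walking pedestrian model in RflySim3D in front of the drone, arm the
# drone and fly it to a hover point, then run a 30 Hz loop that grabs two camera images
# through shared memory, detects faces with an OpenCV Haar cascade, and leaves a hook
# for a velocity-command-based target tracking controller.
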
# import required libraries
import cv2
import time
import math
import sys
import os
import threading

# import RflySim APIs
import PX4MavCtrlV4 as PX4MavCtrl

import VisionCaptureApi
import UE4CtrlAPI

ue = UE4CtrlAPI.UE4CtrlAPI()

vis = VisionCaptureApi.VisionCaptureApi()

# VisionCaptureApi configuration functions
vis.jsonLoad()  # load the sensor configuration from Config.json

isSuss = vis.sendReqToUE4()  # send the image-capture request to RflySim3D and verify it
if not isSuss:  # exit if the image-capture request failed
    sys.exit(0)
vis.startImgCap(True)  # start image capture and enable shared-memory image forwarding to the configured directory

# Send a command to the first RflySim3D window (index 0) to change its resolution
ue.sendUE4Cmd('r.setres 720x405w', 0)  # set the UE4 window resolution; the window is for display only (the capture resolution is set in Config.json), and a smaller window needs fewer resources
ue.sendUE4Cmd('t.MaxFPS 30', 0)  # set the maximum UE4 refresh rate, which is also the image-capture rate
time.sleep(2)

# Create MAVLink control API instance
mav = PX4MavCtrl.PX4MavCtrler(1)
# Init MAVLink data receiving loop
mav.InitMavLoop()

# send a vehicle position command to create a man with copterID=100;
# the man is placed in front of the drone and rotated 180 degrees to face it
ue.sendUE4Pos(100, 30, 0, [1, 0, -8.086], [0, 0, math.pi])
time.sleep(1)

# send a command to change the object with copterID=100 (the man just created) to a walking style
ue.sendUE4Cmd('RflyChange3DModel 100 16')
time.sleep(0.5)

# send a command to the first RflySim3D window to switch the view to vehicle 1
ue.sendUE4Cmd('RflyChangeViewKeyCmd B 1', 0)

print("5s, Arm the drone")
|
||
mav.initOffboard()
|
||
time.sleep(0.5)
|
||
mav.SendMavArm(True) # Arm the drone
|
||
print("Arm the drone!, and fly to NED 0,0,-5")
|
||
time.sleep(0.5)
|
||
mav.SendPosNED(0, 0, -1.7, 0) # Fly to target position 0,0,-1.5
|
||
|
||
# Process the images in the timer loop below
startTime = time.time()
lastTime = time.time()
timeInterval = 1 / 30.0  # time interval of the timer
face_cascade = cv2.CascadeClassifier(
    os.path.join(sys.path[0], 'cascades', 'haarcascade_frontalface_default.xml'))

num = 0
lastClock = time.time()
while True:
    # Get the first camera view and convert it to grayscale
    if vis.hasData[0]:
        pic1 = cv2.cvtColor(vis.Img[0], cv2.COLOR_BGR2GRAY)
        faces1 = face_cascade.detectMultiScale(pic1, 1.3, 5)  # face detection on the first camera
        for (x, y, w, h) in faces1:
            pic1 = cv2.rectangle(pic1, (x, y), (x + w, y + h), (255, 0, 0), 1)  # draw a rectangle to mark the face
        cv2.imshow("pic1", pic1)  # show the processed image

    if vis.hasData[1]:
        # Get the second camera view and convert it to grayscale
        pic2 = cv2.cvtColor(vis.Img[1], cv2.COLOR_BGR2GRAY)
        faces2 = face_cascade.detectMultiScale(pic2, 1.3, 5)  # face detection on the second camera
        for (x, y, w, h) in faces2:
            pic2 = cv2.rectangle(pic2, (x, y), (x + w, y + h), (255, 0, 0), 1)
        cv2.imshow("pic2", pic2)

    # add a target tracking algorithm here with the mav.SendVelNED or SendVelFRD API
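    # --- Minimal tracking sketch (illustrative addition, not part of the original demo) ---
    # Assumption: mav.SendVelNED(vx, vy, vz, yawRate) accepts NED velocities in m/s and a
    # yaw rate in rad/s; check the PX4MavCtrlV4 source for the exact signature before use.
    # The idea: yaw toward the first face found in camera 1 so it stays centered in the
    # image; forward motion (e.g. with SendVelFRD) can be added later for distance keeping.
    if vis.hasData[0] and len(faces1) > 0:
        (fx, fy, fw, fh) = faces1[0]                   # first detected face in camera 1
        errX = (fx + fw / 2.0) - pic1.shape[1] / 2.0   # horizontal offset from the image center, in pixels
        yawRate = 0.002 * errX                         # simple proportional gain (value is a guess)
        mav.SendVelNED(0, 0, 0, yawRate)               # zero translational velocity, rotate toward the face
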
    cv2.waitKey(1)

    num = num + 1
    if num % 100 == 0:
        tNow = time.time()
        print('MainThreadFPS: ' + str(100 / (tNow - lastClock)))
        lastClock = tNow

    # Pace the loop so the code above runs at 30 Hz (every 0.0333 s)
    lastTime = lastTime + timeInterval
    sleepTime = lastTime - time.time()
    if sleepTime > 0:
        time.sleep(sleepTime)
    else:
        lastTime = time.time()