// Single-target ground tracking flow test: TLD has been split out of the algorithm into an external module, so the API call pattern has changed accordingly
// Reads sequence frames via the VOT toolkit for testing
#include "NeoArithStandardDll.h"
#include <iostream>
#include <memory>
#include <string.h>
#include <algorithm>
#include <thread>
#include "opencv2/opencv.hpp"
#define TEST_WITH_AID 0 // whether to use AI Detect
#define TEST_WITH_AIT 0 // whether to use the AI Tracker; if set to 1, the top-level CMakeLists.txt must set(BUILD_AI_TRACK TRUE)
#define VOT_RECTANGLE
#include "vot.h"
#if TEST_WITH_AID
#include "Arith_YOLO_Detect.h"
#endif
#if TEST_WITH_AIT
#include "Arith_AITracker.h"
#endif
using std::cout;
using std::endl;
short SelectCX = 0;
short SelectCY = 0;
unsigned short setLockBoxW = 0;
unsigned short setLockBoxH = 0;
// Algorithm input
ARIDLL_INPUTPARA stInputPara = { 0 };
// Algorithm output
ARIDLL_OUTPUT stOutput = { 0 };
// AI Detect results
#if TEST_WITH_AID
obj_res* g_pGLB_AIDetOutput = NULL;
int g_GLB_AIDetNum = 0;
#endif
// AI Tracker results
#if TEST_WITH_AIT
API_AI_Tracker* g_GLB_AITracker = NULL;
AIT_OUTPUT g_GLB_AITrackOutput = { 0 };
#endif
#if TEST_WITH_AID
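// Run asynchronous YOLO detection on the current frame and merge the AI detections with the
// conventional frame-search targets before handing them to the controller.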
static void AIDetRun(ArithHandle pTracker, GD_VIDEO_FRAME_S img, int frameID)
{
    // Asynchronous call to hide on-device transfer latency; note that async results inherently lag by one frame.
    Async_YOLO_DetectTarget(img.u64VirAddr[0], img.u32Width, img.u32Height, frameID);
    g_pGLB_AIDetOutput = Async_YOLO_GetTargetArray(g_GLB_AIDetNum);
    int targetNum = 0;
    TARGET_OBJECT* pArray = ARIDLL_SearchFrameTargets(pTracker, img, &targetNum);
    int mergeNum = ARIDLL_MergeAITargets(pTracker, pArray, targetNum, g_pGLB_AIDetOutput, g_GLB_AIDetNum);
    stInputPara.nInputTargetNum = mergeNum;
    memcpy(stInputPara.stInputTarget, pArray, sizeof(TARGET_OBJECT) * mergeNum);
}
#endif
#if TEST_WITH_AIT
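// Drive the external AI tracker from the control commands (init box, target box, track flag)
// that the conventional controller publishes in stOutput.stAI_TkCmd.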
static int AITrackerRun(GD_VIDEO_FRAME_S img, int frameID)
{
    // Get the AI tracker control commands from the conventional algorithm's output
    CENTERRECT32F InitBox = stOutput.stAI_TkCmd.InitBox;
    CENTERRECT32F TargetBox = stOutput.stAI_TkCmd.TargetBox;
    if (InitBox.w > 0 && InitBox.h > 0)
    {
        g_GLB_AITracker->init(img, InitBox);
        return 0;
    }
    if (!stOutput.stAI_TkCmd.bTrack)
    {
        g_GLB_AITracker->stopTrack();
        memset(&stInputPara.stAITrackerInfo, 0, sizeof(AIT_OUTPUT));
        memset(&g_GLB_AITrackOutput, 0, sizeof(AIT_OUTPUT));
        return 0;
    }
    g_GLB_AITracker->Track(img, TargetBox);
    // Fetch the tracking result
    g_GLB_AITracker->getTrackResult_Async(&g_GLB_AITrackOutput);
    // Pass the result back to the conventional algorithm
    memcpy(&stInputPara.stAITrackerInfo, &g_GLB_AITrackOutput, sizeof(AIT_OUTPUT));
    return 0;
}
#endif
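// Per-frame processing: optionally run AI detection and the AI tracker, then invoke the
// conventional controller, which fills stOutput (including the AI tracker commands).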
static void RunProcess(ArithHandle pTracker, GD_VIDEO_FRAME_S img)
{
#if TEST_WITH_AID
    // Run the AI detection algorithm
    AIDetRun(pTracker, img, stInputPara.unFrmId);
#endif
#if TEST_WITH_AIT
    // Run the SiamRPN tracking algorithm
    AITrackerRun(img, stInputPara.unFrmId);
#endif
    // Call the TLD flow
    //ARIDLL_RunTLDTracker(pTracker, img);
    // Run the main algorithm controller API
    ARIDLL_RunController(pTracker, img, stInputPara, &stOutput);
}
int main()
{
    VOT vot;
    cv::Rect initialization;
    initialization << vot.region();
    cv::Mat frame = cv::imread(vot.frame());
    int nWidth = frame.cols;
    int nHeight = frame.rows;
    // Create the algorithm handle
    ArithHandle pTracker = STD_CreatEOArithHandle();
#if TEST_WITH_AID
    // AI detection initialization
    //YOLO_Init();
    Async_YOLO_Init();
#endif
#if TEST_WITH_AIT
    // AI tracker initialization
    g_GLB_AITracker = API_AI_Tracker::Create(AITrackerType::DaSaimRPN);
    g_GLB_AITracker->loadModel();
    memset(&g_GLB_AITrackOutput, 0, sizeof(AIT_OUTPUT));
#endif
    // Initialize in stare / ground-target mode
    ARIDLL_EOArithInitWithMode(pTracker, nWidth, nHeight, GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_RGB_PACKED,
        GLB_SYS_MODE::GLB_SYS_STARE, GLB_SCEN_MODE::GLB_SCEN_GROUND);
    // Build the image descriptor
    GD_VIDEO_FRAME_S img = { 0 };
    img.enPixelFormat = GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_RGB_PACKED;
    img.u32Width = nWidth;
    img.u32Height = nHeight;
    img.u32Stride[0] = img.u32Width * 3;
    img.u64VirAddr[0] = frame.data;
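    // Fixed platform attitude, servo, and camera parameters for this offline test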
    stInputPara.unFreq = 30;
    stInputPara.stAirCraftInfo.stAtt.fYaw = 0;
    stInputPara.stAirCraftInfo.stAtt.fRoll = 0;
    stInputPara.stAirCraftInfo.stAtt.fPitch = 0;
    stInputPara.stServoInfo.fServoAz = 0;
    stInputPara.stServoInfo.fServoPt = 0;
    stInputPara.stCameraInfo.fPixelSize = 15;
    stInputPara.stCameraInfo.nFocus = 600;
    stInputPara.unFrmId = 0;
    // Call once to run the algorithm's internal initialization
    RunProcess(pTracker, img);
    // Issue the lock command
    SelectCX = initialization.x + initialization.width / 2;
    SelectCY = initialization.y + initialization.height / 2;
    setLockBoxW = initialization.width;
    setLockBoxH = initialization.height;
    ARIDLL_OBJINFO obj = { 0 };
    obj = ARIDLL_LockTarget(pTracker, img, SelectCX, SelectCY, setLockBoxW, setLockBoxH);
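    // A non-zero width in the returned ARIDLL_OBJINFO is used below as the indication that the lock box is valid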
#if TEST_WITH_AIT
    if (obj.nObjW > 0)
    {
        // Initialize the AI tracker with the EOTracker's lock decision
        CENTERRECT32F initBox = { obj.nX, obj.nY, obj.nObjW, obj.nObjH };
        g_GLB_AITracker->init(img, initBox);
        // Fetch the tracking result
        g_GLB_AITracker->getTrackResult_Async(&g_GLB_AITrackOutput);
    }
#endif
    // Run the tracking flow once so tracking completes on the lock frame
    RunProcess(pTracker, img);
    // Simulate the algorithm execution loop
    while (!vot.end())
    {
        stInputPara.unFrmId++;
        std::string imagepath = vot.frame();
        if (imagepath.empty())
        {
            break;
        }
        frame = cv::imread(imagepath);
        // Build the image descriptor for this frame
        img.enPixelFormat = GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_RGB_PACKED;
        img.u32Width = nWidth;
        img.u32Height = nHeight;
        img.u32Stride[0] = img.u32Width * 3;
        img.u64VirAddr[0] = frame.data;
        RunProcess(pTracker, img);
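        // stOutput gives a center-based box (nX/nY are the target center); convert it to a
        // top-left-anchored cv::Rect before reporting to VOT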
        auto trackerOut = stOutput.stTrackers[0];
        cv::Rect outRect;
        outRect.width = (int)trackerOut.nObjW;
        outRect.height = (int)trackerOut.nObjH;
        outRect.x = (int)trackerOut.nX - outRect.width / 2;
        outRect.y = (int)trackerOut.nY - outRect.height / 2;
        vot.report(outRect, trackerOut.fConf);
    }
    return 0;
}