#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cstdint>
#include <string>
#include <vector>
#include <fstream>
#include <future>
#include <thread>

#include <dirent.h>
#include <unistd.h>
#include <sys/stat.h>

#include <opencv2/opencv.hpp>   // cv::Mat, cv::VideoCapture, cv::VideoWriter (may already be pulled in via ff_decode.hpp)

#include "ff_decode.hpp"
#include "demo_utils.h"
#include "Arith_Nano_Tracker_API.h"
#include "detection_type_api.h"
#include "NeoArithStandardDll.h"
#include "AI_API.h"
#include "threadpool.h"

static int DETECT_MODEL_TYPE = 0;
static int IMAGEWIDTH = 0;
static int IMAGEHEIGHT = 0;

// Mask buffers for get_result(); allocated in AI_Det_Init() once the stream resolution
// is known (allocating here would use IMAGEWIDTH/IMAGEHEIGHT while they are still 0).
TenMat *maskImg = NULL;
void *maskdata = NULL;

// Per-stream detection results (IR / VIS), refreshed every frame by AI_Det_Run().
std::vector<objinfo> visdets1;
std::vector<objinfo> visdets2;

void AI_Det_Release()
{
    int ret = Uninit_AI();
    if(ret){
        printf("Uninit_AI error\n");
    }

    if (maskdata != NULL)
        free(maskdata);
    if (maskImg != NULL)
        free(maskImg);

    printf("Uninit_AI DETECTOR Success!\n");
}

void AI_Det_Init(int model_type)
{
    if (model_type == 0)
    {
        IMAGEWIDTH = 1280;
        IMAGEHEIGHT = 1024;
    }
    else
    {
        IMAGEWIDTH = 640;
        IMAGEHEIGHT = 512;
    }

    // Allocate the mask buffers now that the stream resolution is known.
    maskImg = (TenMat*)malloc(sizeof(TenMat));
    maskdata = (void*)malloc(IMAGEWIDTH * IMAGEHEIGHT * sizeof(int64_t));

    std::string visdetmodeFull = "../models/Detect/1684x/vis_common_headers_1280x1024_bm1684x_F16_false.bmodel";
    std::string visdetmodePart = "../models/Detect/1684x/vis_common_headers_640x512_bm1684x_F16_false.bmodel";

    int ret = 0;
    ret = Init_ACL();
    if(ret){
        printf("Init_ACL error\n");
    }

    ret = set_deque_size(5, 5, 5, 5, 5, 5, 5, 5, 9, 9, 9, 9);
    if(ret){
        printf("set_deque_size error\n");
    }
    printf("deque size set\n");

    // Set the IR stream resolution
    ret = set_stream_size(IMAGEWIDTH, IMAGEHEIGHT, IR);
    if(ret){
        printf("set_stream_size error\n");
    }

    // Set the visible-light (VIS) stream resolution
    ret = set_stream_size(IMAGEWIDTH, IMAGEHEIGHT, VIS);
    if(ret){
        printf("set_stream_size error\n");
    }

    // Map model class indices to standard class labels; consult the corresponding
    // algorithm engineer for the label table.
    uint32_t modelclass[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14};
    uint32_t standerclass[] = {10000, 20000, 30000, 40000, 50000,
                               60000, 70000, 80000, 90000, 100000,
                               110000, 120000, 130000, 140000, 150000};
    ret = setLabelMap(modelclass, standerclass, sizeof(modelclass) / sizeof(uint32_t));
    if(ret){
        printf("setLabelMap error with ret=%d\n", ret);
    }

    // Set the input stream format: NV12 or RGB
    ret = set_StreamMode(AIGO::INPUT_LAYOUT::NV12);
    if(ret){
        printf("set_StreamMode error with ret=%d\n", ret);
    }

    // Initialize the models
    if (model_type == 0){
        ret = Init_AI(visdetmodeFull.c_str(), visdetmodeFull.c_str(), NULL, NULL);
    }else{
        ret = Init_AI(visdetmodeFull.c_str(), visdetmodePart.c_str(), NULL, NULL);
    }
    if(ret){
        printf("init error\n");
    }

    ret = set_dropout_param(false);
    if(ret){
        printf("set dropout error\n");
    }

    // ret = switch_vis_to_ir();

    printf("set det threshold\n");
    int cNum = sizeof(modelclass) / sizeof(uint32_t);
    for (int z = 0; z < cNum; ++z) {
        ret = set_det_threshold(0.45, standerclass[z]);
        if(ret){
            printf("set det threshold error\n");
        }
    }

    set_dropout_param(1);
}
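
// Expected call order (as exercised in main() below): AI_Det_Init(model_type) once,
// then AI_Det_Run() per frame, then AI_Det_Release() after the last frame.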

void AI_Det_Run(cv::Mat NV12Mat, int frnID)
{
    //printf("start ai run\n");
    ImgMat imgvis;
    imgvis.data = NV12Mat.data;
    imgvis.width = IMAGEWIDTH;
    imgvis.height = IMAGEHEIGHT;
    imgvis.depth = 1;
    imgvis.layout = HWC;
    imgvis.inlayout = AIGO::INPUT_LAYOUT::NV12;
    imgvis.mode = VIS;
    imgvis.timestamp = frnID;
    long long timestamp = frnID;

    int ret = runAsync(imgvis, true, false, false);
    if(ret)
    {
        printf("runAsync error\n");
    }

    // Submit the same frame again as the IR stream
    ImgMat imgir = imgvis;
    imgir.mode = IR;

    ret = runAsync(imgir, true, false, false);
    if(ret)
    {
        printf("runAsync error\n");
    }

    maskImg->data = maskdata;
    visdets1.clear();
    ret = get_result(timestamp, visdets1, maskImg, false, IR);

    visdets2.clear();
    ret = get_result(timestamp, visdets2, maskImg, false, VIS);

    printf("end ai run\n");

    printf("**** F:[%d] AIOD_Detect num:[%zu] [%zu] timestamp: %lld\n",
           frnID, visdets1.size(), visdets2.size(), timestamp);
}
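
// Note: the boxes in visdets1/visdets2 are in the model input resolution
// (IMAGEWIDTH x IMAGEHEIGHT); main() rescales them to the original frame size
// before handing them to the trackers.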

void draw(cv::Mat& frame, ARIDLL_OUTPUT output)
{
    auto trackerOut = output.stTrackers[0];
    auto detOut_array = output.stAlarmObjs;
    auto detOut_cnt = output.nAlarmObjCnts;

    // Alarm/detection targets: blue boxes labelled with their output IDs
    for (int i = 0; i < detOut_cnt; i++)
    {
        cv::Rect outRect;
        outRect.width = MAX(15, (int)detOut_array[i].nObjW);
        outRect.height = MAX(15, (int)detOut_array[i].nObjH);
        outRect.x = (int)detOut_array[i].nX - outRect.width / 2;
        outRect.y = (int)detOut_array[i].nY - outRect.height / 2;
        cv::rectangle(frame, outRect, cv::Scalar(255, 0, 0), 3);
        cv::putText(frame, std::to_string(detOut_array[i].nOutputID), cv::Point(outRect.x - 10, outRect.y),
                    cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(255, 0, 0));
    }

    // Current track: red box
    cv::Rect outRect;
    outRect.width = MAX(15, (int)trackerOut.nObjW);
    outRect.height = MAX(15, (int)trackerOut.nObjH);
    outRect.x = (int)trackerOut.nX - outRect.width / 2;
    outRect.y = (int)trackerOut.nY - outRect.height / 2;
    cv::rectangle(frame, outRect, cv::Scalar(0, 0, 255), 5);
}

// Read one video and run it as two streams, to test dual-stream detection and tracking
int main(int argc, char** argv)
{
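    // Guard the command-line arguments used below: argv[1] selects the model type
    // (0: 1280x1024, otherwise 640x512), argv[2] is the input video path.
    if (argc < 3)
    {
        printf("usage: %s <model_type 0|1> <video_path>\n", argv[0]);
        return -1;
    }
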
    threadpool executor{ 4 };

    cv::VideoCapture capture(argv[2]);
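    // Fail fast if the video cannot be opened (otherwise the frame loop below exits on its first read).
    if (!capture.isOpened())
    {
        printf("failed to open video: %s\n", argv[2]);
        return -1;
    }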
    int nWidth = int(capture.get(cv::CAP_PROP_FRAME_WIDTH));
    int nHeight = int(capture.get(cv::CAP_PROP_FRAME_HEIGHT));

    cv::VideoWriter writer;
    cv::Size size = cv::Size(nWidth * 2, nHeight);
    bool bflag = writer.open("trackbm1684x.avi", cv::VideoWriter::fourcc('M', 'J', 'P', 'G'), 25, size, true);
    if (!bflag)
    {
        printf("open video writer failed\n");
    }

    printf("nW = %d,nH = %d \n", nWidth, nHeight);

    // Initialize detection: both streams are handled internally
    DETECT_MODEL_TYPE = atoi(argv[1]);
    AI_Det_Init(DETECT_MODEL_TYPE);

    printf("ai detect init done!\n");

    // Create the tracking algorithm handles
    ArithHandle pTracker1 = STD_CreatEOArithHandle();
    ArithHandle pTracker2 = STD_CreatEOArithHandle();

    // Initialize both trackers in stare / ground-scene mode
    ARIDLL_EOArithInitWithMode(pTracker1, nWidth, nHeight, GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_GRAY_Y16,
                               GLB_SYS_MODE::GLB_SYS_STARE, GLB_SCEN_MODE::GLB_SCEN_GROUND);
    ARIDLL_EOArithInitWithMode(pTracker2, nWidth, nHeight, GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_GRAY_Y16,
                               GLB_SYS_MODE::GLB_SYS_STARE, GLB_SCEN_MODE::GLB_SCEN_GROUND);

    // Algorithm input
    ARIDLL_INPUTPARA stInputPara = { 0 };
    stInputPara.unFrmId++;
    stInputPara.stCameraInfo.fPixelSize = 15;
    stInputPara.stCameraInfo.nFocus = 300;

    // Algorithm output
    ARIDLL_OUTPUT stOutput1 = { 0 };
    ARIDLL_OUTPUT stOutput2 = { 0 };

    // Y8 (grayscale) buffer fed to the trackers
    unsigned char* pY8Data = new unsigned char[nWidth * nHeight];

    FILE* file = fopen("timer.log", "w+");

    // Simulated algorithm execution flow
    int nTrackSuc = 0;
    cv::Mat frame;
    while (1)
    {
        stInputPara.unFrmId++;

        capture.read(frame);
        if (frame.empty())
            break;
        int imgW = frame.cols; // img_w
        int imgH = frame.rows; // img_h

        // Convert to grayscale for the trackers (OpenCV decodes frames as BGR)
        cv::Mat srcGray(nHeight, nWidth, CV_8UC1);
        cv::cvtColor(frame, srcGray, cv::COLOR_BGR2GRAY);
        memcpy(pY8Data, srcGray.data, nWidth * nHeight);

        // Resize to the model input resolution and convert to NV12 for the detector
        cv::Mat resizedImg;
        cv::resize(frame, resizedImg, cv::Size(IMAGEWIDTH, IMAGEHEIGHT));
        cv::Mat NV12Mat;
        BGR2YUV_nv12(resizedImg, NV12Mat);

        cv::TickMeter tm;
        tm.start();

        // Call the asynchronous AI detection interface
        AI_Det_Run(NV12Mat, stInputPara.unFrmId);

        // Remap the detections back to the original image size
        auto ratioW = (imgW * 1.0) / IMAGEWIDTH;
        auto ratioH = (imgH * 1.0) / IMAGEHEIGHT;
        for (size_t i = 0; i < visdets1.size(); i++)
        {
            visdets1[i].x1 *= ratioW;
            visdets1[i].x2 *= ratioW;
            visdets1[i].y1 *= ratioH;
            visdets1[i].y2 *= ratioH;
        }
        for (size_t i = 0; i < visdets2.size(); i++)
        {
            visdets2[i].x1 *= ratioW;
            visdets2[i].x2 *= ratioW;
            visdets2[i].y1 *= ratioH;
            visdets2[i].y2 *= ratioH;
        }

        // Feed the AI detections into the trackers
        ARIDLL_MergeAITargets(pTracker1, visdets1.data(), visdets1.size());
        ARIDLL_MergeAITargets(pTracker2, visdets2.data(), visdets2.size());

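        // The demo issues manual lock commands at fixed frame indices; the argument order
        // below is assumed to be (handle, center-x, center-y, width, height).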
        // Issue the lock command for tracker 1
        if (stInputPara.unFrmId == 10)
        {
            ARIDLL_LockCommand(pTracker1, 228 * 2, 279 * 2, 40, 25);
        }

        // Issue the lock command for tracker 2
        if (stInputPara.unFrmId == 300)
        {
            ARIDLL_LockCommand(pTracker2, 228 * 2, 279 * 2, 40, 25);
        }

        // Build the image descriptor passed to the trackers
        GD_VIDEO_FRAME_S img = { 0 };
        img.enPixelFormat = GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_GRAY_Y8;
        img.u32Width = nWidth;
        img.u32Height = nHeight;
        img.u32Stride[0] = img.u32Width * 1;
        img.u64VirAddr[0] = (unsigned char*)pY8Data;

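        // Both tracker instances consume the same Y8 frame; they are dispatched to the
        // thread pool and joined (get()) before the timer is stopped.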
        // Run the algorithm main-control API
        std::future<int> out1 = executor.commit("Tracker1", ARIDLL_RunController, pTracker1, img, stInputPara, &stOutput1);
        std::future<int> out2 = executor.commit("Tracker2", ARIDLL_RunController, pTracker2, img, stInputPara, &stOutput2);
        out1.get();
        out2.get();

        tm.stop();

        printf("time:%.2f\n", tm.getTimeMilli());

        fprintf(file, "%.1f\n", tm.getTimeMilli());

        // Clone before drawing so each panel shows one tracker's output
        auto frame2 = frame.clone();
        draw(frame, stOutput1);
        draw(frame2, stOutput2);

        cv::Mat contact;
        cv::hconcat(frame, frame2, contact);

        cv::putText(contact, "bm1684x fps:" + std::to_string(1000 / tm.getTimeMilli()), cv::Point(100, 50),
                    cv::FONT_HERSHEY_COMPLEX, 6, cv::Scalar(255, 0, 0));

        //writer << contact;
    }

    AI_Det_Release();

    delete[] pY8Data;

    fclose(file);
    writer.release();
    capture.release();

    return 0;
}