|
|
/*
|
|
|
* @Author: turboLIU
|
|
|
* @Date: 2023-06-12 14:13:24
|
|
|
* @LastEditTime: 2025-06-26 09:36:32
|
|
|
* @Description: Do not edit
|
|
|
* @FilePath: /Algo_ACL_Common/demo/AI_API_demo.cpp
|
|
|
*/
|
|
|
// #include <type_traits>
|
|
|
#include <unistd.h>
|
|
|
#include <thread>
|
|
|
#include <sys/time.h>
|
|
|
#include <dirent.h>
|
|
|
#include <sys/stat.h>
|
|
|
// #include <pthread.h>
|
|
|
#include "AI_API.h"
|
|
|
#include "detection_type_api.h"
|
|
|
// #include "environment.h"
|
|
|
#include "opencv2/opencv.hpp"
|
|
|
// #include "utils.h"
|
|
|
|
|
|
#define IMAGECOUNT 500
|
|
|
// #define IMAGEWIDTH 512
|
|
|
// #define IMAGEHEIGHT 512
|
|
|
#define GSTREAM NV12
|
|
|
#define IMAGEWIDTH_VIS 1920
|
|
|
#define IMAGEHEIGHT_VIS 1088
|
|
|
#define IMAGEWIDTH_IR 640
|
|
|
#define IMAGEHEIGHT_IR 512
|
|
|
|
|
|
#define InitWithJson 1
|
|
|
#define UseGDVideoFrame 0
|
|
|
|
|
|
// #define TESTIMAGEVIS "../models/Detect/quantImages/1488264581.jpg"
|
|
|
// #define TESTIMAGEIR "../models/Detect/quantImages/webwxgetmsgimg.jpeg"
|
|
|
|
|
|
|
|
|
// static bool TESTVIS = false;
|
|
|
// static bool TESTIR = false;
|
|
|
|
|
|
static std::string VIDEONAME = "1970010100052511";
|
|
|
static std::string IMGFOLDER = "../models/Detect/test_images/" + VIDEONAME;
|
|
|
static std::string DSTFOLDER = "./results/";
|
|
|
|
|
|
// std::vector<std::string> imgnames;
|
|
|
bool stopFlag = false;
|
|
|
|
|
|
static unsigned char SegDet_Colors[36] = {255,0,0, 0,255,0, 0,0,255,
|
|
|
100,100,0, 0,100,100, 100,0,100,
|
|
|
100,100,100, 50,100,50, 50,50,100,
|
|
|
100,50,50, 20,60,120, 120,60,20};
|
|
|
|
|
|
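// List every entry in dirpath (skipping "." and "..") and append "dirpath/name" to filenames.
// Returns 0 on success, -1 if the directory cannot be opened.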
int GetDirNames(std::string dirpath, std::vector<std::string> &filenames){
|
|
|
DIR* pdir;
|
|
|
struct dirent *ptr;
|
|
|
int ret;
|
|
|
pdir = opendir(dirpath.c_str());
|
|
|
if(!pdir){
|
|
|
printf("dripath not exist, path: %s\n", dirpath.c_str());
|
|
|
return -1;
|
|
|
}
|
|
|
while ((ptr = readdir(pdir)) != NULL)
|
|
|
{
|
|
|
if(strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0){
|
|
|
filenames.push_back(dirpath + "/" + ptr->d_name);
|
|
|
}
|
|
|
}
|
|
|
closedir(pdir);
|
|
|
return 0;
|
|
|
|
|
|
}
|
|
|
|
|
|
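// Return the elapsed time between starttime and endtime in milliseconds.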
float getDtimeMiliSecond(timeval starttime, timeval endtime){
|
|
|
float dtime;
|
|
|
dtime = 1000000 * (endtime.tv_sec - starttime.tv_sec) + (endtime.tv_usec - starttime.tv_usec);
|
|
|
dtime = dtime/1000000;
|
|
|
return dtime * 1000;
|
|
|
}
|
|
|
|
|
|
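// Derive a simple color from a class index by cycling a stride of 200 across the three
// channels and taking each channel modulo 255.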
std::vector<int> get_color(int cls){
|
|
|
cls += 1;
|
|
|
int stride = 200;
|
|
|
std::vector<int> clr = {0,0,0};
|
|
|
for(int k=0; k<cls; k++){
|
|
|
int i = k % 3;
|
|
|
clr[i] += stride;
|
|
|
}
|
|
|
for(int k = 0; k< 3; k++){
|
|
|
clr[k] = clr[k] %255;
|
|
|
}
|
|
|
return clr;
|
|
|
}
|
|
|
|
|
|
|
|
|
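// Draw a dotted line from p1 to p2 as small circles spaced roughly 15 pixels apart,
// handling the horizontal, vertical and slanted cases separately.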
void draw_dotted_line(cv::Mat img, cv::Point2f p1, cv::Point2f p2, cv::Scalar color, int thickness)
|
|
|
{
|
|
|
    float n = 15; // spacing between dots
|
|
|
float w = p2.x - p1.x, h = p2.y - p1.y;
|
|
|
float l = sqrtf(w * w + h * h);
|
|
|
int m = l / n;
|
|
|
    n = l / m; // adjust the spacing so that the number of dots is an integer
|
|
|
|
|
|
    cv::circle(img, p1, 1, color, thickness); // draw the start point
|
|
|
    cv::circle(img, p2, 1, color, thickness); // draw the end point
|
|
|
    // draw the intermediate points
|
|
|
    if (p1.y == p2.y) // horizontal line: y is constant
|
|
|
{
|
|
|
float x1 = std::min(p1.x, p2.x);
|
|
|
float x2 = std::max(p1.x, p2.x);
|
|
|
for (float x = x1 + n; x < x2; x = x + n)
|
|
|
cv::circle(img, cv::Point2f(x, p1.y), 1, color, thickness);
|
|
|
}
|
|
|
    else if (p1.x == p2.x) // vertical line: x is constant
|
|
|
{
|
|
|
float y1 = std::min(p1.y, p2.y);
|
|
|
float y2 = std::max(p1.y, p2.y);
|
|
|
for (float y = y1 + n; y < y2; y = y + n)
|
|
|
cv::circle(img, cv::Point2f(p1.x, y), 1, color, thickness);
|
|
|
}
|
|
|
    else // slanted line, neither horizontal nor vertical
|
|
|
{
|
|
|
        // two-point form of the line equation: (y-y1)/(y2-y1) = (x-x1)/(x2-x1) -> y = (y2-y1)*(x-x1)/(x2-x1) + y1
|
|
|
        float m = n * fabsf(w) / l; // horizontal step between consecutive dots
|
|
|
float k = h / w;
|
|
|
float x1 = std::min(p1.x, p2.x);
|
|
|
float x2 = std::max(p1.x, p2.x);
|
|
|
for (float x = x1 + m; x < x2; x = x + m)
|
|
|
cv::circle(img, cv::Point2f(x, k * (x - p1.x) + p1.y), 1, color, thickness);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
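// Draw a dotted rectangle whose opposite corners are p1 and p2, edge by edge.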
void draw_dotted_rect(cv::Mat img, cv::Point2f p1, cv::Point2f p2, cv::Scalar color, int thickness){
|
|
|
cv::Point2f lt(p1.x, p1.y);
|
|
|
cv::Point2f rt(p2.x, p1.y);
|
|
|
cv::Point2f lb(p1.x, p2.y);
|
|
|
cv::Point2f rb(p2.x, p2.y);
|
|
|
|
|
|
draw_dotted_line(img, lt, rt, color, thickness);
|
|
|
draw_dotted_line(img, rt, rb, color, thickness);
|
|
|
draw_dotted_line(img, lb, rb, color, thickness);
|
|
|
draw_dotted_line(img, lt, lb, color, thickness);
|
|
|
|
|
|
return;
|
|
|
}
|
|
|
|
|
|
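// Repack planar YUV I420 (Y plane, then U plane, then V plane) into semi-planar NV12
// (Y plane followed by interleaved UV).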
void swapYUV_I420toNV12(unsigned char* i420bytes, unsigned char* nv12bytes, int width, int height)
|
|
|
{
|
|
|
int nLenY = width * height;
|
|
|
int nLenU = nLenY / 4;
|
|
|
|
|
|
memcpy(nv12bytes, i420bytes, width * height);
|
|
|
|
|
|
for (int i = 0; i < nLenU; i++)
|
|
|
{
|
|
|
nv12bytes[nLenY + 2 * i] = i420bytes[nLenY + i]; // U
|
|
|
nv12bytes[nLenY + 2 * i + 1] = i420bytes[nLenY + nLenU + i]; // V
|
|
|
}
|
|
|
}
|
|
|
|
|
|
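// Convert a BGR image to NV12: OpenCV converts BGR to I420, then the chroma planes are
// repacked into the interleaved NV12 layout. dst is a (1.5*h) x w single-channel Mat.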
void BGR2YUV_nv12(cv::Mat src, cv::Mat &dst)
|
|
|
{
|
|
|
int w_img = src.cols;
|
|
|
int h_img = src.rows;
|
|
|
dst = cv::Mat(h_img*1.5, w_img, CV_8UC1, cv::Scalar(0));
|
|
|
cv::Mat src_YUV_I420(h_img*1.5, w_img, CV_8UC1, cv::Scalar(0)); //YUV_I420
|
|
|
cvtColor(src, src_YUV_I420, cv::COLOR_BGR2YUV_I420);
|
|
|
swapYUV_I420toNV12(src_YUV_I420.data, dst.data, w_img, h_img);
|
|
|
}
|
|
|
|
|
|
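// Alpha-blend a per-pixel class mask onto showImg, coloring each pixel by get_color(class id).
// T is the element type of the mask data; showImg is assumed to have the same width and
// height as the mask.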
template<typename T>
|
|
|
int draw_mask(cv::Mat &showImg, TenMat* mask){
|
|
|
float alpha = 0.5;
|
|
|
int inwidth = mask->width;
|
|
|
int inheight = mask->height;
|
|
|
int depth = showImg.channels();
|
|
|
T* maskdata = (T*)mask->data;
|
|
|
for(int i=0; i<inheight; i++){
|
|
|
for(int j=0; j<inwidth; j++){
|
|
|
uint8_t uvalue = (uint8_t)maskdata[i*inwidth + j];
|
|
|
std::vector<int> color = get_color(uvalue);
|
|
|
for(int k=0; k<depth; k++){
|
|
|
showImg.data[i*inwidth*depth + j*depth + k] \
|
|
|
= (uint8_t)((1-alpha)*showImg.data[i*inwidth*depth + j*depth + k] + alpha * color.at(k));
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
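// Draw detection boxes and "class_score" labels onto showImg using the SegDet_Colors palette.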
int draw_dets(cv::Mat &showImg, std::vector<objinfo> dets){
|
|
|
int ret = 0;
|
|
|
for(auto det: dets){
|
|
|
int x1 = det.x1;
|
|
|
int x2 = det.x2;
|
|
|
int y1 = det.y1;
|
|
|
int y2 = det.y2;
|
|
|
int cls = det.classNum;
|
|
|
std::vector<int> color = {SegDet_Colors[3*cls + 0], SegDet_Colors[3*cls + 1], SegDet_Colors[3*cls + 2]};//get_color(cls);
|
|
|
cv::rectangle(showImg, cv::Point(x1,y1), cv::Point(x2,y2), cv::Scalar(color[0],color[1],color[2]),2);
|
|
|
// draw_dotted_rect(resizedImg, cv::Point2f(det.x1, det.y1), cv::Point2f(det.x2, det.y2), cv::Scalar(color[0],color[1],color[2]),4);
|
|
|
char name[50];
|
|
|
sprintf(name, "%d_%.3f", det.classNum, det.score);
|
|
|
cv::putText(showImg, name, cv::Point(x1,y1), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(color[0],color[1],color[2]), 4);
|
|
|
}
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
|
|
|
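// Synchronous detection demo: build a DETECTION_API, initialize it from JSON (or from
// hard-coded parameters), run preprocess/infer/postprocess once on a single test image,
// draw the detections to ./result.jpg, then deinitialize.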
int test_class(){
|
|
|
int ret = 0;
|
|
|
// bool initWithJson = true;
|
|
|
// bool useGDVideo = true;
|
|
|
std::string visdetmodelname = "../models/Detect/atlas200/xh.xu/507A_IR_20241111_dynamic.om";
|
|
|
std::string visdetparamname = "../models/Detect/atlas200/xh.xu/507A_IR_20241111_dynamic.json";
|
|
|
|
|
|
// build class
|
|
|
DETECTION_API* detAPI = new DETECTION_API();
|
|
|
if(InitWithJson){
|
|
|
ret = detAPI->AIGOinit(visdetmodelname.c_str(), visdetparamname.c_str(), 0);
|
|
|
}else{
|
|
|
detAPI->netWidth = 640;
|
|
|
detAPI->netHeight = 640;
|
|
|
detAPI->clsnum = 15;
|
|
|
detAPI->anchors = {{10,13, 16,30, 33,23},{30,61, 62,45, 59,119},{116,90, 156,198, 373,326}};
|
|
|
        // For the label mapping, consult the corresponding algorithm engineer.
|
|
|
uint32_t modelclass[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14};
|
|
|
uint32_t standerclass[] = {10000, 20000, 30000, 40000, 50000,
|
|
|
60000, 70000, 80000, 90000, 100000,
|
|
|
110000, 120000, 130000, 140000, 150000};
|
|
|
|
|
|
ret = detAPI->AIGOinit(visdetmodelname.c_str(), 0);
|
|
|
if(ret){
|
|
|
printf("init error with ret=%d\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
ret = detAPI->AIGOsetLabelMap(modelclass, standerclass, sizeof(modelclass)/sizeof(uint32_t));
|
|
|
if(ret){
|
|
|
printf("setLabelMap error with ret=%d\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
std::string visdetimgname = "../models/Detect/quantImages/IR_test_tank.avi_000147.jpg";
|
|
|
std::string vissegimgname = "../models/Segment/test_images_vl/20230704_102059.MP4_20230711_195345.335.bmp";
|
|
|
|
|
|
float timecost = 0;
|
|
|
float durtime = 0;
|
|
|
struct timeval st, et;
|
|
|
|
|
|
    cv::Mat visdetImage = cv::imread(visdetimgname);
    if(visdetImage.empty()){
        printf("read image error %s\n", visdetimgname.c_str());
        return 1;
    }
    cv::cvtColor(visdetImage, visdetImage, cv::COLOR_BGR2RGB);
|
|
|
cv::Mat visdetresize, vissegresize;
|
|
|
|
|
|
cv::resize(visdetImage, visdetresize, cv::Size(detAPI->netWidth, detAPI->netHeight));
|
|
|
|
|
|
// char* zerosInput = (char*)malloc(visdetresize.cols*visdetresize.rows*visdetresize.channels() * sizeof(char));
|
|
|
cv::Mat NV12Matvis, NV12Matir;
|
|
|
// convert BGR to NV12
|
|
|
BGR2YUV_nv12(visdetresize, NV12Matvis);
|
|
|
|
|
|
std::vector<objinfo> dets;
|
|
|
|
|
|
GD_VIDEO_FRAME_S* config = (GD_VIDEO_FRAME_S*)malloc(sizeof(GD_VIDEO_FRAME_S));
|
|
|
|
|
|
for(int i =0; i<1; i++){
|
|
|
|
|
|
ImgMat imgvis;
|
|
|
if(UseGDVideoFrame){
|
|
|
if(GSTREAM == NV12){
|
|
|
imgvis.data = NULL;
|
|
|
imgvis.inlayout = NV12;
|
|
|
imgvis.depth = 1;
|
|
|
// Y
|
|
|
config->u64VirAddr[0] = NV12Matvis.data;
|
|
|
// UV
|
|
|
config->u64VirAddr[1] = NV12Matvis.data + visdetresize.cols * visdetresize.rows;
|
|
|
config->u32Stride[0] = visdetresize.cols;
|
|
|
                config->u32Stride[1] = visdetresize.cols; // NV12 UV-plane stride equals the image width
|
|
|
}else if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
imgvis.data = NULL;
|
|
|
imgvis.inlayout = RGB;
|
|
|
imgvis.depth = visdetresize.channels();
|
|
|
config->u64VirAddr[0] = visdetresize.data;
|
|
|
config->u32Stride[0] = visdetresize.cols;
|
|
|
}
|
|
|
imgvis.width = visdetresize.cols;
|
|
|
imgvis.height = visdetresize.rows;
|
|
|
imgvis.layout = HWC;
|
|
|
imgvis.mode = VIS;
|
|
|
imgvis.timestamp = i;
|
|
|
imgvis.cfg = (long long)config;
|
|
|
}else{
|
|
|
if(GSTREAM == NV12){
|
|
|
imgvis.data = NV12Matvis.data;
|
|
|
imgvis.inlayout = NV12;
|
|
|
imgvis.depth = 1;
|
|
|
}else if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
imgvis.data = visdetresize.data;
|
|
|
imgvis.inlayout = RGB;
|
|
|
imgvis.depth = visdetresize.channels();
|
|
|
}
|
|
|
imgvis.width = visdetresize.cols;
|
|
|
imgvis.height = visdetresize.rows;
|
|
|
imgvis.layout = HWC;
|
|
|
imgvis.mode = VIS;
|
|
|
imgvis.timestamp = i;
|
|
|
imgvis.cfg = (long long)config;
|
|
|
}
|
|
|
|
|
|
// printf("loop ImgMat * %p", &img);
|
|
|
gettimeofday(&st, NULL);
|
|
|
|
|
|
// imgvis.data = (unsigned char*)zerosInput;
|
|
|
dets.clear();
|
|
|
ret = detAPI->AIGOpreprocess(imgvis, true);
|
|
|
ret = detAPI->AIGOinfer();
|
|
|
ret = detAPI->AIGOpostprocess(dets);
|
|
|
|
|
|
printf("result dets %d\n", dets.size());
|
|
|
|
|
|
gettimeofday(&et, NULL);
|
|
|
durtime = getDtimeMiliSecond(st, et);
|
|
|
printf("runsync cost: %.3f", durtime);
|
|
|
timecost += durtime;
|
|
|
|
|
|
}
|
|
|
|
|
|
printf("avg timecost is: %f\n", timecost/IMAGECOUNT);
|
|
|
|
|
|
for(auto d : dets){
|
|
|
std::vector<int> color = get_color(d.classNum);
|
|
|
cv::rectangle(visdetresize, cv::Point2d(d.x1, d.y1), cv::Point2d(d.x2, d.y2), cv::Scalar(color.at(0), color.at(1), color.at(2)), 2);
|
|
|
char name[50];
|
|
|
sprintf(name, "%d_%.4f", d.classNum, d.score);
|
|
|
cv::putText(visdetresize, name, cv::Point2d(d.x1, d.y1), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(color.at(0), color.at(1), color.at(2)), 1);
|
|
|
// cv::putText(img, )
|
|
|
|
|
|
}
|
|
|
cv::imwrite("./result.jpg", visdetresize);
|
|
|
|
|
|
|
|
|
ret = detAPI->AIGOdeinit();
|
|
|
|
|
|
printf("\n---------------------current model size: %dx%d\n", visdetresize.cols, visdetresize.rows);
|
|
|
printf("\n-----------end--------------\n");
|
|
|
sleep(2);
|
|
|
    return ret;
}
|
|
|
|
|
|
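// Feed every image in imgfolder to the engine asynchronously: each frame is resized to
// 960x768, center-cropped to 512x512, converted to NV12 and submitted via AIGOrunAsync,
// paced to roughly a 40 ms (25 Hz) frame interval. Sets stopFlag when the folder is done.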
template<typename T>
|
|
|
void run_folder(T* classor, std::string imgfolder){
|
|
|
int ret = 0;
|
|
|
std::vector<std::string> imgnames;
|
|
|
ret = GetDirNames(imgfolder, imgnames);
|
|
|
if(ret){
|
|
|
printf("get images error\n");
|
|
|
exit(1);
|
|
|
}
|
|
|
std::sort(imgnames.begin(), imgnames.end());
|
|
|
struct timeval st,et;
|
|
|
float durtime = 0;
|
|
|
|
|
|
for(int i=0; i<imgnames.size(); i++){
|
|
|
GD_VIDEO_FRAME_S* config = (GD_VIDEO_FRAME_S*)malloc(sizeof(GD_VIDEO_FRAME_S));
|
|
|
gettimeofday(&st, NULL);
|
|
|
cv::Mat img = cv::imread(imgnames.at(i));
|
|
|
int ww = img.cols;
|
|
|
int hh = img.rows;
|
|
|
|
|
|
cv::Mat resizeImage;
|
|
|
cv::resize(img, resizeImage, cv::Size(960,768));
|
|
|
cv::Rect roi(960/2 - 512/2, 768/2-512/2, 512,512);
|
|
|
cv::Mat roiImg = resizeImage(roi);
|
|
|
cv::Mat NV12Mat;
|
|
|
BGR2YUV_nv12(roiImg, NV12Mat);
|
|
|
|
|
|
ImgMat frame;
|
|
|
frame.data = NV12Mat.data;
|
|
|
frame.width = roiImg.cols;
|
|
|
frame.height = roiImg.rows;
|
|
|
frame.depth = 1;
|
|
|
frame.layout = HWC;
|
|
|
frame.inlayout = NV12;
|
|
|
frame.mode = IR;
|
|
|
config->u32FrameCnt = i;
|
|
|
config->u32Width = frame.width;
|
|
|
config->u32Height = frame.height;
|
|
|
frame.cfg = (long long)config;
|
|
|
gettimeofday(&et, NULL);
|
|
|
durtime = getDtimeMiliSecond(st, et);
|
|
|
if(durtime < 40){
|
|
|
            usleep((40 - durtime)*1000); // pad to a 40 ms frame interval (25 Hz)
|
|
|
}
|
|
|
ret = classor->AIGOrunAsync(frame, true);
|
|
|
if(ret){
|
|
|
printf("\nERROR\n");
|
|
|
}
|
|
|
// sleep(0.04);
|
|
|
|
|
|
}
|
|
|
stopFlag = true;
|
|
|
// free(config);
|
|
|
}
|
|
|
|
|
|
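// Poll AIGOgetResult for the frames submitted by run_folder and write annotated images
// (detection boxes or blended segmentation masks) into DSTFOLDER. Works for both
// DETECTION_API and SEGMENTION_API.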
template<typename T>
|
|
|
void get_folder_result(T* classor, std::string imgfolder){
|
|
|
int ret = 0;
|
|
|
std::vector<std::string> imgnames;
|
|
|
ret = GetDirNames(imgfolder, imgnames);
|
|
|
std::sort(imgnames.begin(), imgnames.end());
|
|
|
|
|
|
std::string dstpath = DSTFOLDER;
|
|
|
|
|
|
std::vector<objinfo> dets;
|
|
|
TenMat* mask = (TenMat*)malloc(sizeof(TenMat));
|
|
|
    mask->data = (void*)malloc(2000*2000*3); // allocate a generously sized buffer
|
|
|
mask->dataByte = 2000*2000*3;
|
|
|
float alpha = 0.5;
|
|
|
|
|
|
bool drawed = true;
|
|
|
int timestamp = 0;
|
|
|
long long cfg = 0;
|
|
|
int last_timestamp = 0;
|
|
|
GD_VIDEO_FRAME_S* config = NULL;
|
|
|
|
|
|
while (timestamp + 1 < imgnames.size())
|
|
|
{
|
|
|
        usleep(10000); // wait 10 ms (sleep() only accepts whole seconds)
|
|
|
if constexpr (std::is_same<T, DETECTION_API>::value){
|
|
|
dets.clear();
|
|
|
ret = classor->AIGOgetResult(cfg, dets, true);
|
|
|
if(ret == -1)break;
|
|
|
if(ret == 0){
|
|
|
config = (GD_VIDEO_FRAME_S*)cfg;
|
|
|
cv::Mat im = cv::imread(imgnames.at(config->u32FrameCnt));
|
|
|
timestamp = config->u32FrameCnt;
|
|
|
printf("\ntimestamp: %d get result -------\n", timestamp);
|
|
|
last_timestamp = timestamp;
|
|
|
// dets.insert(dets.end(), visdets.begin(), visdets.end());
|
|
|
if(drawed){
|
|
|
printf("get_result det=%d\n", dets.size());
|
|
|
|
|
|
cv::Mat showImg;
|
|
|
cv::resize(im, showImg, cv::Size(config->u32Width, config->u32Height));
|
|
|
ret = draw_dets(showImg, dets);
|
|
|
char savename[100];
|
|
|
sprintf(savename, "%s/%08d.jpg", dstpath.c_str(), timestamp);
|
|
|
cv::imwrite(savename, showImg);
|
|
|
// drawed = false;
|
|
|
}
|
|
|
free(config);
|
|
|
}
|
|
|
if(ret == 1){
|
|
|
usleep(10);
|
|
|
}
|
|
|
}else if constexpr(std::is_same<T, SEGMENTION_API>::value){
|
|
|
ret = classor->AIGOgetResult(cfg, mask, true);
|
|
|
if(ret == -1)break;
|
|
|
if(ret == 0){
|
|
|
config = (GD_VIDEO_FRAME_S*)cfg;
|
|
|
cv::Mat im = cv::imread(imgnames.at(config->u32FrameCnt));
|
|
|
timestamp = config->u32FrameCnt;
|
|
|
printf("\ntimestamp: %d get result -------\n", timestamp);
|
|
|
last_timestamp = timestamp;
|
|
|
cv::Mat showImg;
|
|
|
cv::resize(im, showImg, cv::Size(config->u32Width, config->u32Height));
|
|
|
if(drawed){
|
|
|
if(mask->type == AI_FLOAT){
|
|
|
ret = draw_mask<float>(showImg, mask);
|
|
|
}else if(mask->type == AI_UINT8){
|
|
|
ret = draw_mask<uint8_t>(showImg, mask);
|
|
|
}else if(mask->type == AI_INT32){
|
|
|
ret = draw_mask<int32_t>(showImg, mask);
|
|
|
}
|
|
|
if(ret){
|
|
|
printf("draw mask error\n");
|
|
|
}
|
|
|
// drawed = false;
|
|
|
char savename[100];
|
|
|
sprintf(savename, "%s/%08d.jpg", dstpath.c_str(), timestamp);
|
|
|
cv::imwrite(savename, showImg);
|
|
|
}
|
|
|
free(config);
|
|
|
}else if(ret == 1){
|
|
|
usleep(10);
|
|
|
}
|
|
|
|
|
|
}else{
|
|
|
return;
|
|
|
}
|
|
|
    }
    // release the mask buffer that was allocated once before the loop
    free(mask->data);
    free(mask);
|
|
|
}
|
|
|
|
|
|
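// Producer thread for the async demos: submit the same ImgMat IMAGECOUNT times through
// AIGOrunAsync, tagging each frame with its index via the GD_VIDEO_FRAME_S config, then
// set stopFlag.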
template<typename T>
|
|
|
void run_image_(T* classor, ImgMat img){
|
|
|
int ret = 0;
|
|
|
GD_VIDEO_FRAME_S* config = (GD_VIDEO_FRAME_S*)img.cfg;
|
|
|
for(int i=0; i<IMAGECOUNT; i++){
|
|
|
// config->u32FrameCnt = i;
|
|
|
// img.cfg = (long long)config;
|
|
|
// if(i == IMAGECOUNT/2){
|
|
|
// printf("\n-------sleeping 60seconds--------\n");
|
|
|
// sleep(60);
|
|
|
// }
|
|
|
config->u32FrameCnt = i;
|
|
|
img.cfg = (long long)config;
|
|
|
ret = classor->AIGOrunAsync(img, false);
|
|
|
if(ret){
|
|
|
printf("\nERROR\n");
|
|
|
}
|
|
|
        usleep(500000); // wait 500 ms (sleep() only accepts whole seconds)
|
|
|
|
|
|
}
|
|
|
stopFlag = true;
|
|
|
// free(config);
|
|
|
|
|
|
}
|
|
|
|
|
|
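// Run the synchronous path (AIGOpreprocess / AIGOinfer / AIGOpostprocess) IMAGECOUNT times
// on the same frame.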
template<typename T>
|
|
|
void run_image_sync(T* classor, ImgMat img){
|
|
|
int ret = 0;
|
|
|
cv::Mat showImg;
|
|
|
bool draw = true;
|
|
|
std::vector<objinfo> dets;
|
|
|
// GD_VIDEO_FRAME_S* config = (GD_VIDEO_FRAME_S*)img.cfg;
|
|
|
for(int i=0; i<IMAGECOUNT; i++){
|
|
|
dets.clear();
|
|
|
ret = classor->AIGOpreprocess(img, true);
|
|
|
ret = classor->AIGOinfer();
|
|
|
ret = classor->AIGOpostprocess(dets);
|
|
|
if(ret){
|
|
|
printf("\nERROR\n");
|
|
|
}
|
|
|
        usleep(500000); // wait 500 ms (sleep() only accepts whole seconds)
|
|
|
}
|
|
|
// if(draw){
|
|
|
// if(GSTREAM == NV12){
|
|
|
// cv::Mat nv12Mat(int(img.height*1.5), img.width, CV_8UC1, img.data);
|
|
|
// cv::cvtColor(nv12Mat, showImg, cv::COLOR_YUV2BGR_NV12);
|
|
|
// }else{
|
|
|
// showImg = cv::Mat(cv::Size(img.height, img.width), CV_8UC3, img.data);
|
|
|
|
|
|
// }
|
|
|
// ret = draw_dets(showImg, dets);
|
|
|
// cv::imwrite("./result.jpg", showImg);
|
|
|
// draw = false;
|
|
|
// }
|
|
|
|
|
|
}
|
|
|
|
|
|
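// Consumer thread for the async demos: rebuild a displayable BGR image from the input
// frame, then poll AIGOgetResult until all IMAGECOUNT frames have been consumed, optionally
// drawing the detections or segmentation mask (drawing starts disabled: drawed = false).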
template<typename T>
|
|
|
void get_result_(T* classor, ImgMat img){
|
|
|
int ret = 0;
|
|
|
cv::Mat showImg;
|
|
|
void* data = (void*)malloc(img.width*img.height*3);
|
|
|
if(UseGDVideoFrame){
|
|
|
GD_VIDEO_FRAME_S * config = (GD_VIDEO_FRAME_S*)img.cfg;
|
|
|
if(GSTREAM == NV12){
|
|
|
printf("memcpy Y\n");
|
|
|
for(int i=0; i<img.height; i++){
|
|
|
            memcpy((unsigned char*)data + i*img.width,
|
|
|
config->u64VirAddr[0]+i*config->u32Stride[0],
|
|
|
img.width);
|
|
|
}
|
|
|
printf("memcpy UV\n");
|
|
|
for(int i=0; i<img.height/2; i++){
|
|
|
            memcpy((unsigned char*)data + img.width*img.height + i*img.width,
|
|
|
config->u64VirAddr[1]+i*config->u32Stride[1],
|
|
|
img.width);
|
|
|
}
|
|
|
cv::Mat nv12Mat(int(img.height*1.5), img.width, CV_8UC1, data);
|
|
|
cv::cvtColor(nv12Mat, showImg, cv::COLOR_YUV2BGR_NV12);
|
|
|
}else{
|
|
|
            showImg = cv::Mat(cv::Size(img.width, img.height), CV_8UC3, config->u64VirAddr[0]); // cv::Size is (width, height)
|
|
|
}
|
|
|
}else{
|
|
|
if(GSTREAM == NV12){
|
|
|
cv::Mat nv12Mat(int(img.height*1.5), img.width, CV_8UC1, img.data);
|
|
|
cv::cvtColor(nv12Mat, showImg, cv::COLOR_YUV2BGR_NV12);
|
|
|
}else{
|
|
|
            showImg = cv::Mat(cv::Size(img.width, img.height), CV_8UC3, img.data); // cv::Size is (width, height)
|
|
|
|
|
|
}
|
|
|
}
|
|
|
bool drawed = false;
|
|
|
|
|
|
int timestamp = 0;
|
|
|
long long cfg = 0;
|
|
|
int last_timestamp = 0;
|
|
|
GD_VIDEO_FRAME_S* config = NULL;
|
|
|
|
|
|
while (timestamp+1 < IMAGECOUNT)
|
|
|
{
|
|
|
        usleep(10000); // wait 10 ms (sleep() only accepts whole seconds)
|
|
|
if constexpr (std::is_same<T, DETECTION_API>::value){
|
|
|
std::vector<objinfo> dets;
|
|
|
ret = classor->AIGOgetResult(cfg, dets, true);
|
|
|
if(ret == -1)break;
|
|
|
if(ret == 0){
|
|
|
// dets.insert(dets.end(), visdets.begin(), visdets.end());
|
|
|
if(drawed){
|
|
|
printf("get_result det=%d\n", dets.size());
|
|
|
ret = draw_dets(showImg, dets);
|
|
|
cv::imwrite("./result.jpg", showImg);
|
|
|
drawed = false;
|
|
|
}
|
|
|
config = (GD_VIDEO_FRAME_S*)cfg;
|
|
|
timestamp = config->u32FrameCnt;
|
|
|
printf("\ntimestamp: %d get result -------\n", timestamp);
|
|
|
last_timestamp = timestamp;
|
|
|
}
|
|
|
if(ret == 1){
|
|
|
usleep(10);
|
|
|
}
|
|
|
}else if constexpr(std::is_same<T, SEGMENTION_API>::value){
|
|
|
TenMat* mask = (TenMat*)malloc(sizeof(TenMat));
|
|
|
// mask->width = img.width;
|
|
|
// mask->height = img.height;
|
|
|
// mask->depth = 1;
|
|
|
// mask->dataByte = mask->height*mask->width*sizeof(float); //
|
|
|
            mask->data = (void*)malloc(2000*2000*3); // allocate a generously sized buffer
|
|
|
mask->dataByte = 2000*2000*3;
|
|
|
float alpha = 0.5;
|
|
|
ret = classor->AIGOgetResult(cfg, mask, true);
|
|
|
if(ret == -1)break;
|
|
|
if(ret == 0){
|
|
|
if(drawed){
|
|
|
if(mask->type == AI_FLOAT){
|
|
|
ret = draw_mask<float>(showImg, mask);
|
|
|
}else if(mask->type == AI_UINT8){
|
|
|
ret = draw_mask<uint8_t>(showImg, mask);
|
|
|
}else if(mask->type == AI_INT32){
|
|
|
ret = draw_mask<int32_t>(showImg, mask);
|
|
|
}
|
|
|
if(ret){
|
|
|
printf("draw mask error\n");
|
|
|
// return ret;
|
|
|
}
|
|
|
drawed = false;
|
|
|
cv::imwrite("../models/Segment/result1.jpg", showImg);
|
|
|
}
|
|
|
config = (GD_VIDEO_FRAME_S*)cfg;
|
|
|
timestamp = config->u32FrameCnt;
|
|
|
printf("\ntimestamp: %d get result -------\n", timestamp);
|
|
|
last_timestamp = timestamp;
|
|
|
}else if(ret == 1){
|
|
|
usleep(10);
|
|
|
}
|
|
|
free(mask->data);
|
|
|
free(mask);
|
|
|
}else{
|
|
|
return;
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
|
|
|
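// Like run_image_, but hand the pixel buffers to the engine through the GD_VIDEO_FRAME_S
// config (u64VirAddr / u32Stride) with img.data set to NULL.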
template<typename T>
|
|
|
void run_image_cfg(T* detector, ImgMat img){
|
|
|
int ret = 0;
|
|
|
GD_VIDEO_FRAME_S* config = (GD_VIDEO_FRAME_S*)malloc(sizeof(GD_VIDEO_FRAME_S));
|
|
|
ImgMat tmpimg;
|
|
|
memcpy(&tmpimg, &img, sizeof(ImgMat));
|
|
|
for(int i=0; i<IMAGECOUNT; i++){
|
|
|
if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
config->u64VirAddr[0] = tmpimg.data;
|
|
|
config->u32Stride[0] = tmpimg.width;
|
|
|
img.data = NULL;
|
|
|
}else if(GSTREAM == NV12 || GSTREAM == NV21){
|
|
|
config->u64VirAddr[0] = tmpimg.data;
|
|
|
config->u64VirAddr[1] = (unsigned char*)tmpimg.data + tmpimg.width*tmpimg.height;
|
|
|
config->u32Stride[0] = tmpimg.width;
|
|
|
config->u32Stride[1] = tmpimg.width;
|
|
|
img.data = NULL;
|
|
|
}else{
|
|
|
printf("\nGSTREAM not support\n");
|
|
|
exit(1);
|
|
|
}
|
|
|
config->u32FrameCnt = i;
|
|
|
img.cfg = (long long)config;
|
|
|
// img.cfg = i;
|
|
|
printf("\n------cfg %p--%p--%p--\n", img.cfg, config->u64VirAddr[0], config->u64VirAddr[1]);
|
|
|
ret = detector->AIGOrunAsync(img, false);
|
|
|
if(ret){
|
|
|
printf("\nERROR\n");
|
|
|
}
|
|
|
// sleep(1);
|
|
|
|
|
|
}
|
|
|
stopFlag = true;
|
|
|
free(config);
|
|
|
|
|
|
}
|
|
|
|
|
|
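// Asynchronous detection demo: initialize a DETECTION_API (from JSON or hard-coded
// parameters), demonstrate AIGOsetConfThr / AIGOsetNMSThr, then run one producer thread
// (run_image_) and one consumer thread (get_result_) on a single test image and report timing.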
int test_class_async(){
|
|
|
int ret = 0;
|
|
|
// bool InitWithJson = false;
|
|
|
// bool useGDVideo = true;
|
|
|
std::string imgname = "../models/Detect/quantImages/1488264581.jpg";
|
|
|
std::string modelname = "../models/Detect/1684x/vis_common_headers_1280x1024_bm1684x_F16_false.bmodel";
|
|
|
std::string paramname = "../models/Detect/1684x/vis_common_headers_1280x1024_bm1684x_F16_false.json";
|
|
|
|
|
|
// build class
|
|
|
DETECTION_API* detAPI = new DETECTION_API();
|
|
|
if(InitWithJson){
|
|
|
ret = detAPI->AIGOinit(modelname.c_str(), paramname.c_str(), 0);
|
|
|
}else{
|
|
|
detAPI->netWidth = 512;
|
|
|
detAPI->netHeight = 512;
|
|
|
detAPI->clsnum = 15;
|
|
|
// detAPI->anchors = {{10,13, 16,30, 33,23},{30,61, 62,45, 59,119},{116,90, 156,198, 373,326}};
|
|
|
detAPI->anchors = {{11,19, 23,30, 28,68},{62,53, 51,138, 123,130},{103,265, 217,371, 563,321}}; // x2500
|
|
|
        // For the label mapping, consult the corresponding algorithm engineer.
|
|
|
uint32_t modelclass[] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14};
|
|
|
uint32_t standerclass[] = {10000, 20000, 30000, 40000, 50000,
|
|
|
60000, 70000, 80000, 90000, 100000,
|
|
|
110000, 120000, 130000, 140000, 150000};
|
|
|
|
|
|
ret = detAPI->AIGOinit(modelname.c_str(), 0);
|
|
|
if(ret){
|
|
|
printf("init error with ret=%d\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
ret = detAPI->AIGOsetLabelMap(modelclass, standerclass, sizeof(modelclass)/sizeof(uint32_t));
|
|
|
if(ret){
|
|
|
printf("setLabelMap error with ret=%d\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
|
|
|
printf("get det threshold\n");
|
|
|
    ret = detAPI->AIGOsetConfThr(0.3, 60000); // called when setting the confidence threshold; shown here only to demonstrate the call
|
|
|
if(ret){
|
|
|
printf("set det threshold error\n");
|
|
|
return ret;
|
|
|
}
|
|
|
printf("\nclasses setted\n");
|
|
|
    ret = detAPI->AIGOsetNMSThr(0.45); // called when setting the NMS threshold; shown here only to demonstrate the call
|
|
|
float timecost = 0;
|
|
|
float durtime = 0;
|
|
|
struct timeval st, et;
|
|
|
|
|
|
    cv::Mat detImage = cv::imread(imgname);
    if(detImage.empty()){
        printf("read image error %s\n", imgname.c_str());
        return 1;
    }
    cv::cvtColor(detImage, detImage, cv::COLOR_BGR2RGB);
|
|
|
cv::Mat detresize;
|
|
|
cv::resize(detImage, detresize, cv::Size(detAPI->netWidth, detAPI->netHeight));
|
|
|
char* zerosInput = (char*)malloc(detresize.cols*detresize.rows*detresize.channels() * sizeof(char));
|
|
|
cv::Mat NV12Mat;
|
|
|
BGR2YUV_nv12(detresize, NV12Mat);
|
|
|
|
|
|
std::vector<objinfo> dets;
|
|
|
GD_VIDEO_FRAME_S* config = (GD_VIDEO_FRAME_S*)malloc(sizeof(GD_VIDEO_FRAME_S));
|
|
|
|
|
|
    // build the input arguments
|
|
|
ImgMat imgvis;
|
|
|
if(UseGDVideoFrame){
|
|
|
if(GSTREAM == NV12){
|
|
|
imgvis.data = NULL;
|
|
|
imgvis.inlayout = NV12;
|
|
|
imgvis.depth = 1;
|
|
|
// Y
|
|
|
config->u64VirAddr[0] = NV12Mat.data;
|
|
|
// UV
|
|
|
config->u64VirAddr[1] = NV12Mat.data + detresize.cols * detresize.rows;
|
|
|
config->u32Stride[0] = detresize.cols;
|
|
|
config->u32Stride[1] = detresize.cols;
|
|
|
}else if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
imgvis.data = NULL;
|
|
|
imgvis.inlayout = RGB;
|
|
|
imgvis.depth = detresize.channels();
|
|
|
config->u64VirAddr[0] = detresize.data;
|
|
|
config->u32Stride[0] = detresize.cols;
|
|
|
}
|
|
|
imgvis.width = detresize.cols;
|
|
|
imgvis.height = detresize.rows;
|
|
|
imgvis.layout = HWC;
|
|
|
imgvis.mode = VIS;
|
|
|
// imgvis.timestamp = i;
|
|
|
imgvis.cfg = (long long)config;
|
|
|
}else{
|
|
|
if(GSTREAM == NV12){
|
|
|
imgvis.data = NV12Mat.data;
|
|
|
imgvis.inlayout = NV12;
|
|
|
imgvis.depth = 1;
|
|
|
}else if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
imgvis.data = detresize.data;
|
|
|
imgvis.inlayout = RGB;
|
|
|
imgvis.depth = detresize.channels();
|
|
|
}
|
|
|
imgvis.width = detresize.cols;
|
|
|
imgvis.height = detresize.rows;
|
|
|
imgvis.layout = HWC;
|
|
|
imgvis.mode = VIS;
|
|
|
// imgvis.timestamp = i;
|
|
|
imgvis.cfg = (long long)config;
|
|
|
}
|
|
|
|
|
|
gettimeofday(&st, NULL);
|
|
|
// imgvis.data = (unsigned char*)zerosInput;
|
|
|
std::thread feed_data, result_data;
|
|
|
|
|
|
feed_data = std::thread(run_image_<DETECTION_API>, detAPI, imgvis);
|
|
|
result_data = std::thread(get_result_<DETECTION_API>, detAPI, imgvis);
|
|
|
|
|
|
feed_data.join();
|
|
|
result_data.join();
|
|
|
printf("result dets %d\n", dets.size());
|
|
|
|
|
|
gettimeofday(&et, NULL);
|
|
|
durtime = getDtimeMiliSecond(st, et);
|
|
|
printf("runAsync cost: %.3f\n", durtime/IMAGECOUNT);
|
|
|
timecost += durtime;
|
|
|
|
|
|
|
|
|
printf("avg timecost is: %f\n", timecost/IMAGECOUNT);
|
|
|
|
|
|
|
|
|
ret = detAPI->AIGOdeinit();
|
|
|
|
|
|
printf("\n---------------------current model size: %dx%d\n", imgvis.width, imgvis.height);
|
|
|
printf("\n-----------end--------------\n");
|
|
|
sleep(2);
|
|
|
    return ret;
}
|
|
|
|
|
|
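// Asynchronous segmentation demo: same producer/consumer pattern as test_class_async,
// but driving a SEGMENTION_API with a segmentation model.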
int test_segment_async(){
|
|
|
int ret = 0;
|
|
|
std::string testimg = "../models/Segment/quantImages/s729_VL.jpg";
|
|
|
std::string modelname = "../models/Segment/bm1684x/seg_1280_1024_vis_gray_1280x1024_bm1684x_F16_false.bmodel";
|
|
|
std::string cfgfile = "../models/Segment/bm1684x/seg_1280_1024_vis_gray_1280x1024_bm1684x_F16_false.json";
|
|
|
|
|
|
SEGMENTION_API* segmemtor = new SEGMENTION_API();
|
|
|
|
|
|
ret = segmemtor->AIGOinit(modelname.c_str(), cfgfile.c_str(), 0);
|
|
|
if(ret){
|
|
|
printf("AIGO init error with ret=%d\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
// building ImgMat
|
|
|
cv::Mat org = cv::imread(testimg, cv::IMREAD_COLOR);
|
|
|
cv::Mat resizeImg;
|
|
|
cv::resize(org, resizeImg, cv::Size(segmemtor->netWidth, segmemtor->netHeight));
|
|
|
cv::Mat NV12Mat;
|
|
|
BGR2YUV_nv12(resizeImg, NV12Mat);
|
|
|
|
|
|
GD_VIDEO_FRAME_S* config = (GD_VIDEO_FRAME_S*)malloc(sizeof(GD_VIDEO_FRAME_S));
|
|
|
    // build the input arguments
|
|
|
ImgMat imgvis;
|
|
|
if(UseGDVideoFrame){
|
|
|
if(GSTREAM == NV12){
|
|
|
imgvis.data = NULL;
|
|
|
imgvis.inlayout = NV12;
|
|
|
imgvis.depth = 1;
|
|
|
// Y
|
|
|
config->u64VirAddr[0] = NV12Mat.data;
|
|
|
// UV
|
|
|
config->u64VirAddr[1] = NV12Mat.data + resizeImg.cols * resizeImg.rows;
|
|
|
config->u32Stride[0] = resizeImg.cols;
|
|
|
config->u32Stride[1] = resizeImg.cols;
|
|
|
}else if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
imgvis.data = NULL;
|
|
|
imgvis.inlayout = RGB;
|
|
|
imgvis.depth = resizeImg.channels();
|
|
|
config->u64VirAddr[0] = resizeImg.data;
|
|
|
config->u32Stride[0] = resizeImg.cols;
|
|
|
}
|
|
|
imgvis.width = resizeImg.cols;
|
|
|
imgvis.height = resizeImg.rows;
|
|
|
imgvis.layout = HWC;
|
|
|
imgvis.mode = VIS;
|
|
|
// imgvis.timestamp = i;
|
|
|
imgvis.cfg = (long long)config;
|
|
|
}else{
|
|
|
if(GSTREAM == NV12){
|
|
|
imgvis.data = NV12Mat.data;
|
|
|
imgvis.inlayout = NV12;
|
|
|
imgvis.depth = 1;
|
|
|
}else if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
imgvis.data = resizeImg.data;
|
|
|
imgvis.inlayout = RGB;
|
|
|
imgvis.depth = resizeImg.channels();
|
|
|
}
|
|
|
imgvis.width = resizeImg.cols;
|
|
|
imgvis.height = resizeImg.rows;
|
|
|
imgvis.layout = HWC;
|
|
|
imgvis.mode = VIS;
|
|
|
// imgvis.timestamp = i;
|
|
|
imgvis.cfg = (long long)config;
|
|
|
}
|
|
|
|
|
|
float timecost = 0;
|
|
|
float durtime = 0;
|
|
|
struct timeval st, et;
|
|
|
gettimeofday(&st, NULL);
|
|
|
// imgvis.data = (unsigned char*)zerosInput;
|
|
|
std::thread feed_data, result_data;
|
|
|
// {
|
|
|
// feed_data = std::thread(run_image_, detAPI, imgvis);
|
|
|
// result_data = std::thread(get_result_, detAPI, imgvis, std::ref(dets));
|
|
|
// }
|
|
|
// if(!InitWithJson){
|
|
|
// // feed_data = std::thread(run_image_, detAPI, img);
|
|
|
// // result_data = std::thread(get_result_, detAPI, std::ref(dets));
|
|
|
feed_data = std::thread(run_image_<SEGMENTION_API>, segmemtor, imgvis);
|
|
|
result_data = std::thread(get_result_<SEGMENTION_API>, segmemtor, imgvis);
|
|
|
// }else{
|
|
|
// feed_data = std::thread(run_image_cfg, detAPI, imgvis);
|
|
|
// result_data = std::thread(get_result_cfg, detAPI, std::ref(dets));
|
|
|
// }
|
|
|
|
|
|
|
|
|
|
|
|
feed_data.join();
|
|
|
result_data.join();
|
|
|
gettimeofday(&et, NULL);
|
|
|
durtime = getDtimeMiliSecond(st, et);
|
|
|
printf("runAsync cost: %.3f\n", durtime/IMAGECOUNT);
|
|
|
timecost += durtime;
|
|
|
|
|
|
|
|
|
printf("avg timecost is: %f\n", timecost/IMAGECOUNT);
|
|
|
printf("\n---------------------current model size: %dx%d\n", segmemtor->netWidth, segmemtor->netHeight);
|
|
|
printf("\n-----------end--------------\n");
|
|
|
ret = segmemtor->AIGOdeinit();
|
|
|
|
|
|
sleep(2);
|
|
|
return ret;
|
|
|
|
|
|
}
|
|
|
|
|
|
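// Synchronous segmentation demo: run preprocess/infer/postprocess once on a single image
// and blend the returned mask into the resized input, saving ../models/Segment/result.jpg.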
int test_segment_sync(){
|
|
|
int ret = 0;
|
|
|
std::string testimg = "../models/Segment/quantImages/VIS/20230704_102059.MP4_20230711_195345.335.bmp";
|
|
|
std::string modelname = "../models/Segment/atlas200/vis_segment_dynamicShape_nv12_fp16.om";
|
|
|
std::string cfgfile = "../models/Segment/atlas200/vis_segment_dynamicShape_nv12_fp16.json";
|
|
|
|
|
|
SEGMENTION_API* segmemtor = new SEGMENTION_API();
|
|
|
|
|
|
ret = segmemtor->AIGOinit(modelname.c_str(), cfgfile.c_str(), 0);
|
|
|
if(ret){
|
|
|
printf("AIGO init error with ret=%d\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
// building ImgMat
|
|
|
cv::Mat org = cv::imread(testimg, cv::IMREAD_COLOR);
|
|
|
cv::Mat resizeImg;
|
|
|
cv::resize(org, resizeImg, cv::Size(segmemtor->netWidth, segmemtor->netHeight));
|
|
|
cv::Mat NV12Mat;
|
|
|
BGR2YUV_nv12(resizeImg, NV12Mat);
|
|
|
|
|
|
GD_VIDEO_FRAME_S* config = (GD_VIDEO_FRAME_S*)malloc(sizeof(GD_VIDEO_FRAME_S));
|
|
|
    // build the input arguments
|
|
|
ImgMat imgvis;
|
|
|
if(UseGDVideoFrame){
|
|
|
if(GSTREAM == NV12){
|
|
|
imgvis.data = NULL;
|
|
|
imgvis.inlayout = NV12;
|
|
|
imgvis.depth = 1;
|
|
|
// Y
|
|
|
config->u64VirAddr[0] = NV12Mat.data;
|
|
|
// UV
|
|
|
config->u64VirAddr[1] = NV12Mat.data + resizeImg.cols * resizeImg.rows;
|
|
|
config->u32Stride[0] = resizeImg.cols;
|
|
|
config->u32Stride[1] = resizeImg.cols;
|
|
|
}else if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
imgvis.data = NULL;
|
|
|
imgvis.inlayout = RGB;
|
|
|
imgvis.depth = resizeImg.channels();
|
|
|
config->u64VirAddr[0] = resizeImg.data;
|
|
|
config->u32Stride[0] = resizeImg.cols;
|
|
|
}
|
|
|
imgvis.width = resizeImg.cols;
|
|
|
imgvis.height = resizeImg.rows;
|
|
|
imgvis.layout = HWC;
|
|
|
imgvis.mode = VIS;
|
|
|
// imgvis.timestamp = i;
|
|
|
imgvis.cfg = (long long)config;
|
|
|
}else{
|
|
|
if(GSTREAM == NV12){
|
|
|
imgvis.data = NV12Mat.data;
|
|
|
imgvis.inlayout = NV12;
|
|
|
imgvis.depth = 1;
|
|
|
}else if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
imgvis.data = resizeImg.data;
|
|
|
imgvis.inlayout = RGB;
|
|
|
imgvis.depth = resizeImg.channels();
|
|
|
}
|
|
|
imgvis.width = resizeImg.cols;
|
|
|
imgvis.height = resizeImg.rows;
|
|
|
imgvis.layout = HWC;
|
|
|
imgvis.mode = VIS;
|
|
|
// imgvis.timestamp = i;
|
|
|
imgvis.cfg = (long long)config;
|
|
|
}
|
|
|
|
|
|
float timecost = 0;
|
|
|
float durtime = 0;
|
|
|
struct timeval st, et;
|
|
|
gettimeofday(&st, NULL);
|
|
|
|
|
|
ret = segmemtor->AIGOpreprocess(imgvis, true);
|
|
|
if(ret){
|
|
|
printf("preprocess error\n");
|
|
|
return 1;
|
|
|
}
|
|
|
ret = segmemtor->AIGOinfer();
|
|
|
if(ret){
|
|
|
printf("infer error\n");
|
|
|
return 1;
|
|
|
}
|
|
|
TenMat* mask = (TenMat*)malloc(sizeof(TenMat));
|
|
|
mask->width = imgvis.width;
|
|
|
mask->height = imgvis.height;
|
|
|
mask->depth = 1;
|
|
|
// mask->dataByte = mask->height*mask->width*sizeof(float); //
|
|
|
mask->data = NULL;//(void*)malloc(mask->dataByte);
|
|
|
ret = segmemtor->AIGOpostprocess(mask);
|
|
|
if(ret){
|
|
|
printf("postprocess error");
|
|
|
return 1;
|
|
|
}
|
|
|
if(mask->type == AI_FLOAT){
|
|
|
ret = draw_mask<float>(resizeImg, mask);
|
|
|
}else if(mask->type == AI_UINT8){
|
|
|
ret = draw_mask<uint8_t>(resizeImg, mask);
|
|
|
}else if(mask->type == AI_INT32){
|
|
|
ret = draw_mask<int32_t>(resizeImg, mask);
|
|
|
}
|
|
|
if(ret){
|
|
|
printf("draw mask error\n");
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
// cv::Mat maskcv(cv::Size(mask->height, mask->width), CV_8UC3, mask->data);
|
|
|
// cv::imwrite("../models/Segment/mask.jpg", maskcv);
|
|
|
cv::imwrite("../models/Segment/result.jpg", resizeImg);
|
|
|
free(mask);
|
|
|
free(config);
|
|
|
ret = segmemtor->AIGOdeinit();
|
|
|
if(ret){
|
|
|
printf("AIGOdeinit error\n");
|
|
|
return ret;
|
|
|
}
|
|
|
    delete segmemtor; // segmemtor was created with new
|
|
|
sleep(1);
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
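// Dispatch to the asynchronous or synchronous segmentation demo.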
int test_segment(bool useAsync){
|
|
|
if(useAsync){
|
|
|
return test_segment_async();
|
|
|
}else{
|
|
|
return test_segment_sync();
|
|
|
}
|
|
|
}
|
|
|
|
|
|
|
|
|
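// Create envNum DETECTION_API instances and run run_image_sync on each in its own thread,
// all consuming the same input frame.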
int test_sync_multi(){
|
|
|
int ret = 0;
|
|
|
int envNum = 2;
|
|
|
std::string visdetmodelname = "../models/Detect/hisi3403/hq.han/3403_2class_rpn.om";
|
|
|
std::string visdetparamname = "../models/Detect/hisi3403/hq.han/3403_2class_rpn.json";
|
|
|
|
|
|
std::vector<DETECTION_API*> apis;
|
|
|
std::vector<std::thread> runthreads, getthreads;
|
|
|
for(int i=0; i<envNum; i++){
|
|
|
DETECTION_API* detapi = new DETECTION_API();
|
|
|
ret = detapi->AIGOinit(visdetmodelname.c_str(), visdetparamname.c_str(), 0);
|
|
|
if(ret){
|
|
|
printf("init error with ret=%d\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
apis.push_back(detapi);
|
|
|
// std::thread runthread, getthread;
|
|
|
// runthreads.push_back(runthread);
|
|
|
// getthreads.push_back(getthread);
|
|
|
}
|
|
|
|
|
|
std::string visdetimgname = "../models/Detect/quantImages/1488264581.jpg";
|
|
|
std::string vissegimgname = "../models/Segment/test_images_vl/20230704_102059.MP4_20230711_195345.335.bmp";
|
|
|
|
|
|
float timecost = 0;
|
|
|
float durtime = 0;
|
|
|
struct timeval st, et;
|
|
|
|
|
|
    cv::Mat visdetImage = cv::imread(visdetimgname);
    if(visdetImage.empty()){
        printf("read image error %s\n", visdetimgname.c_str());
        return 1;
    }
    cv::cvtColor(visdetImage, visdetImage, cv::COLOR_BGR2RGB);
|
|
|
cv::Mat visdetresize, vissegresize;
|
|
|
cv::resize(visdetImage, visdetresize, cv::Size(apis.at(0)->netWidth, apis.at(0)->netHeight));
|
|
|
// char* zerosInput = (char*)malloc(visdetresize.cols*visdetresize.rows*visdetresize.channels() * sizeof(char));
|
|
|
cv::Mat NV12Matvis, NV12Matir;
|
|
|
// convert BGR to NV12
|
|
|
BGR2YUV_nv12(visdetresize, NV12Matvis);
|
|
|
std::vector<objinfo> dets;
|
|
|
GD_VIDEO_FRAME_S* config = (GD_VIDEO_FRAME_S*)malloc(sizeof(GD_VIDEO_FRAME_S));
|
|
|
ImgMat imgvis;
|
|
|
if(UseGDVideoFrame){
|
|
|
if(GSTREAM == NV12){
|
|
|
imgvis.data = NULL;
|
|
|
imgvis.inlayout = NV12;
|
|
|
imgvis.depth = 1;
|
|
|
// Y
|
|
|
config->u64VirAddr[0] = NV12Matvis.data;
|
|
|
// UV
|
|
|
config->u64VirAddr[1] = NV12Matvis.data + visdetresize.cols * visdetresize.rows;
|
|
|
config->u32Stride[0] = visdetresize.cols;
|
|
|
            config->u32Stride[1] = visdetresize.cols; // NV12 UV-plane stride equals the image width
|
|
|
}else if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
imgvis.data = NULL;
|
|
|
imgvis.inlayout = RGB;
|
|
|
imgvis.depth = visdetresize.channels();
|
|
|
config->u64VirAddr[0] = visdetresize.data;
|
|
|
config->u32Stride[0] = visdetresize.cols;
|
|
|
}
|
|
|
imgvis.width = visdetresize.cols;
|
|
|
imgvis.height = visdetresize.rows;
|
|
|
imgvis.layout = HWC;
|
|
|
imgvis.mode = VIS;
|
|
|
imgvis.timestamp = 0;
|
|
|
imgvis.cfg = (long long)config;
|
|
|
}else{
|
|
|
if(GSTREAM == NV12){
|
|
|
imgvis.data = NV12Matvis.data;
|
|
|
imgvis.inlayout = NV12;
|
|
|
imgvis.depth = 1;
|
|
|
}else if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
imgvis.data = visdetresize.data;
|
|
|
imgvis.inlayout = RGB;
|
|
|
imgvis.depth = visdetresize.channels();
|
|
|
}
|
|
|
imgvis.width = visdetresize.cols;
|
|
|
imgvis.height = visdetresize.rows;
|
|
|
imgvis.layout = HWC;
|
|
|
imgvis.mode = VIS;
|
|
|
imgvis.timestamp = 0;
|
|
|
imgvis.cfg = (long long)config;
|
|
|
}
|
|
|
|
|
|
for(int i=0; i<envNum; i++){
|
|
|
// std::thread runthread = std::thread(run_image_sync<DETECTION_API>, apis.at(i), imgvis);
|
|
|
runthreads.push_back(std::thread(run_image_sync<DETECTION_API>, apis.at(i), imgvis));
|
|
|
}
|
|
|
for(int i=0; i<envNum; i++){
|
|
|
runthreads.at(i).join();
|
|
|
}
|
|
|
printf("result dets %d\n", dets.size());
|
|
|
|
|
|
for(DETECTION_API* api : apis){
|
|
|
ret = api->AIGOdeinit();
|
|
|
if(ret){
|
|
|
printf("AIGOdeinit error with ret=%d\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
printf("sleeping\n");
|
|
|
sleep(15);
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
|
|
|
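// Create envNum DETECTION_API instances and give each its own producer (run_image_) and
// consumer (get_result_) thread pair.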
int test_async_multi(){
|
|
|
int ret = 0;
|
|
|
int envNum = 3;
|
|
|
std::string visdetmodelname = "../models/Detect/atlas200/hq.han/732_VL_dynamic_nms_nv12_0609.om";
|
|
|
std::string visdetparamname = "../models/Detect/atlas200/hq.han/732_VL_dynamic_nms_nv12_0609.json";
|
|
|
|
|
|
std::vector<DETECTION_API*> apis;
|
|
|
std::vector<std::thread> runthreads, getthreads;
|
|
|
for(int i=0; i<envNum; i++){
|
|
|
DETECTION_API* detapi = new DETECTION_API();
|
|
|
ret = detapi->AIGOinit(visdetmodelname.c_str(), visdetparamname.c_str(), 0);
|
|
|
if(ret){
|
|
|
printf("init error with ret=%d\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
apis.push_back(detapi);
|
|
|
// std::thread runthread, getthread;
|
|
|
// runthreads.push_back(runthread);
|
|
|
// getthreads.push_back(getthread);
|
|
|
}
|
|
|
|
|
|
|
|
|
std::string visdetimgname = "../models/Detect/quantImages/1488264581.jpg";
|
|
|
std::string vissegimgname = "../models/Segment/test_images_vl/20230704_102059.MP4_20230711_195345.335.bmp";
|
|
|
|
|
|
float timecost = 0;
|
|
|
float durtime = 0;
|
|
|
struct timeval st, et;
|
|
|
|
|
|
    cv::Mat visdetImage = cv::imread(visdetimgname);
    if(visdetImage.empty()){
        printf("read image error %s\n", visdetimgname.c_str());
        return 1;
    }
    cv::cvtColor(visdetImage, visdetImage, cv::COLOR_BGR2RGB);
|
|
|
cv::Mat visdetresize, vissegresize;
|
|
|
cv::resize(visdetImage, visdetresize, cv::Size(apis.at(0)->netWidth, apis.at(0)->netHeight));
|
|
|
// char* zerosInput = (char*)malloc(visdetresize.cols*visdetresize.rows*visdetresize.channels() * sizeof(char));
|
|
|
cv::Mat NV12Matvis, NV12Matir;
|
|
|
// convert BGR to NV12
|
|
|
BGR2YUV_nv12(visdetresize, NV12Matvis);
|
|
|
std::vector<objinfo> dets;
|
|
|
GD_VIDEO_FRAME_S* config = (GD_VIDEO_FRAME_S*)malloc(sizeof(GD_VIDEO_FRAME_S));
|
|
|
ImgMat imgvis;
|
|
|
if(UseGDVideoFrame){
|
|
|
if(GSTREAM == NV12){
|
|
|
imgvis.data = NULL;
|
|
|
imgvis.inlayout = NV12;
|
|
|
imgvis.depth = 1;
|
|
|
// Y
|
|
|
config->u64VirAddr[0] = NV12Matvis.data;
|
|
|
// UV
|
|
|
config->u64VirAddr[1] = NV12Matvis.data + visdetresize.cols * visdetresize.rows;
|
|
|
config->u32Stride[0] = visdetresize.cols;
|
|
|
            config->u32Stride[1] = visdetresize.cols; // NV12 UV-plane stride equals the image width
|
|
|
}else if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
imgvis.data = NULL;
|
|
|
imgvis.inlayout = RGB;
|
|
|
imgvis.depth = visdetresize.channels();
|
|
|
config->u64VirAddr[0] = visdetresize.data;
|
|
|
config->u32Stride[0] = visdetresize.cols;
|
|
|
}
|
|
|
imgvis.width = visdetresize.cols;
|
|
|
imgvis.height = visdetresize.rows;
|
|
|
imgvis.layout = HWC;
|
|
|
imgvis.mode = VIS;
|
|
|
imgvis.timestamp = 0;
|
|
|
imgvis.cfg = (long long)config;
|
|
|
}else{
|
|
|
if(GSTREAM == NV12){
|
|
|
imgvis.data = NV12Matvis.data;
|
|
|
imgvis.inlayout = NV12;
|
|
|
imgvis.depth = 1;
|
|
|
}else if(GSTREAM == RGB || GSTREAM == BGR){
|
|
|
imgvis.data = visdetresize.data;
|
|
|
imgvis.inlayout = RGB;
|
|
|
imgvis.depth = visdetresize.channels();
|
|
|
}
|
|
|
imgvis.width = visdetresize.cols;
|
|
|
imgvis.height = visdetresize.rows;
|
|
|
imgvis.layout = HWC;
|
|
|
imgvis.mode = VIS;
|
|
|
imgvis.timestamp = 0;
|
|
|
imgvis.cfg = (long long)config;
|
|
|
}
|
|
|
|
|
|
for(int i=0; i<envNum; i++){
|
|
|
runthreads.push_back(std::thread(run_image_<DETECTION_API>, apis.at(i), imgvis));
|
|
|
getthreads.push_back(std::thread(get_result_<DETECTION_API>, apis.at(i), imgvis));
|
|
|
}
|
|
|
for(int i=0; i<envNum; i++){
|
|
|
runthreads.at(i).join();
|
|
|
getthreads.at(i).join();
|
|
|
}
|
|
|
printf("result dets %d\n", dets.size());
|
|
|
|
|
|
for(DETECTION_API* api : apis){
|
|
|
ret = api->AIGOdeinit();
|
|
|
if(ret){
|
|
|
printf("AIGOdeinit error with ret=%d\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
|
|
|
    return ret;
}
|
|
|
|
|
|
|
|
|
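// Dispatch to the multi-instance asynchronous or synchronous detection demo.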
int test_multi_class(bool useAsync){
|
|
|
if(useAsync){
|
|
|
return test_async_multi();
|
|
|
}else{
|
|
|
// for(int i=0; i<10;i++){
|
|
|
// test_sync_multi();
|
|
|
// }
|
|
|
return test_sync_multi();
|
|
|
// sleep(10);
|
|
|
// return 0;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
|
|
|
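// Stress init/deinit: construct and initialize several DETECTION_API instances, then
// deinitialize and delete them.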
int test_initdeinit(){
|
|
|
int ret = 0;
|
|
|
int envNum = 2;
|
|
|
std::string visdetmodelname = "../models/Detect/hisi3403/hq.han/yolov5_1280_704_2class_nv12_8_nms.om";
|
|
|
std::string visdetparamname = "../models/Detect/hisi3403/hq.han/yolov5_1280_704_2class_nv12_8_nms.json";
|
|
|
|
|
|
std::vector<DETECTION_API*> apis;
|
|
|
std::vector<std::thread> runthreads, getthreads;
|
|
|
// DETECTION_API* detapi = new DETECTION_API();
|
|
|
// ret = detapi->AIGOinit(visdetmodelname.c_str(), visdetparamname.c_str(), 0);
|
|
|
// if(ret){
|
|
|
// printf("init error with ret=%d\n", ret);
|
|
|
// return ret;
|
|
|
// }
|
|
|
// ret = detapi->AIGOdeinit();
|
|
|
// if(ret){
|
|
|
// printf("AIGOdeinit error with ret=%d\n", ret);
|
|
|
// return ret;
|
|
|
// }
|
|
|
// delete detapi;
|
|
|
// detapi = NULL;
|
|
|
for(int i=0; i<envNum; i++){
|
|
|
DETECTION_API* detapi = new DETECTION_API();
|
|
|
ret = detapi->AIGOinit(visdetmodelname.c_str(), visdetparamname.c_str(), 0);
|
|
|
if(ret){
|
|
|
printf("init error with ret=%d\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
apis.push_back(detapi);
|
|
|
}
|
|
|
printf("init done sleeping");
|
|
|
sleep(5);
|
|
|
for(DETECTION_API* api : apis){
|
|
|
ret = api->AIGOdeinit();
|
|
|
if(ret){
|
|
|
printf("AIGOdeinit error with ret=%d\n", ret);
|
|
|
return ret;
|
|
|
}
|
|
|
delete api;
|
|
|
api = NULL;
|
|
|
}
|
|
|
|
|
|
printf("sleeping\n");
|
|
|
sleep(15);
|
|
|
    return 0;
}
|
|
|
|
|
|
|
|
|
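// Entry point: initialize ACL, run one of the demo scenarios (the others are left
// commented out), then uninitialize ACL.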
int main(){
|
|
|
// cpu_set_t mask;
|
|
|
// CPU_ZERO(&mask);
|
|
|
// CPU_SET(0, &mask);
|
|
|
// if (pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask) == -1) {
|
|
|
// printf("warning: could not set CPU affinity, continuing...\n");
|
|
|
// }
|
|
|
int ret = 0;
|
|
|
ret = Init_ACL();
|
|
|
// ret = test_class();
|
|
|
// ret = test_class_async();
|
|
|
// ret = test_segment(true);
|
|
|
ret = test_multi_class(false);
|
|
|
// ret = test_initdeinit();
|
|
|
    ret = Uninit_ACL();
|
|
|
return ret;
|
|
|
} |