Complete the front-view scan demo; runtime is still too long

main
wangchongwu 5 months ago
parent 832d207ccd
commit 7a9c689e89

@@ -363,11 +363,13 @@ void ProcessFrontVL(string filePath)
GaussianRandom gr(0.0, 1, 0.0);
SINT32 nVLFrameSize = 1.5 * IMAGE_WIDTH_VL * IMAGE_HEIGHT_VL + IMAGE_WIDTH_VL * PARA_IR_LINE;
_fseeki64(file, nVLFrameSize * 2200, SEEK_SET);
int i = 0;
while (!feof(file))
{
SINT32 nVLFrameSize = 1.5 * IMAGE_WIDTH_VL * IMAGE_HEIGHT_VL + IMAGE_WIDTH_VL * PARA_IR_LINE;
fread(pFrameVL, 1, nVLFrameSize, file);
S729paras_VL Paras_VL = { 0 };
@@ -415,19 +417,20 @@ void ProcessFrontVL(string filePath)
if (i == 0)
{
stitcher->Init(info,5,5);
stitcher->Init(info,180,180);
pan = stitcher->ExportPanAddr();
mat_pan = cv::Mat(pan.u32Height, pan.u32Width, CV_8UC4, pan.u64VirAddr[0]);
mat_pan = cv::Mat(pan.u32Height, pan.u32Width, CV_8UC3, pan.u64VirAddr[0]);
}
else
{
if (i % 50 != 0)
if (i % 30 != 0)
{
i = i + 1;
continue;
}
std::cout << info.craft.stPos.B << " " << info.craft.stPos.L << " " << info.craft.stPos.H << " "
<< info.craft.stAtt.fYaw << " " << info.craft.stAtt.fPitch << " " << info.craft.stAtt.fRoll << " "
<< info.servoInfo.fServoAz << " " << info.servoInfo.fServoPt
@@ -437,7 +440,7 @@ void ProcessFrontVL(string filePath)
tm.start();
// Fast stitch based on exterior-orientation parameters
//stitcher->FrontStitch(frame, info);
stitcher->GeoStitch(frame, info);
tm.stop();
@@ -447,7 +450,7 @@ void ProcessFrontVL(string filePath)
}
cv::Mat res;
cv::resize(mat_pan, res, cv::Size(pan.u32Width / 4, pan.u32Height / 4));
cv::resize(mat_pan, res, cv::Size(pan.u32Width / 8, pan.u32Height / 8));
imshow("pan_opt", res);
waitKey(1);
@@ -456,7 +459,7 @@ void ProcessFrontVL(string filePath)
cv::Mat res;
cv::resize(mat_pan, res, cv::Size(pan.u32Width / 4, pan.u32Height / 4));
cv::resize(mat_pan, res, cv::Size(pan.u32Width / 8, pan.u32Height / 8));
imshow("pan_opt", res);
waitKey(0);
@@ -473,7 +476,7 @@ int main(int, char**)
//ProcessVL("H:/vl_1920_1080_para40_y8/20241219152917_4.video", "20241219152917_4");
//ProcessVL("H:/vl_1920_1080_para40_y8/20241219153515_10.video", "20241219153515_10");
//
//ProcessVL("H:/vl_1920_1080_para40_y8/55.video", "5");
//ProcessVL("H:/vl_1920_1080_para40_y8/1.video", "1");
ProcessFrontVL("H:/vl_1920_1080_para40_y8/22.video");
ProcessFrontVL("H:/vl_1920_1080_para40_y8/1.video");
}
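A note on the seek arithmetic in the demo loop above: each stored frame is an NV12 image (1.5 bytes per pixel) plus PARA_IR_LINE appended telemetry rows, so skipping 2200 frames lands several gigabytes into the file, which is why the 64-bit _fseeki64 is used. A minimal sketch of that arithmetic, with PARA_IR_LINE as a placeholder (the real constant lives in the project headers):

// Sketch only: frame-size/seek math assumed by the demo loop above.
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t W = 1920, H = 1080;
    const int64_t PARA_IR_LINE = 16;            // placeholder, not the real value
    const int64_t frameSize = 3 * W * H / 2     // NV12: luma + half-size chroma
                            + W * PARA_IR_LINE; // appended telemetry rows
    std::printf("frame = %lld B, seek(2200 frames) = %lld B\n",
                (long long)frameSize, (long long)(2200 * frameSize));
    return 0;
}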

@@ -30,6 +30,12 @@ include_directories(${ArithTrkPubInc}) # pull in the algorithm's shared headers
link_directories(${OpenCV_LIBS_DIR})
include_directories(${OpenCV_INCLUDE_DIRS})
find_package(CUDA REQUIRED)
set(CMAKE_CUDA_STANDARD 11)
include_directories(${CUDA_INCLUDE_DIRS})
set(ArithSrcDIR_MAIN "src") # algorithm source directory
# use Common

@@ -5,6 +5,7 @@
#include "Arith_FeaMatch.h"
#include <opencv2/opencv.hpp>
#include <omp.h>
#include <cuda_runtime.h>
using namespace std;
using namespace cv;
@@ -29,29 +30,113 @@ FrontStitch::~FrontStitch()
{
}
FPanInfo FrontStitch::Init(FrameInfo info, float AzRange, float PtRange)
{
_fAglRes = 0.001;
_panPara.fAglRes = 0.02;
// Set the starting position of the current stitch
_panPara.center = _GeoSolver->SetOriginPoint(info);
_GeoSolver->SetOriginPoint(info);
cv::Mat H = _GeoSolver->findHomography(info);
// Compute the polar coordinates of the image center
Pole centPole = getPoleFromImgWithH(H, cv::Point2f(info.nWidth / 2, info.nHeight / 2), info.nEvHeight);
// The forward/inverse mapping can be cross-checked:
//cv::Point2f imgpos = getImgPosFromPole(H, centPole, info.nEvHeight);
_panPara.center.fAz = centPole.beta;
_panPara.center.fPt = centPole.alpha;
_panPara.range.fAz = AzRange;
_panPara.range.fPt = PtRange;
_panPara.m_pan_width = AzRange / _fAglRes;
_panPara.m_pan_height = PtRange / _fAglRes;
// Panorama dimensions
_panPara.m_pan_width = AzRange / _panPara.fAglRes;
_panPara.m_pan_height = PtRange / _panPara.fAglRes;
_panImage = cv::Mat::zeros(_panPara.m_pan_height, _panPara.m_pan_width, CV_8UC4);
_panImage = cv::Mat::zeros(_panPara.m_pan_height, _panPara.m_pan_width, CV_8UC3);
return _panPara;
}
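For scale: with the new defaults (fAglRes = 0.02° per pixel, 180° of azimuth and pitch coverage) the panorama becomes 9000 x 9000; at CV_8UC3 that is roughly 243 MB, which is also why the demo now previews at 1/8 scale. A back-of-envelope sketch, assuming degrees throughout:

// Assumes the Init(info, 180, 180) call and fAglRes = 0.02 shown above.
const float fAglRes = 0.02f;                  // degrees per panorama pixel
const float AzRange = 180.f, PtRange = 180.f; // degrees
const int panW = (int)(AzRange / fAglRes);    // 9000
const int panH = (int)(PtRange / fAglRes);    // 9000
const size_t bytes = (size_t)panW * panH * 3; // CV_8UC3 -> 243,000,000 B (~232 MiB)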
BYTE8 FrontStitch::GeoStitch(GD_VIDEO_FRAME_S img, FrameInfo para)
BYTE8 FrontStitch::GeoStitch(GD_VIDEO_FRAME_S img, FrameInfo info)
{
return BYTE8();
cv::Mat H = _GeoSolver->findHomography(info);
float dep = info.nEvHeight;
// Fetch the RGB source image
cv::Mat rgb = getRGBMatFromGDFrame(img, img.u64VirAddr[0]);
Pole CT_map = getPoleFromImgWithH(H, cv::Point2f(img.u32Width/2, img.u32Height/2), info.nEvHeight);
// Compute the frame's four corner points on the map
Pole leftTop_map = getPoleFromImgWithH(H,cv::Point2f(0,0), info.nEvHeight);
Pole rightTop_map = getPoleFromImgWithH(H, cv::Point2f(img.u32Width,0), info.nEvHeight);
Pole rightBottom_map = getPoleFromImgWithH(H, cv::Point2f(img.u32Width,img.u32Height), info.nEvHeight);
Pole leftBottom_map = getPoleFromImgWithH(H, cv::Point2f(0,img.u32Height), info.nEvHeight);
// Convert to panorama image coordinates
cv::Point2f p_leftTop_map = getFPanFromPole(leftTop_map);
cv::Point2f p_rightTop_map = getFPanFromPole(rightTop_map);
cv::Point2f p_rightBottom_map = getFPanFromPole(rightBottom_map);
cv::Point2f p_leftBottom_map = getFPanFromPole(leftBottom_map);
// Compute the covered extent on the panorama
int right = max(max(max(p_leftTop_map.x, p_rightTop_map.x), p_rightBottom_map.x), p_leftBottom_map.x);
int left = min(min(min(p_leftTop_map.x, p_rightTop_map.x), p_rightBottom_map.x), p_leftBottom_map.x);
int top = min(min(min(p_leftTop_map.y, p_rightTop_map.y), p_rightBottom_map.y), p_leftBottom_map.y);
int bottom = max(max(max(p_leftTop_map.y, p_rightTop_map.y), p_rightBottom_map.y), p_leftBottom_map.y);
int xRange = right - left;
int yRange = bottom - top;
// Inverse-map panorama pixels back to source pixel coordinates
int valid_top = std::max(0, top);
int valid_bottom = std::min(_panImage.rows, bottom);
int valid_left = std::max(0, left);
int valid_right = std::min(_panImage.cols, right);
#if 1
#pragma omp parallel for
for (int i = valid_top; i < valid_bottom; i++)
{
for (int j = valid_left; j < valid_right; j++)
{
// Convert the pan pixel back to source-image coordinates
cv::Point2f p_img = getImgPosFromPole(H, getPoleFromFPan(Point2f(j, i)), dep);
if (p_img.x >= 200 && p_img.y >= 200 && p_img.x < img.u32Width-200 && p_img.y < img.u32Height-200)
{
auto rgbVal = Interpolation_RGB24(rgb.data, rgb.cols, rgb.rows, p_img.x, p_img.y);
_panImage.data[3 * (i * _panImage.cols + j) + 0] = rgbVal.R;
_panImage.data[3 * (i * _panImage.cols + j) + 1] = rgbVal.G;
_panImage.data[3 * (i * _panImage.cols + j) + 2] = rgbVal.B;
}
}
}
#endif
return 0;
}
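GeoStitch inverse-maps every panorama pixel in the valid rectangle through getPoleFromFPan and getImgPosFromPole, then bilinearly samples the source, so the per-frame cost scales with the covered pan area; that per-pixel loop is the most likely source of the runtime complained about in the commit title. One possible restructuring (a sketch, not the committed code) is to fill remap tables in the parallel loop and let cv::remap do the sampling; the table fill is also a natural candidate for the CUDA path this commit starts wiring up:

// Sketch under the same names as GeoStitch above; drops the 200-px border
// guard for brevity (it would become a pre-clamp on the maps).
cv::Mat mapX(valid_bottom - valid_top, valid_right - valid_left, CV_32FC1);
cv::Mat mapY(mapX.size(), CV_32FC1);
#pragma omp parallel for
for (int i = valid_top; i < valid_bottom; i++)
    for (int j = valid_left; j < valid_right; j++)
    {
        cv::Point2f p = getImgPosFromPole(H, getPoleFromFPan(cv::Point2f(j, i)), dep);
        mapX.at<float>(i - valid_top, j - valid_left) = p.x;
        mapY.at<float>(i - valid_top, j - valid_left) = p.y;
    }
cv::Mat roi = _panImage(cv::Rect(valid_left, valid_top, mapX.cols, mapX.rows));
// BORDER_TRANSPARENT leaves pan pixels untouched where the map falls outside
cv::remap(rgb, roi, mapX, mapY, cv::INTER_LINEAR, cv::BORDER_TRANSPARENT);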
SINT32 FrontStitch::ReceiveFrame(GD_VIDEO_FRAME_S img, FrameInfo para)
{
return SINT32();
@@ -73,3 +158,61 @@ GD_VIDEO_FRAME_S FrontStitch::ExportPanAddr()
return pan_out;
}
Pole FrontStitch::getPoleFromImgWithH(cv::Mat H, cv::Point2f pt, float dep)
{
// Project to ground coordinates; this step can reuse the earlier optimization results
cv::Point2f grdPt = warpPointWithH(H, pt);
// Restore the depth and convert to the commonly used NUE frame
PointXYZ grdPtXYZ = { 0 };
grdPtXYZ.X = grdPt.y;
grdPtXYZ.Y = -dep;
grdPtXYZ.Z = grdPt.x;
// Ground point to polar coordinates
Pole pole = getPoleFromXYZ(grdPtXYZ);
return pole;
}
cv::Point2f FrontStitch::getImgPosFromPole(cv::Mat H, Pole _pole, float dep)
{
PointXYZ virPt = getXYZFromPole(_pole);
// Project the virtual point onto the ground
float ratio = -dep / virPt.Y;
PointXYZ realPt = { 0 };
realPt.X = virPt.X * ratio;
realPt.Y = virPt.Y * ratio;
realPt.Z = virPt.Z * ratio;
// Convert to ENG (east-north-ground)
PointXYZ realPtGeo = { 0 };
realPtGeo.X = realPt.Z;
realPtGeo.Y = realPt.X;
realPtGeo.Z = -realPt.Y;
// Project back to the image plane
cv::Point2f px = warpPointWithH(H.inv(), cv::Point2f(realPtGeo.X, realPtGeo.Y));
return px;
}
Pole FrontStitch::getPoleFromFPan(cv::Point2f pt)
{
Pole _pole = { 0 };
_pole.beta = _panPara.center.fAz + (pt.x - _panPara.m_pan_width / 2) * _panPara.fAglRes;
_pole.alpha = _panPara.center.fPt + (_panPara.m_pan_height / 2 - pt.y) * _panPara.fAglRes;
return _pole;
}
cv::Point2f FrontStitch::getFPanFromPole(Pole _pole)
{
cv::Point2f pt = { 0 };
pt.x = DEGLIM(_pole.beta - _panPara.center.fAz) / _panPara.fAglRes + _panPara.m_pan_width / 2;
pt.y = DEGLIM(_panPara.center.fPt - _pole.alpha) / _panPara.fAglRes + _panPara.m_pan_height / 2;
return pt;
}
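getPoleFromFPan and getFPanFromPole are inverses of each other up to the DEGLIM angle wrap; the "forward/inverse can be cross-checked" note in Init alludes to exactly this kind of check. A round-trip sketch, assuming it runs inside FrontStitch after Init:

// Round-trip sketch (member context; _panPara set by Init).
cv::Point2f p0(_panPara.m_pan_width / 2.f, _panPara.m_pan_height / 2.f);
Pole pole = getPoleFromFPan(p0);
cv::Point2f p1 = getFPanFromPole(pole);
CV_Assert(std::abs(p1.x - p0.x) < 1e-3f && std::abs(p1.y - p0.y) < 1e-3f);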

@@ -38,11 +38,22 @@ public:
private:
// Project image coordinates to origin-polar coordinates via the H matrix; depth is required because H discards it
Pole getPoleFromImgWithH(cv::Mat H, cv::Point2f pt, float dep);
// Convert origin-polar coordinates back to image coordinates
cv::Point2f getImgPosFromPole(cv::Mat H, Pole _pole, float dep);
// Panorama image coordinates to origin-polar coordinates
Pole getPoleFromFPan(cv::Point2f pt);
// Origin-polar coordinates to panorama image coordinates
cv::Point2f getFPanFromPole(Pole _pole);
GeoSolver* _GeoSolver;
private:
FPanInfo _panPara;// panorama configuration
float _fAglRes;// panorama angular resolution
cv::Mat _panImage;
};

@@ -31,55 +31,11 @@ GeoSolver::~GeoSolver()
// return pos_pan;
//}
ANGLE32F GeoSolver::SetOriginPoint(FrameInfo info)
void GeoSolver::SetOriginPoint(FrameInfo info)
{
originPoint = getXYZFromBLH(info.craft.stPos);
origininfo = info;
// A forward/inverse computation based on geodetic Cartesian coordinates. Polar conversion is much more involved than the planar H-matrix transform, since it contains nonlinear steps.
#if 0
// Test 1: from image coordinates to stitch-origin polar coordinates
ANGLE32F result = { 0 };
cv::Mat H = findHomography(info);
cv::Point2f grdPt = warpPointWithH(H, cv::Point2f(info.nWidth / 2, info.nHeight / 2));
PointXYZ pt = { 0 };
pt.X = grdPt.y;
pt.Y = -info.nEvHeight;
pt.Z = grdPt.x;
// Ground point to polar coordinates
Pole pole = getPoleFromXYZ(pt);
// Test 2: from stitch-origin polar coordinates back to image coordinates
pole.distance = 1;
PointXYZ virPt = getXYZFromPole(pole);
// Project the virtual point onto the ground
float ratio = -info.nEvHeight / virPt.Y;
PointXYZ realPt = { 0 };
realPt.X = virPt.X * ratio;
realPt.Y = virPt.Y * ratio;
realPt.Z = virPt.Z * ratio;
// Convert to ENG (east-north-ground)
PointXYZ realPtGeo = { 0 };
realPtGeo.X = realPt.Z;
realPtGeo.Y = realPt.X;
realPtGeo.Z = -realPt.Y;
// Project back to the image plane
cv::Point2f px = warpPointWithH(H.inv(), cv::Point2f(realPtGeo.X, realPtGeo.Y));
#endif
return result;
return;
}
@@ -360,15 +316,4 @@ vector<cv::Point2f> warpRectWithH(cv::Mat H,cv::Size size)
return _res;
}
PointXYZ getNUEFromENG(PointXYZ p)
{
PointXYZ ptrans = { 0 };
}
PointXYZ getENGFromNUE(PointXYZ p)
{
return PointXYZ();
}

@@ -46,7 +46,7 @@ public:
cv::Mat findHomography2(FrameInfo info);
// Set the exterior orientation of the starting stitch point
ANGLE32F SetOriginPoint(FrameInfo info);
void SetOriginPoint(FrameInfo info);
// Compute the latitude/longitude of original image coordinates from H
PointBLH getBLHFromFrame(cv::Mat H, cv::Point2f ptInFrame);
@@ -97,6 +97,3 @@ cv::Point2f warpPointWithH(cv::Mat H,cv::Point2f srcPt);
// Map a polygon through H, clockwise order
vector<cv::Point2f> warpRectWithH(cv::Mat H,cv::Size size);
// Coordinate system conversions
PointXYZ getNUEFromENG(PointXYZ p);
PointXYZ getENGFromNUE(PointXYZ p);

@@ -63,7 +63,7 @@ UPanInfo UnderStitch::InitMap(FrameInfo info)
UPanInfo panPara = { 0 };
panPara.m_pan_width = MIN(info.nWidth * 5,5000);// pan width
panPara.m_pan_height = MIN(info.nWidth * 5,5000);// pan height
panPara.scale = gsd / 2 ;// scale: 1 m = ? pix
panPara.scale = gsd / 6 ;// scale: 1 m = ? pix
int zoom = _googleProduct.getZoomLevel(panPara.scale);

@@ -335,6 +335,38 @@ cv::Mat getRGBAMatFromGDFrame(GD_VIDEO_FRAME_S frame, void* pArr)
return cv::Mat();
}
cv::Mat getRGBMatFromGDFrame(GD_VIDEO_FRAME_S frame, void* pArr)
{
// For stitching: supports Y8, NV12, and packed RGB
cv::Mat dst(frame.u32Height, frame.u32Width, CV_8UC3);
// Y8
if (frame.enPixelFormat == GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_GRAY_Y8)
{
cv::Mat mat(frame.u32Height, frame.u32Width, CV_8UC1, pArr);
cv::cvtColor(mat, dst, cv::COLOR_GRAY2BGR);
return dst.clone();
}
// nv12
if (frame.enPixelFormat == GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_NV12)
{
cv::Mat mat(frame.u32Height * 1.5, frame.u32Width, CV_8UC1, pArr);
cv::cvtColor(mat, dst, cv::COLOR_YUV2BGR_NV12);
return dst.clone();
}
// rgb
if (frame.enPixelFormat == GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_RGB_PACKED)
{
cv::Mat mat(frame.u32Height, frame.u32Width, CV_8UC3, pArr);
return mat.clone();
}
return cv::Mat();
}
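A usage sketch for the converter above, with hypothetical frame-descriptor values; the clone() inside means the returned Mat stays valid after the capture buffer is reused for the next frame:

// Hypothetical Y8 frame wrapped around a caller-owned buffer.
GD_VIDEO_FRAME_S f = { 0 };
f.u32Width = 1920;
f.u32Height = 1080;
f.enPixelFormat = GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_GRAY_Y8;
std::vector<unsigned char> buf(1920 * 1080, 128); // mid-gray test frame
cv::Mat bgr = getRGBMatFromGDFrame(f, buf.data());
// bgr is 1080x1920 CV_8UC3 and independent of buf's lifetime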
unsigned char FourPointInterpolation(unsigned char* pSrcImage, int iWidth, int iHeight, float x, float y)
{
if (x < 0 || y < 0 || x > iWidth || y > iHeight)
@@ -405,4 +437,120 @@ unsigned char FourPointInterpolation(unsigned char* pSrcImage, int iWidth, int i
return (unsigned char)(uv0 * f1 + uv1 * f2 + uv2 * f3 + uv3 * f4);
}
}
PXL_VAL Interpolation_RGB24(UBYTE8* pSrcImage, int iWidth, int iHeight, float x, float y)
{
PXL_VAL VAL = { 0 };
int nStride = iWidth * 3;
// Coordinates of the four nearest pixels: (i1, j1), (i2, j1), (i1, j2), (i2, j2)
int i1, i2;
int j1, j2;
// Values of the four nearest pixels
UBYTE8 f1[3], f2[3], f3[3], f4[3];
// Compute the coordinates of the four nearest pixels
i1 = MAX((int)x, 0);
i2 = MIN(i1 + 1, iWidth - 1);
j1 = MAX((int)y, 0);
j2 = MIN(j1 + 1, iHeight - 1);
float t, s; // t = x - [x], s = y - [y]
t = x - i1;
s = y - j1;
// Bilinear weights of the four nearest pixels
float uv0, uv1, uv2, uv3; // weights sum to 1: uv0 + uv1 + uv2 + uv3 = 1
uv0 = (1 - s) * (1 - t);
uv1 = (1 - s) * t;
uv2 = (1 - t) * s;
uv3 = s * t;
// Handle the boundary cases separately
if (x - iWidth + 1 > 0)
{
// The point lies on the right edge of the image
if (y - iHeight + 1 > 0)
{
// The point is exactly the bottom-right corner pixel; return its value directly
VAL.R = *(pSrcImage + nStride * j1 + i1 * 3);
VAL.G = *(pSrcImage + nStride * j1 + i1 * 3 + 1);
VAL.B = *(pSrcImage + nStride * j1 + i1 * 3 + 2);
return VAL;
}
else
{
// On the right edge but not the corner: one vertical interpolation suffices
f1[0] = *(pSrcImage + nStride * j1 + i1 * 3);
f1[1] = *(pSrcImage + nStride * j1 + i1 * 3 + 1);
f1[2] = *(pSrcImage + nStride * j1 + i1 * 3 + 2);
f3[0] = *(pSrcImage + nStride * j2 + i1 * 3 + 0);
f3[1] = *(pSrcImage + nStride * j2 + i1 * 3 + 1);
f3[2] = *(pSrcImage + nStride * j2 + i1 * 3 + 2);
VAL.R = f1[0] + s * (f3[0] - f1[0]);
VAL.G = f1[1] + s * (f3[1] - f1[1]);
VAL.B = f1[2] + s * (f3[2] - f1[2]);
// Return the interpolation result
return VAL;
}
}
else if (y - iHeight + 1 > 0)
{
// On the bottom edge but not the corner: one horizontal interpolation suffices
//f1 = *(pSrcImage + iWidth * j1 + i1);
//f2 = *(pSrcImage + iWidth * j1 + i2);
f1[0] = *(pSrcImage + nStride * j1 + i1 * 3);
f1[1] = *(pSrcImage + nStride * j1 + i1 * 3 + 1);
f1[2] = *(pSrcImage + nStride * j1 + i1 * 3 + 2);
f2[0] = *(pSrcImage + nStride * j1 + i2 * 3 + 0);
f2[1] = *(pSrcImage + nStride * j1 + i2 * 3 + 1);
f2[2] = *(pSrcImage + nStride * j1 + i2 * 3 + 2);
VAL.R = f1[0] + t * (f2[0] - f1[0]);
VAL.G = f1[1] + t * (f2[1] - f1[1]);
VAL.B = f1[2] + t * (f2[2] - f1[2]);
// Return the interpolation result
return VAL;
}
else
{
// Fetch the values of the four nearest pixels
f1[0] = *(pSrcImage + nStride * j1 + i1 * 3);
f1[1] = *(pSrcImage + nStride * j1 + i1 * 3 + 1);
f1[2] = *(pSrcImage + nStride * j1 + i1 * 3 + 2);
f2[0] = *(pSrcImage + nStride * j1 + i2 * 3);
f2[1] = *(pSrcImage + nStride * j1 + i2 * 3 + 1);
f2[2] = *(pSrcImage + nStride * j1 + i2 * 3 + 2);
f3[0] = *(pSrcImage + nStride * j2 + i1 * 3);
f3[1] = *(pSrcImage + nStride * j2 + i1 * 3 + 1);
f3[2] = *(pSrcImage + nStride * j2 + i1 * 3 + 2);
f4[0] = *(pSrcImage + nStride * j2 + i2 * 3);
f4[1] = *(pSrcImage + nStride * j2 + i2 * 3 + 1);
f4[2] = *(pSrcImage + nStride * j2 + i2 * 3 + 2);
VAL.R = uv0 * f1[0] + uv1 * f2[0] + uv2 * f3[0] + uv3 * f4[0];
VAL.G = uv0 * f1[1] + uv1 * f2[1] + uv2 * f3[1] + uv3 * f4[1];
VAL.B = uv0 * f1[2] + uv1 * f2[2] + uv2 * f3[2] + uv3 * f4[2];
return VAL;
}
return VAL;
}
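The four uv weights are the standard bilinear ones, so sampling at an exact integer coordinate must return the source pixel unchanged. A quick property check on a 2x2 test image:

// 2x2 packed test image, 3 bytes per pixel.
UBYTE8 img[2 * 2 * 3] = { 10, 20, 30,   40, 50, 60,
                          70, 80, 90,  100,110,120 };
PXL_VAL v = Interpolation_RGB24(img, 2, 2, 0.f, 0.f);
// t = s = 0 -> uv0 = 1: v must be exactly the (0,0) pixel {10, 20, 30}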

@@ -8,11 +8,23 @@
using cv::Point2d;
using cv::Point2f;
// Pixel value, shared by grayscale and RGB
typedef struct tagPXL_VAL
{
UBYTE8 R;
UBYTE8 G;
UBYTE8 B;
}PXL_VAL;
// Convert a GD frame to an RGB cv::Mat
cv::Mat getRGBAMatFromGDFrame(GD_VIDEO_FRAME_S frame,void* pArr);
cv::Mat getRGBMatFromGDFrame(GD_VIDEO_FRAME_S frame, void* pArr);
unsigned char FourPointInterpolation(unsigned char* pSrcImage, int iWidth, int iHeight, float x, float y);
PXL_VAL Interpolation_RGB24(UBYTE8* pSrcImage, int iWidth, int iHeight, float x, float y);
// Matrix addition
void AddMat(double* A, double* B, int mn, double* AaddB);
