tag:全景图与BA过程框架已完成

main
wangchongwu 5 months ago
parent 0acfe7114f
commit e9e024406a

@ -190,9 +190,7 @@ int main(int, char**)
// 基于外参的快拼
stitcher->GeoStitch(frame,info);
// test接口
// 接收帧
auto a = stitcher->ReceiveFrame(frame,info);
}
@ -200,8 +198,6 @@ int main(int, char**)
imshow("pan", mat_pan);
waitKey(1);
}
// 处理帧
@ -210,5 +206,4 @@ int main(int, char**)
imshow("pan_opt", mat_pan);
waitKey(0);
}

@ -6,8 +6,8 @@ using namespace ceres;
// 定义残差结构体
struct HomographyResidual
{
HomographyResidual(const cv::KeyPoint& keypoint_i, const cv::KeyPoint& keypoint_j, const cv::Mat H1, const cv::Mat H2)
: keypoint_i_(keypoint_i), keypoint_j_(keypoint_j), H1_(H1), H2_(H2) {
HomographyResidual(const cv::KeyPoint& keypoint_i, const cv::KeyPoint& keypoint_j, const cv::Mat H1, const cv::Mat H2, const float sx,const float sy)
: keypoint_i_(keypoint_i), keypoint_j_(keypoint_j), H1_(H1), H2_(H2) ,sclae_x_(sx),scale_y_(sy){
}
template <typename T>
@ -47,17 +47,22 @@ struct HomographyResidual
residual[1] = P_i[1] - P_j[1];
// 计算单应性矩阵的F范数
T norm_H_i = sqrt(h_i[0] * h_i[0] + h_i[1] * h_i[1] + h_i[2] * h_i[2] +
T norm_H_i = (h_i[0] * h_i[0] + h_i[1] * h_i[1] + h_i[2] * h_i[2] +
h_i[3] * h_i[3] + h_i[4] * h_i[4] + h_i[5] * h_i[5] +
h_i[6] * h_i[6] + h_i[7] * h_i[7] + T(1.0));
T norm_H_j = sqrt(h_j[0] * h_j[0] + h_j[1] * h_j[1] + h_j[2] * h_j[2] +
T norm_H_j = (h_j[0] * h_j[0] + h_j[1] * h_j[1] + h_j[2] * h_j[2] +
h_j[3] * h_j[3] + h_j[4] * h_j[4] + h_j[5] * h_j[5] +
h_j[6] * h_j[6] + h_j[7] * h_j[7] + T(1.0));
// 2.添加尺度正则项
residual[2] = norm_H_i - T(cv::norm(H1_));
residual[3] = norm_H_j - T(cv::norm(H2_));
T lambda = T(0.001);
residual[2] = lambda*(norm_H_i - T(cv::norm(H1_)*cv::norm(H1_)));
residual[3] = lambda*(norm_H_j - T(cv::norm(H2_)*cv::norm(H2_)));
//residual[2] = lambda*((P_i[0] + P_j[0]) - T(sclae_x_));
//residual[3] = lambda*((P_i[1] + P_j[1]) - T(scale_y_));
return true;
}
@ -69,6 +74,9 @@ private:
const cv::Mat H1_;
const cv::Mat H2_;
const float sclae_x_;
const float scale_y_;
};
@ -76,6 +84,8 @@ BA_Task::BA_Task(FileCache<FrameCache>* cache)
{
_cache = cache;
_FeaMatcher = new FeatureMatcher(DetectorType::SIFT, MatcherType::FLANN);
google::InitGoogleLogging("ceres");
}
BA_Task::~BA_Task()
@ -83,9 +93,25 @@ BA_Task::~BA_Task()
}
// Reset all per-run member containers so a bundle-adjustment pass starts
// from an empty state. Invoked at the top of BA_Task::OptFrame before the
// selected frames are re-read from the file cache, so InitTask must leave
// every container empty regardless of any previous run.
void BA_Task::InitTask()
{
_imgVec.clear();      // NOTE(review): presumably cached frame images — confirm against readFrameInfo
_frameInd.clear();    // NOTE(review): presumably the frame keys of the current task — confirm
_origMatrix.clear();  // original (pre-optimization) homography per frame; read as H1/H2 in OptFrame
_currMatrix.clear();  // working homographies handed to Ceres (h_list is sized from this vector)
_polygon.clear();     // NOTE(review): presumably per-frame projected outline polygons — confirm
_FeaPtVec.clear();    // per-frame keypoints (filled by readFrameInfo)
_FeaDespVec.clear();  // per-frame descriptor matrices (filled by readFrameInfo)
_paraVec.clear();     // per-frame original extrinsic parameters (filled by readFrameInfo)
}
void BA_Task::OptFrame(vector<KeyType> frameInd,cv::Mat H_map)
{
// 任务容器初始化
InitTask();
// 读取帧信息
readFrameInfo(frameInd);
@ -94,7 +120,7 @@ void BA_Task::OptFrame(vector<KeyType> frameInd,cv::Mat H_map)
// 开始BA
google::InitGoogleLogging("ceres");
// 将 cv::Mat 转换为 Ceres 需要的数组形式
std::vector<double*> h_list(_currMatrix.size());
@ -155,9 +181,16 @@ void BA_Task::OptFrame(vector<KeyType> frameInd,cv::Mat H_map)
cv::Mat H1 = _origMatrix[i];
cv::Mat H2 = _origMatrix[j];
auto warpPi_0 = warpPointWithH(H1,keypoint_i.pt);
auto warpPj_0 = warpPointWithH(H2,keypoint_j.pt);
float sx = warpPi_0.x + warpPj_0.x;
float sy = warpPi_0.y + warpPj_0.y;
ceres::CostFunction* cost_function =
new ceres::AutoDiffCostFunction<HomographyResidual, 4, 8, 8>(
new HomographyResidual(keypoint_i, keypoint_j, H1, H2));
new HomographyResidual(keypoint_i, keypoint_j, H1, H2,sx,sy));
problem.AddResidualBlock(cost_function, loss_function, h_list[i], h_list[j]);
}
@ -229,7 +262,7 @@ int BA_Task::readFrameInfo(vector<KeyType> frameInd)
_FeaPtVec.push_back(keypoints);
// 描述子
cv::Mat descriptors(_t_frame_cache->ptNum, FEA_DES_SIZE, CV_32FC1, _t_frame_cache->_desp);
_FeaDespVec.push_back(descriptors);
_FeaDespVec.push_back(descriptors.clone());
// 原始外参
_paraVec.push_back(_t_frame_cache->_para);

@ -25,6 +25,9 @@ public:
BA_Task(FileCache<FrameCache>* cache);
~BA_Task();
// 初始化BA任务
void InitTask();
// 优化指定帧并更新H到缓存中,H_map用于优化过程可视化
void OptFrame(vector<KeyType> frameInd, cv::Mat H_map);
@ -32,6 +35,9 @@ public:
// 更新缓存中的帧H矩阵并存储
bool updateCacheH(KeyType Key, cv::Mat H);
private:
// 从缓存读取指定帧信息(不需要图像)

@ -55,7 +55,7 @@ PanInfo VideoStitch::InitMap(FrameInfo info)
PanInfo panPara = { 0 };
panPara.m_pan_width = 1000;//全景宽
panPara.m_pan_height = 1000;//全景高
panPara.scale = 0.3;//比例尺,1m = ?pix
panPara.scale = 0.5;//比例尺,1m = ?pix
// 直接无平移解算
cv::Mat H_0 = getAffineFromGeo2Pan(panPara);
@ -82,7 +82,7 @@ PanInfo VideoStitch::Init(FrameInfo info)
{
_panPara = InitMap(info);
_pan = cv::Mat::zeros(_panPara.m_pan_height, _panPara.m_pan_width, CV_8UC1);
_panImage = cv::Mat::zeros(_panPara.m_pan_height, _panPara.m_pan_width, CV_8UC1);
return _panPara;
}
@ -149,7 +149,7 @@ BYTE8 VideoStitch::GeoStitch(GD_VIDEO_FRAME_S img, FrameInfo para)
cv::Mat H = _H_pan * H1;
// 利用H投影当前帧到全景
cv::Mat imagetmp(_pan.size(), CV_8UC3, cv::Scalar(0, 0, 0));
cv::Mat imagetmp(_panImage.size(), CV_8UC3, cv::Scalar(0, 0, 0));
cv::Mat src(img.u32Height, img.u32Width, CV_8UC1, img.u64VirAddr[0]);
@ -158,7 +158,7 @@ BYTE8 VideoStitch::GeoStitch(GD_VIDEO_FRAME_S img, FrameInfo para)
cv::Mat mask = cv::Mat::ones(src.size(), CV_8UC1) * 255;
cv::Mat warped_mask;
cv::warpPerspective(mask, warped_mask, H, imagetmp.size());
imagetmp.copyTo(_pan, warped_mask);
imagetmp.copyTo(_panImage, warped_mask);
return 0;
}
@ -192,7 +192,9 @@ SINT32 VideoStitch::ReceiveFrame(GD_VIDEO_FRAME_S img, FrameInfo para)
// 提取特征点
std::vector<cv::KeyPoint> keypoints;
cv::Mat descriptors;
_FeaMatcher->extractFeatures(cv::Mat(img.u32Height, img.u32Width,CV_8UC1, _t_frame_cache->_data), keypoints, descriptors);
cv::Mat src(img.u32Height, img.u32Width,CV_8UC1, _t_frame_cache->_data);
_FeaMatcher->extractFeatures(src, keypoints, descriptors);
size_t keyNum = MIN(keypoints.size(), FEA_NUM_MAX);
@ -213,6 +215,8 @@ SINT32 VideoStitch::ReceiveFrame(GD_VIDEO_FRAME_S img, FrameInfo para)
// 预处理结果加入文件缓存
_cache->set(para.nFrmID,_t_frame_cache);
_recvFrameKey.push_back(para.nFrmID);
_totalFrameCnt++;
return _totalFrameCnt;
@ -222,20 +226,21 @@ SINT32 VideoStitch::ProcessFrame()
{
vector<KeyType> vec;
for (size_t i = 10; i < 300; i+=10)
{
vec.push_back(i);
}
vector<KeyType> vec_opt;
_BATask->OptFrame(vec, _H_pan);
// 优化所有帧
_BATask->OptFrame(_recvFrameKey, _H_pan);
mapFrame(vec);
// 重投影所有帧到全景
mapFrame(_recvFrameKey);
return SINT32();
return 0;
}
void VideoStitch::mapFrame(vector<KeyType> frameInd)
{
_panImage.setTo(255);
// 从文件缓存获取帧
auto _t_frame_cache = std::make_shared<FrameCache>();
for (size_t i = 0; i < frameInd.size(); i++)
@ -255,7 +260,7 @@ void VideoStitch::mapFrame(vector<KeyType> frameInd)
cv::Mat H = _H_pan * H1;
// 利用H投影当前帧到全景
cv::Mat imagetmp(_pan.size(), CV_8UC3, cv::Scalar(0, 0, 0));
cv::Mat imagetmp(_panImage.size(), CV_8UC3, cv::Scalar(0, 0, 0));
// 获取图像数据
cv::Mat src(_t_frame_cache->_frame_info.u32Height, _t_frame_cache->_frame_info.u32Width, CV_8UC1,
@ -266,10 +271,7 @@ void VideoStitch::mapFrame(vector<KeyType> frameInd)
cv::Mat mask = cv::Mat::ones(src.size(), CV_8UC1) * 255;
cv::Mat warped_mask;
cv::warpPerspective(mask, warped_mask, H, imagetmp.size());
imagetmp.copyTo(_pan, warped_mask);
imagetmp.copyTo(_panImage, warped_mask);
}
@ -285,7 +287,7 @@ GD_VIDEO_FRAME_S VideoStitch::ExportPanAddr()
pan_out.enPixelFormat = GD_PIXEL_FORMAT_GRAY_Y8;
pan_out.u32Width = _panPara.m_pan_width;
pan_out.u32Height = _panPara.m_pan_height;
pan_out.u64VirAddr[0] = _pan.data;
pan_out.u64VirAddr[0] = _panImage.data;
return pan_out;
}

@ -19,25 +19,20 @@ public:
// 接收帧
SINT32 ReceiveFrame(GD_VIDEO_FRAME_S img, FrameInfo para);
// 处理帧
SINT32 ProcessFrame();
// 投影显示
// 投影到全景图
void mapFrame(vector<KeyType> frameInd);
public:
GD_VIDEO_FRAME_S ExportPanAddr();
private:
PanInfo InitMap(FrameInfo info);
vector<KeyType> _recvFrameKey;// 接收帧总表
FileCache<FrameCache>* _cache;//文件缓存,存储外部传入的原始帧信息以及预处理结果
@ -56,7 +51,7 @@ private:
cv::Mat getAffineFromGeo2Pan(PanInfo _pan);//计算全景图投影,从地理系到全景地图,统一计算
cv::Mat _pan;
cv::Mat _panImage;
int _totalFrameCnt;//处理帧计数

Loading…
Cancel
Save