Switch to internal RGB processing

main
wangchongwu 5 months ago
parent e9e024406a
commit 3e784fb999

@@ -101,16 +101,25 @@ void Map16BitTo8Bit(unsigned short *psh16BitData, long lDataLen, BYTE *pby8BitData
unsigned short pFrameIR[IMAGE_WIDTH_IR*(IMAGE_HEIGHT_IR + PARA_IR_LINE)] = {0};
unsigned char pImageIR[IMAGE_WIDTH_IR*(IMAGE_HEIGHT_IR)] = {0};
-int main(int, char**)
+// visible-light (TV) frame
+unsigned char pFrameVL[IMAGE_WIDTH_VL * (IMAGE_HEIGHT_VL + PARA_IR_LINE) * 2] = { 0 };
+unsigned char pImageVL[IMAGE_WIDTH_VL * (IMAGE_HEIGHT_VL) * 2] = { 0 };
+void ProcessIR()
{
-auto stitcher = API_VideoStitch::Create(IMAGE_WIDTH_IR,IMAGE_HEIGHT_IR);
-//stitcher->Test();
+auto stitcher = API_VideoStitch::Create(IMAGE_WIDTH_IR, IMAGE_HEIGHT_IR);
+//stitcher->Test();
-GD_VIDEO_FRAME_S frame = {0}; // input frame
+GD_VIDEO_FRAME_S frame = { 0 }; // input frame
GD_VIDEO_FRAME_S pan = { 0 }; // output panorama
cv::Mat mat_pan; // panorama display
//FILE* file = fopen("/media/wang/WORK/wangchongwu_gitea_2023/20241219152917_4.xraw","rb");
FILE* file = fopen("D:/wangchongwu_gitea_2023/20241219152917_4.xraw", "rb");
//FILE* file = fopen("/home/wang/data62/729dataset/ir_1280_1024_para40_y16/5.xraw","rb");
@@ -120,13 +129,13 @@ int main(int, char**)
for (size_t i = 0; i < 600; i++)
{
-fread(pFrameIR,2,IMAGE_WIDTH_IR*(IMAGE_HEIGHT_IR + PARA_IR_LINE),file);
+fread(pFrameIR, 2, IMAGE_WIDTH_IR * (IMAGE_HEIGHT_IR + PARA_IR_LINE), file);
S729paras_IR Paras_IR = { 0 };
-memcpy(&Paras_IR, (unsigned char*)(pFrameIR + IMAGE_WIDTH_IR*IMAGE_HEIGHT_IR), sizeof(S729paras_IR));
+memcpy(&Paras_IR, (unsigned char*)(pFrameIR + IMAGE_WIDTH_IR * IMAGE_HEIGHT_IR), sizeof(S729paras_IR));
-FrameInfo info = {0};
+FrameInfo info = { 0 };
info.nFrmID = i;
info.camInfo.nFocus = Paras_IR.Paras_IR.caminfo.nFocal;
info.camInfo.fPixelSize = Paras_IR.Paras_IR.caminfo.nPixleSize;
@@ -154,44 +163,44 @@ int main(int, char**)
info.craft.stAtt.fRoll += gr.generate();
-Map16BitTo8Bit(pFrameIR,IMAGE_WIDTH_IR*IMAGE_HEIGHT_IR,pImageIR);
+Map16BitTo8Bit(pFrameIR, IMAGE_WIDTH_IR * IMAGE_HEIGHT_IR, pImageIR);
cv::Mat mat_src(IMAGE_HEIGHT_IR, IMAGE_WIDTH_IR, CV_8UC1, pImageIR);
-frame.enPixelFormat = GD_PIXEL_FORMAT_GRAY_Y8;
-frame.u32Width = IMAGE_WIDTH_IR;
-frame.u32Height = IMAGE_HEIGHT_IR;
-frame.u64VirAddr[0] = pImageIR;
+frame.enPixelFormat = GD_PIXEL_FORMAT_GRAY_Y8;
+frame.u32Width = IMAGE_WIDTH_IR;
+frame.u32Height = IMAGE_HEIGHT_IR;
+frame.u64VirAddr[0] = pImageIR;
-if(i == 0)
+if (i == 0)
{
stitcher->Init(info);
pan = stitcher->ExportPanAddr();
-mat_pan = cv::Mat(pan.u32Height, pan.u32Width, CV_8UC1, pan.u64VirAddr[0]);
+mat_pan = cv::Mat(pan.u32Height, pan.u32Width, CV_8UC3, pan.u64VirAddr[0]);
}
else
{
-if(i % 10 != 0)
+if (i % 30 != 0)
{
continue;
}
-std::cout << info.craft.stPos.B << " " <<info.craft.stPos.L << " " << info.craft.stPos.H << " "
-<< info.craft.stAtt.fYaw << " " << info.craft.stAtt.fPitch << " " << info.craft.stAtt.fRoll<< " "
-<< info.servoInfo.fServoAz << " " << info.servoInfo.fServoPt
-<< std::endl;
+std::cout << info.craft.stPos.B << " " << info.craft.stPos.L << " " << info.craft.stPos.H << " "
+<< info.craft.stAtt.fYaw << " " << info.craft.stAtt.fPitch << " " << info.craft.stAtt.fRoll << " "
+<< info.servoInfo.fServoAz << " " << info.servoInfo.fServoPt
+<< std::endl;
// Fast stitching based on exterior orientation
-stitcher->GeoStitch(frame,info);
+stitcher->GeoStitch(frame, info);
// Receive the frame
-auto a = stitcher->ReceiveFrame(frame,info);
+auto a = stitcher->ReceiveFrame(frame, info);
}
@@ -207,3 +216,116 @@ int main(int, char**)
waitKey(0);
}
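Map16BitTo8Bit's body lies outside this diff; a typical min-max linear stretch that a function with this signature might implement is sketched below (an illustrative assumption, not the repository's code; assumes <algorithm> for std::min/std::max):

    void Map16BitTo8Bit(unsigned short* psh16BitData, long lDataLen, BYTE* pby8BitData)
    {
        unsigned short lo = 0xFFFF, hi = 0;
        for (long i = 0; i < lDataLen; i++)   // find the dynamic range of the Y16 data
        {
            lo = std::min(lo, psh16BitData[i]);
            hi = std::max(hi, psh16BitData[i]);
        }
        double k = (hi > lo) ? 255.0 / (hi - lo) : 0.0;
        for (long i = 0; i < lDataLen; i++)   // stretch Y16 into Y8
            pby8BitData[i] = BYTE(k * (psh16BitData[i] - lo));
    }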
void ProcessVL()
{
auto stitcher = API_VideoStitch::Create(IMAGE_WIDTH_VL, IMAGE_HEIGHT_VL);
GD_VIDEO_FRAME_S frame = { 0 }; // input frame
GD_VIDEO_FRAME_S pan = { 0 }; // output panorama
cv::Mat mat_pan; // panorama display
FILE* file = fopen("D:/wangchongwu_gitea_2023/20241219152643_1.video", "rb");
GaussianRandom gr(0.0, 1, 0.0);
for (size_t i = 0; i < 600; i++)
{
SINT32 nVLFrameSize = 1.5 * IMAGE_WIDTH_VL * IMAGE_HEIGHT_VL + IMAGE_WIDTH_VL * PARA_IR_LINE;
fread(pFrameVL, 1, nVLFrameSize, file);
S729paras_VL Paras_VL = { 0 };
memcpy(&Paras_VL, (unsigned char*)(pFrameVL + int(1.5 * IMAGE_WIDTH_VL * IMAGE_HEIGHT_VL)), sizeof(S729paras_VL));
FrameInfo info = { 0 };
info.nFrmID = i;
info.camInfo.nFocus = Paras_VL.Paras_VL.caminfo.nFocal;
info.camInfo.fPixelSize = Paras_VL.Paras_VL.caminfo.nPixleSize;
info.craft.stAtt.fYaw = Paras_VL.Paras_VL.airCraftInfo.fYaw;
info.craft.stAtt.fPitch = Paras_VL.Paras_VL.airCraftInfo.fPitch;
info.craft.stAtt.fRoll = Paras_VL.Paras_VL.airCraftInfo.fRoll;
info.craft.stPos.B = Paras_VL.Paras_VL.airCraftInfo.B;
info.craft.stPos.L = Paras_VL.Paras_VL.airCraftInfo.L;
info.craft.stPos.H = Paras_VL.Paras_VL.airCraftInfo.H;
info.nEvHeight = Paras_VL.Paras_VL.airCraftInfo.H;
info.servoInfo.fServoAz = Paras_VL.Paras_VL.servoInfo.fAz;
info.servoInfo.fServoPt = Paras_VL.Paras_VL.servoInfo.fPz + 90;
info.nWidth = IMAGE_WIDTH_VL;
info.nHeight = IMAGE_HEIGHT_VL;
info.craft.stAtt.fYaw += gr.generate();
info.craft.stAtt.fPitch += gr.generate();
info.craft.stAtt.fRoll += gr.generate();
cv::Mat mat_src(IMAGE_HEIGHT_VL * 1.5, IMAGE_WIDTH_VL, CV_8UC1, pFrameVL);
cv::Mat IMG;
cv::cvtColor(mat_src, IMG, cv::COLOR_YUV2BGR_NV12);
frame.enPixelFormat = GD_PIXEL_FORMAT_NV12;
frame.u32Width = IMAGE_WIDTH_VL;
frame.u32Height = IMAGE_HEIGHT_VL;
frame.u64VirAddr[0] = pFrameVL;
if (i == 0)
{
stitcher->Init(info);
pan = stitcher->ExportPanAddr();
mat_pan = cv::Mat(pan.u32Height, pan.u32Width, CV_8UC3, pan.u64VirAddr[0]); // pan buffer is RGB (CV_8UC3) after this commit, matching ExportPanAddr
}
else
{
if (i % 10 != 0)
{
continue;
}
std::cout << info.craft.stPos.B << " " << info.craft.stPos.L << " " << info.craft.stPos.H << " "
<< info.craft.stAtt.fYaw << " " << info.craft.stAtt.fPitch << " " << info.craft.stAtt.fRoll << " "
<< info.servoInfo.fServoAz << " " << info.servoInfo.fServoPt
<< std::endl;
// Fast stitching based on exterior orientation
stitcher->GeoStitch(frame, info);
// Receive the frame
auto a = stitcher->ReceiveFrame(frame, info);
}
imshow("VL", IMG);
waitKey(1);
}
imshow("pan_opt", mat_pan);
waitKey(0);
}
int main(int, char**)
{
ProcessIR();
//ProcessVL();
}
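For reference, the VL frame size read in ProcessVL follows directly from the NV12 layout plus the appended telemetry lines:

    // NV12 frame of W x H pixels:
    //   bytes [0, W*H)       : Y plane, one byte per pixel
    //   bytes [W*H, 1.5*W*H) : interleaved UV, one byte pair per 2x2 pixel block
    // plus W * PARA_IR_LINE bytes of telemetry appended by the capture chain,
    // giving nVLFrameSize = 1.5 * W * H + W * PARA_IR_LINE as computed above.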

@@ -34,6 +34,20 @@ GeoSolver::~GeoSolver()
void GeoSolver::SetOriginPoint(FrameInfo info)
{
originPoint = getXYZFromBLH(info.craft.stPos);
origininfo = info;
}
PointBLH GeoSolver::getBLH(cv::Mat H, cv::Point2f pt)
{
cv::Point2f pt_Geo = warpPointWithH(H, pt);
PointXYZ pt_XYZ = { 0 };
pt_XYZ.X = pt_Geo.x;
pt_XYZ.Y = pt_Geo.y;
pt_XYZ.Z = -origininfo.nEvHeight;
PointBLH res = getBLHFromXYZ(getCGCSXYZFromNUEXYZ(pt_XYZ, originPoint));
return res;
}
cv::Point2f GeoSolver::Trans_uv2Geo(cv::Point2f pos_frame, TForm form)
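The new getBLH chains the uv-to-geo homography with the NUE-to-ECEF-to-BLH conversions; a hedged usage sketch, with names taken from elsewhere in this diff (PointBLH fields assumed to follow the stPos.B/.L/.H convention):

    // Geolocate the optical centre of a 1280x1024 frame
    cv::Mat H = _GeoSolver->findHomography(info);   // uv -> local geo plane
    PointBLH blh = _GeoSolver->getBLH(H, cv::Point2f(640.f, 512.f));
    // blh.B / blh.L / blh.H now hold latitude, longitude, height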
@@ -80,7 +94,7 @@ Mat GeoSolver::Mat_TransENGMove(FrameInfo info)
move.at<double>(0, 0) = diff.Z;
move.at<double>(1, 0) = diff.X;
-move.at<double>(2, 0) = diff.Y;
+move.at<double>(2, 0) = -diff.Y;
return move;
}

@@ -45,6 +45,13 @@ public:
// Set the exterior orientation of the stitching origin
void SetOriginPoint(FrameInfo info);
// Compute latitude/longitude of a source-image point from H
PointBLH getBLH(cv::Mat H, cv::Point2f pt);
// Compute from NUE geographic coordinates
private:
// Compute the image-to-geographic R|t back-projection for the current frame
Proj AnlayseTform(FrameInfo info);
@@ -67,6 +74,8 @@ private:
PointXYZ originPoint; // ECEF (geocentric) coordinates of the mapping origin, used as the stitching reference
FrameInfo origininfo;
};
// Polygon area

@@ -82,7 +82,7 @@ PanInfo VideoStitch::Init(FrameInfo info)
{
_panPara = InitMap(info);
-_panImage = cv::Mat::zeros(_panPara.m_pan_height, _panPara.m_pan_width, CV_8UC1);
+_panImage = cv::Mat::zeros(_panPara.m_pan_height, _panPara.m_pan_width, CV_8UC3);
return _panPara;
}
@@ -143,6 +143,8 @@ PanInfo VideoStitch::Init(FrameInfo info)
BYTE8 VideoStitch::GeoStitch(GD_VIDEO_FRAME_S img, FrameInfo para)
{
+cv::Mat src = getRGBMatFromGDFrame(img,img.u64VirAddr[0]);
// uv2Geo H
cv::Mat H1 = _GeoSolver->findHomography(para);
@@ -151,8 +153,6 @@ BYTE8 VideoStitch::GeoStitch(GD_VIDEO_FRAME_S img, FrameInfo para)
// Project the current frame into the panorama with H
cv::Mat imagetmp(_panImage.size(), CV_8UC3, cv::Scalar(0, 0, 0));
-cv::Mat src(img.u32Height, img.u32Width, CV_8UC1, img.u64VirAddr[0]);
cv::warpPerspective(src, imagetmp, H, imagetmp.size());
cv::Mat mask = cv::Mat::ones(src.size(), CV_8UC1) * 255;
@@ -175,6 +175,8 @@ SINT32 VideoStitch::ReceiveFrame(GD_VIDEO_FRAME_S img, FrameInfo para)
_t_frame_cache->_para = para;
_t_frame_cache->_frame_info = img;
int imgSize = 0;
if(img.enPixelFormat == GD_PIXEL_FORMAT_GRAY_Y8)
@@ -184,6 +186,10 @@ SINT32 VideoStitch::ReceiveFrame(GD_VIDEO_FRAME_S img, FrameInfo para)
if(img.enPixelFormat == GD_PIXEL_FORMAT_RGB_PACKED)
{
imgSize = img.u32Height * img.u32Width * 3;
}
if (img.enPixelFormat == GD_PIXEL_FORMAT_NV12)
{
imgSize = img.u32Height * img.u32Width * 1.5;
}
// Deep-copy the image data
@@ -193,7 +199,8 @@ SINT32 VideoStitch::ReceiveFrame(GD_VIDEO_FRAME_S img, FrameInfo para)
std::vector<cv::KeyPoint> keypoints;
cv::Mat descriptors;
-cv::Mat src(img.u32Height, img.u32Width,CV_8UC1, _t_frame_cache->_data);
+cv::Mat src = getRGBMatFromGDFrame(img, img.u64VirAddr[0]);
_FeaMatcher->extractFeatures(src, keypoints, descriptors);
size_t keyNum = MIN(keypoints.size(), FEA_NUM_MAX);
@@ -219,6 +226,8 @@ SINT32 VideoStitch::ReceiveFrame(GD_VIDEO_FRAME_S img, FrameInfo para)
_totalFrameCnt++;
return _totalFrameCnt;
}
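The per-format byte counts used in ReceiveFrame follow directly from the pixel layouts; a compact restatement of the same rule (a sketch using this diff's types, not code from the repository):

    static size_t frameBytes(const GD_VIDEO_FRAME_S& f)
    {
        size_t px = size_t(f.u32Width) * f.u32Height;
        if (f.enPixelFormat == GD_PIXEL_FORMAT_GRAY_Y8)    return px;          // 1 byte per pixel
        if (f.enPixelFormat == GD_PIXEL_FORMAT_NV12)       return px * 3 / 2;  // Y plane + half-res UV
        if (f.enPixelFormat == GD_PIXEL_FORMAT_RGB_PACKED) return px * 3;      // interleaved RGB
        return 0;                                                              // unsupported format
    }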
@@ -234,6 +243,24 @@ SINT32 VideoStitch::ProcessFrame()
// Reproject all frames into the panorama
mapFrame(_recvFrameKey);
auto _t_frame_cache = std::make_shared<FrameCache>();
bool flag = _cache->get(10, _t_frame_cache);
// Read the current frame's H
cv::Mat H1 = cv::Mat(3, 3, CV_64FC1, _t_frame_cache->H);
// Compose with the panorama H
cv::Mat H = _H_pan * H1;
cv::Point2f p_GEO = CalGeoInfo(warpPointWithH(H, cv::Point2f(0, 0)), _H_pan);
PointBLH BLHpT = _GeoSolver->getBLH(H1, cv::Point2f(640, 512));
return 0;
}
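warpPointWithH is not shown in this diff; under the usual convention it applies a 3x3 homography to a point in homogeneous coordinates, along these lines (an assumption about its behaviour, consistent with the CV_64FC1 H built above):

    cv::Point2f warpPointWithH(const cv::Mat& H, cv::Point2f pt)
    {
        // [x' y' w']^T = H * [x y 1]^T, then dehomogenise
        cv::Mat p = (cv::Mat_<double>(3, 1) << pt.x, pt.y, 1.0);
        cv::Mat q = H * p;
        double w = q.at<double>(2, 0);
        return cv::Point2f(float(q.at<double>(0, 0) / w),
                           float(q.at<double>(1, 0) / w));
    }

The probe point (640, 512) is the centre of a 1280x1024 frame.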
@@ -263,8 +290,7 @@ void VideoStitch::mapFrame(vector<KeyType> frameInd)
cv::Mat imagetmp(_panImage.size(), CV_8UC3, cv::Scalar(0, 0, 0));
// Fetch the image data
-cv::Mat src(_t_frame_cache->_frame_info.u32Height, _t_frame_cache->_frame_info.u32Width, CV_8UC1,
-_t_frame_cache->_data);
+cv::Mat src = getRGBMatFromGDFrame(_t_frame_cache->_frame_info, _t_frame_cache->_data);
cv::warpPerspective(src, imagetmp, H, imagetmp.size());
@@ -284,7 +310,7 @@ GD_VIDEO_FRAME_S VideoStitch::ExportPanAddr()
{
GD_VIDEO_FRAME_S pan_out;
-pan_out.enPixelFormat = GD_PIXEL_FORMAT_GRAY_Y8;
+pan_out.enPixelFormat = GD_PIXEL_FORMAT_RGB_PACKED;
pan_out.u32Width = _panPara.m_pan_width;
pan_out.u32Height = _panPara.m_pan_height;
pan_out.u64VirAddr[0] = _panImage.data;
@@ -292,6 +318,16 @@ GD_VIDEO_FRAME_S VideoStitch::ExportPanAddr()
return pan_out;
}
cv::Point2f VideoStitch::CalGeoInfo(cv::Point2f ptInPan, cv::Mat _H_panPara)
{
cv::Mat H_inv = _H_panPara.inv();
cv::Point2f ptInGeo = warpPointWithH(H_inv,ptInPan);
return ptInGeo;
}
cv::Mat VideoStitch::getAffineFromGeo2Pan(PanInfo _pan)
{
Mat H = (Mat_<double>(3, 3) << _pan.scale, 0, _pan.map_shiftX,

@@ -28,6 +28,12 @@ public:
public:
GD_VIDEO_FRAME_S ExportPanAddr();
// Compute geographic info for the panorama
cv::Point2f CalGeoInfo(cv::Point2f ptInPan, cv::Mat _H_panPara);
private:
PanInfo InitMap(FrameInfo info);
@@ -36,6 +42,8 @@ private:
FileCache<FrameCache>* _cache; // file cache: raw frames passed in from outside plus preprocessing results
private:
GeoSolver* _GeoSolver; // exterior-orientation solver

@@ -0,0 +1,154 @@
#include "googleTile.h"
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <fstream>
#include <cmath>
#include <filesystem>
googleTile::googleTile(const cv::Mat& img, const std::vector<std::pair<double, double>>& geoCorners, int zoom, const std::string& outputDir)
{
image_ = img.clone();
geoCorners_ = geoCorners;
zoom_ = zoom;
outputDir_ = outputDir;
}
cv::Point2f googleTile::latLonToGlobal(double lat, double lon)
{
double tileSize = 256.0;
double n = std::pow(2.0, zoom_);
double x = (lon + 180.0) / 360.0 * tileSize * n;
double latRad = lat * CV_PI / 180.0;
double y = (1 - std::log(std::tan(latRad) + 1 / std::cos(latRad)) / CV_PI) / 2 * tileSize * n;
return cv::Point2f(static_cast<float>(x), static_cast<float>(y));
}
std::pair<double, double> googleTile::globalToLatLon(const cv::Point2f& pt)
{
double tileSize = 256.0;
double n = std::pow(2.0, zoom_);
double lon = pt.x / (tileSize * n) * 360.0 - 180.0;
double latRad = std::atan(std::sinh(CV_PI * (1 - 2 * pt.y / (tileSize * n))));
double lat = latRad * 180.0 / CV_PI;
return std::make_pair(lat, lon);
}
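These two methods are the standard Web Mercator mapping between (lat, lon) and global pixel coordinates at zoom z with 256-pixel tiles, and are exact inverses of each other:

    x = (lon + 180) / 360 * 256 * 2^z
    y = (1 - ln(tan(lat) + 1/cos(lat)) / pi) / 2 * 256 * 2^z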
cv::Mat googleTile::computeHomography()
{
std::vector<cv::Point2f> srcPts;
std::vector<cv::Point2f> dstPts;
for (const auto& corner : geoCorners_)
{
srcPts.push_back(latLonToGlobal(corner.first, corner.second));
}
dstPts.push_back(cv::Point2f(0.0f, 0.0f));
dstPts.push_back(cv::Point2f(static_cast<float>(image_.cols), 0.0f));
dstPts.push_back(cv::Point2f(static_cast<float>(image_.cols), static_cast<float>(image_.rows)));
dstPts.push_back(cv::Point2f(0.0f, static_cast<float>(image_.rows)));
return cv::getPerspectiveTransform(srcPts, dstPts);
}
void googleTile::generateKML(const std::vector<TileInfo>& tiles)
{
std::string kmlPath = outputDir_ + "/tiles.kml";
std::ofstream ofs(kmlPath);
if (!ofs)
{
std::cerr << "Failed to create KML file: " << kmlPath << std::endl;
return;
}
ofs << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
ofs << "<kml xmlns=\"http://www.opengis.net/kml/2.2\">\n";
ofs << " <Document>\n";
for (const auto& tile : tiles)
{
ofs << " <GroundOverlay>\n";
ofs << " <name>" << zoom_ << "/" << tile.x << "/" << tile.y << ".png</name>\n";
ofs << " <Icon>\n";
ofs << " <href>" << zoom_ << "/" << tile.x << "/" << tile.y << ".png</href>\n";
ofs << " </Icon>\n";
ofs << " <LatLonBox>\n";
ofs << " <north>" << tile.north << "</north>\n";
ofs << " <south>" << tile.south << "</south>\n";
ofs << " <east>" << tile.east << "</east>\n";
ofs << " <west>" << tile.west << "</west>\n";
ofs << " </LatLonBox>\n";
ofs << " </GroundOverlay>\n";
}
ofs << " </Document>\n";
ofs << "</kml>\n";
ofs.close();
std::cout << "KML file generated: " << kmlPath << std::endl;
}
void googleTile::generateTiles()
{
std::filesystem::create_directories(outputDir_);
cv::Mat H = computeHomography();
std::vector<cv::Point2f> globalCorners;
for (const auto& corner : geoCorners_)
{
globalCorners.push_back(latLonToGlobal(corner.first, corner.second));
}
float minX = globalCorners[0].x, maxX = globalCorners[0].x;
float minY = globalCorners[0].y, maxY = globalCorners[0].y;
for (const auto& p : globalCorners)
{
minX = std::min(minX, p.x);
maxX = std::max(maxX, p.x);
minY = std::min(minY, p.y);
maxY = std::max(maxY, p.y);
}
int tileSize = 256;
int xTileMin = static_cast<int>(std::floor(minX / tileSize));
int xTileMax = static_cast<int>(std::floor(maxX / tileSize));
int yTileMin = static_cast<int>(std::floor(minY / tileSize));
int yTileMax = static_cast<int>(std::floor(maxY / tileSize));
std::vector<TileInfo> tileInfos;
for (int tx = xTileMin; tx <= xTileMax; tx++)
{
for (int ty = yTileMin; ty <= yTileMax; ty++)
{
std::vector<cv::Point2f> tileGlobalPts = {
{cv::Point2f(tx * tileSize, ty * tileSize)},
{cv::Point2f((tx + 1) * tileSize, ty * tileSize)},
{cv::Point2f((tx + 1) * tileSize, (ty + 1) * tileSize)},
{cv::Point2f(tx * tileSize, (ty + 1) * tileSize)} };
std::vector<cv::Point2f> tileImgPts;
cv::perspectiveTransform(tileGlobalPts, tileImgPts, H);
std::vector<cv::Point2f> tileCorners = {
cv::Point2f(0, 0), cv::Point2f(tileSize, 0),
cv::Point2f(tileSize, tileSize), cv::Point2f(0, tileSize) };
cv::Mat tileTransform = cv::getPerspectiveTransform(tileImgPts, tileCorners);
cv::Mat tileImg;
// Warp the source image into this 256x256 tile; without this, tileImg stays
// empty and cv::imwrite below cannot write it
cv::warpPerspective(image_, tileImg, tileTransform, cv::Size(tileSize, tileSize));
std::string tileDir = outputDir_ + "/" + std::to_string(zoom_) + "/" + std::to_string(tx);
std::filesystem::create_directories(tileDir);
std::string tilePath = tileDir + "/" + std::to_string(ty) + ".png";
cv::imwrite(tilePath, tileImg);
tileInfos.push_back({ tx, ty, globalToLatLon({cv::Point2f(tx * tileSize, ty * tileSize)}).first,
globalToLatLon({cv::Point2f((tx + 1) * tileSize, (ty + 1) * tileSize)}).first,
globalToLatLon({cv::Point2f((tx + 1) * tileSize, (ty + 1) * tileSize)}).second,
globalToLatLon({cv::Point2f(tx * tileSize, ty * tileSize)}).second });
}
}
generateKML(tileInfos);
}
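A hedged usage sketch of the new tiler (image path and coordinates are illustrative placeholders):

    #include "googleTile.h"

    int main()
    {
        cv::Mat pan = cv::imread("pan.png");
        // corner order: top-left, top-right, bottom-right, bottom-left (lat, lon)
        std::vector<std::pair<double, double>> corners = {
            { 30.01, 114.00 }, { 30.01, 114.02 }, { 29.99, 114.02 }, { 29.99, 114.00 } };
        googleTile tiler(pan, corners, 17, "./tiles");
        tiler.generateTiles(); // writes ./tiles/17/<x>/<y>.png and ./tiles/tiles.kml
        return 0;
    }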

@@ -1,113 +1,37 @@
#pragma once
#ifndef GOOGLE_TILE_H
#define GOOGLE_TILE_H
#include <opencv2/opencv.hpp>
#include <cmath>
#include <sstream>
#include <string>
#include <vector>
-class GoogleTile
+class googleTile
{
public:
-// Constructor
-GoogleTile(int zoom, int x, int y, cv::Mat content = cv::Mat())
-{
-_zoom = zoom;
-_x = x;
-_y = y;
-_content = content.clone();
-_isValid = !content.empty();
-}
+struct TileInfo
+{
+int x;
+int y;
+double north;
+double south;
+double east;
+double west;
+};
-// Get the zoom level
-int getZoom() const
-{
-return _zoom;
-}
-// Get the X coordinate
-int getX() const
-{
-return _x;
-}
-// Get the Y coordinate
-int getY() const
-{
-return _y;
-}
-// Get the tile content
-cv::Mat getContent() const
-{
-return _content;
-}
-// Set the tile content
-void setContent(cv::Mat content)
-{
-_content = content.clone();
-_isValid = !content.empty();
-}
-// Check whether the tile is valid
-bool isValid() const
-{
-return _isValid;
-}
-// Get neighbouring tile coordinates
-GoogleTile getLeftTile() const
-{
-int newX = (_x - 1 + (1 << _zoom)) % (1 << _zoom);
-return GoogleTile(_zoom, newX, _y);
-}
-GoogleTile getRightTile() const
-{
-int newX = (_x + 1) % (1 << _zoom);
-return GoogleTile(_zoom, newX, _y);
-}
-GoogleTile getTopTile() const
-{
-int newY = (_y - 1 + (1 << _zoom)) % (1 << _zoom);
-return GoogleTile(_zoom, _x, newY);
-}
-GoogleTile getBottomTile() const
-{
-int newY = (_y + 1) % (1 << _zoom);
-return GoogleTile(_zoom, _x, newY);
-}
-// Convert tile coordinates to latitude/longitude
-void tileToLatLon(double& lat, double& lon) const
-{
-double n = M_PI - 2.0 * M_PI * _y / (1 << _zoom);
-lat = 180.0 / M_PI * atan(0.5 * (exp(n) - exp(-n)));
-lon = _x / (double)(1 << _zoom) * 360.0 - 180.0;
-}
+googleTile(const cv::Mat& img, const std::vector<std::pair<double, double>>& geoCorners, int zoom, const std::string& outputDir);
-// Convert latitude/longitude to tile coordinates
-static void latLonToTile(double lat, double lon, int zoom, int& x, int& y)
-{
-double lat_rad = lat * M_PI / 180.0;
-double n = 1 << zoom;
-x = static_cast<int>((lon + 180.0) / 360.0 * n);
-y = static_cast<int>((1.0 - log(tan(lat_rad) + 1.0 / cos(lat_rad)) / M_PI) / 2.0 * n);
-}
-// Get the tile URL per the Google Maps scheme
-std::string getUrl() const
-{
-std::stringstream ss;
-ss << "https://mt.google.com/vt/lyrs=s&x=" << _x << "&y=" << _y << "&z=" << _zoom;
-return ss.str();
-}
+void generateTiles();
private:
-int _zoom; // zoom level (0-21)
-int _x; // tile X coordinate
-int _y; // tile Y coordinate
-cv::Mat _content; // tile image data
-bool _isValid; // whether the tile is valid
-};
+cv::Mat image_;
+std::vector<std::pair<double, double>> geoCorners_; // top-left, top-right, bottom-right, bottom-left
+int zoom_;
+std::string outputDir_;
+cv::Point2f latLonToGlobal(double lat, double lon);
+std::pair<double, double> globalToLatLon(const cv::Point2f& pt);
+cv::Mat computeHomography();
+void generateKML(const std::vector<TileInfo>& tiles);
+};
#endif // GOOGLE_TILE_H

@@ -41,7 +41,7 @@ struct Match_Net
};
-#define IMG_CACHE_SIZE (1920 * 1080 * 3) // image cache size
+#define IMG_CACHE_SIZE (1920 * 1080 * 2) // image cache size
#define FEA_NUM_MAX 500 // max feature points per frame
#define FEA_DES_SIZE 128 // feature descriptor dimension

@@ -303,6 +303,38 @@ std::string GetDynamicLibraryPath()
cv::Mat getRGBMatFromGDFrame(GD_VIDEO_FRAME_S frame, void* pArr)
{
// For stitching, Y8 and NV12 are currently supported
cv::Mat dst(frame.u32Height, frame.u32Width, CV_8UC3);
// Y8 input
if (frame.enPixelFormat == GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_GRAY_Y8)
{
cv::Mat mat(frame.u32Height, frame.u32Width, CV_8UC1, pArr);
cv::cvtColor(mat, dst, cv::COLOR_GRAY2BGR);
return dst.clone();
}
// nv12
if (frame.enPixelFormat == GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_NV12)
{
cv::Mat mat(frame.u32Height * 1.5, frame.u32Width, CV_8UC1, pArr);
cv::cvtColor(mat, dst, cv::COLOR_YUV2BGR_NV12);
return dst.clone();
}
// rgb
if (frame.enPixelFormat == GD_PIXEL_FORMAT_E::GD_PIXEL_FORMAT_RGB_PACKED)
{
cv::Mat mat(frame.u32Height, frame.u32Width, CV_8UC3, pArr);
return mat.clone();
}
return cv::Mat();
}
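A quick usage sketch of the conversion helper (buffer and sizes hypothetical):

    unsigned char nv12Buf[1280 * 1024 * 3 / 2] = { 0 }; // hypothetical NV12 input, 1.5 * W * H bytes
    GD_VIDEO_FRAME_S f = { 0 };
    f.enPixelFormat = GD_PIXEL_FORMAT_NV12;
    f.u32Width = 1280;
    f.u32Height = 1024;
    f.u64VirAddr[0] = nv12Buf;
    cv::Mat rgb = getRGBMatFromGDFrame(f, f.u64VirAddr[0]); // CV_8UC3 deep copy; empty Mat if unsupported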
unsigned char FourPointInterpolation(unsigned char* pSrcImage, int iWidth, int iHeight, float x, float y)
{
if (x < 0 || y < 0 || x > iWidth || y > iHeight)
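The hunk cuts FourPointInterpolation off after its bounds check; a plausible completion consistent with the name, i.e. bilinear blending of the four neighbouring pixels (an assumption, not the repository's body; assumes <algorithm> for std::min):

    int x0 = (int)x, y0 = (int)y;
    int x1 = std::min(x0 + 1, iWidth - 1), y1 = std::min(y0 + 1, iHeight - 1);
    float fx = x - x0, fy = y - y0;
    float v = (1 - fx) * (1 - fy) * pSrcImage[y0 * iWidth + x0]
            + fx * (1 - fy) * pSrcImage[y0 * iWidth + x1]
            + (1 - fx) * fy * pSrcImage[y1 * iWidth + x0]
            + fx * fy * pSrcImage[y1 * iWidth + x1];
    return (unsigned char)(v + 0.5f);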

@@ -4,12 +4,12 @@
#pragma once
#include "Arith_SysStruct.h"
#include "opencv2/opencv.hpp"
#include "PlatformDefine.h"
using cv::Point2d;
using cv::Point2f;
// Convert a GD frame to an RGB cv::Mat
cv::Mat getRGBMatFromGDFrame(GD_VIDEO_FRAME_S frame,void* pArr);
unsigned char FourPointInterpolation(unsigned char* pSrcImage, int iWidth, int iHeight, float x, float y);
