OpenCV C++ tutorial videos: organized source code (1)
Date: 2022-11-19 04:00:01
0. Basic code skeleton
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
waitKey(0);
return -1;
}
1. Test code for the setup:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
Mat src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");
if (src.empty())
{
printf("could not load image...\n");
return -1;
}
namedWindow("test opencv setup", CV_WINDOW_AUTOSIZE);
imshow("test opencv setup", src);
waitKey(0);
return 0;
}
2. Loading, modifying, and saving an image
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
Mat src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");//read the image
if (src.empty())
{
printf("could not load image...\n");
return -1;
}
namedWindow("opencv setup demo", CV_WINDOW_AUTOSIZE);///图像显示窗设置大小
imshow("opencv setup demo", src);//显示图像(原始图像)
namedWindow("output windows", CV_WINDOW_AUTOSIZE);///设置图像显示窗口大小
Mat output_image;
cvtColor(src, output_image, CV_BGR2HLS);//convert the color image to the HLS color space
imshow("output windows", output_image);
imwrite("C:/Users/25503/Desktop/张教授项目/1.png",output_image);//save the image; the path sets the location and the extension sets the file type
waitKey(0);
return 0;
}
3. Increasing contrast (1)
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
Mat src, dst;
src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");//read the image
if (src.empty())
{
printf("could not load image...\n");
return -1;
}
namedWindow("opencv setup demo", CV_WINDOW_AUTOSIZE);///图像显示窗设置大小
imshow("opencv setup demo", src);//显示图像(原始图像)
int cols = (src.cols - 1)*src.channels();
int offsetx = src.channels();
int rows = src.rows;
dst = Mat::zeros(src.size(), src.type());
for (int row = 1; row < (rows - 1); row++)
{
const uchar* previous = src.ptr<uchar>(row - 1);//previous row
const uchar* current = src.ptr<uchar>(row);//current row
const uchar* next = src.ptr<uchar>(row + 1);//next row
uchar* output = dst.ptr<uchar>(row);//output row
for (int col = offsetx; col < cols; col++)
{
output[col] = saturate_cast<uchar>(5 * current[col] - (current[col - offsetx] + current[col + offsetx] + previous[col] + next[col]));//3x3 sharpening mask
}
}
namedWindow("test opencv setup", CV_WINDOW_AUTOSIZE);
imshow("test opencv setup", dst);
waitKey(0);
return 0;
}
3. Increasing contrast (2)
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
Mat src, dst;
src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");//read the image
if (src.empty())
{
printf("could not load image...\n");
return -1;
}
namedWindow("opencv setup demo", CV_WINDOW_ATOSIZE);//图像显示窗口设置大小
imshow("opencv setup demo", src);//显示图像(原始图像)
/*int cols = (src.cols - 1)*src.channels();
int offsetx = src.channels();
int rows = src.rows;
dst = Mat::zeros(src.size(), src.type());
for (int row = 1; row < (rows - 1); row++)
{
const uchar* previous = src.ptr<uchar>(row - 1);
const uchar* current = src.ptr<uchar>(row);
const uchar* next = src.ptr<uchar>(row + 1);
uchar* output = dst.ptr<uchar>(row);
for (int col = offsetx; col < cols; col++)
{
output[col] = saturate_cast<uchar>(5 * current[col] - (current[col - offsetx] + current[col + offsetx] + previous[col] + next[col]));
}
}
*/
Mat kernel = (Mat_<char>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);//3x3 sharpening kernel
filter2D(src, dst, src.depth(), kernel);
namedWindow("test opencv setup", CV_WINDOW_AUTOSIZE);
imshow("test opencv setup", dst);
waitKey(0);
return 0;
}
4. Adjusting image contrast and brightness
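The program for this lesson is not included in these notes. Below is a minimal sketch (not the video's original code) that applies the usual formulation dst(x,y) = alpha*src(x,y) + beta with Mat::convertTo; the image path, alpha, and beta are placeholder assumptions.
#include <opencv2/opencv.hpp>
using namespace cv;
int main(int argc, char** argv)
{
Mat src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");//assumed path, same as the other examples
if (src.empty())
{
printf("could not load image...\n");
return -1;
}
double alpha = 1.5;//contrast gain (>1 increases contrast)
int beta = 30;//brightness offset added to every pixel
Mat dst;
src.convertTo(dst, -1, alpha, beta);//dst = saturate_cast<uchar>(alpha*src + beta), applied per channel
imshow("input image", src);
imshow("contrast and brightness", dst);
waitKey(0);
return 0;
}
The same effect could be obtained with a per-pixel loop and saturate_cast, as in section 3 above; convertTo just does the scaling and clamping internally.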
5. Image blurring (the code is in P09 further below)
6. Video read/write (1): reading a saved .mp4 file
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
VideoCapture capture;
capture.open("C:/Users/25503/Desktop/张教授项目/text.mp4");
if (!capture.isOpened())
{
printf("could not load video data....\n");
return -1;
}
Mat frame;
namedWindow("video-demo", CV_WINDOW_AUTOSIZE);
while (capture.read(frame))
{
imshow("video-demo", frame);
char c = waitKey(100);
if (c == 27)
{
break;
}
}
waitKey(0);
return 0;
}
6. Video read/write (2): capturing from the camera instead of a saved .mp4 file
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
VideoCapture capture(0);//open the camera
//capture.open("C:/Users/25503/Desktop/张教授项目/text.mp4");//read the video from a saved file instead
if (!capture.isOpened())
{
printf("could not load video data....\n");
return -1;
}
Mat frame;
namedWindow("video-demo", CV_WINDOW_AUTOSIZE);
while (capture.read(frame))
{
imshow("video-demo", frame);
char c = waitKey(100);
if (c == 27)
{
break;
}
}
waitKey(0);
return 0;
}
7. Color-based trajectory tracking
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
Rect roi;
void processFrame(Mat &binary, Rect &rect);
int main()
{
// load video
VideoCapture capture;
capture.open("C:/Users/25503/Desktop/张教授项目/text.mp4");
if (!capture.isOpened())
{
printf("could not find video file....\n");
return -1;
}
Mat frame,mask;
Mat kernel1 = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
Mat kernel2 = getStructuringElement(MORPH_RECT, Size(5, 5), Point(-1, -1));
namedWindow("input video", CV_WINDOW_AUTOSIZE);
namedWindow("track mask", CV_WINDOW_AUTOSIZE);
while (capture.read(frame))
{
inRange(frame, Scalar(0, 127, 0), Scalar(120, 255, 120), mask);//filter by color range
morphologyEx(mask, mask, MORPH_OPEN, kernel1, Point(-1, -1), 1);//opening
dilate(mask, mask, kernel2, Point(-1, -1), 4);//dilation
imshow("track mask", mask);
processFrame(mask, roi);//find contours and locate the target
rectangle(frame, roi, Scalar(0, 0, 255), 3, 8, 0);
imshow("input video", frame);
//trigger exit
char c = waitKey(1);
if (c == 27)
{
break;
}
}
capture.release();
waitKey(0);
return 0;
}
void processFrame(Mat &binary, Rect &rect)
{
vector<vector<Point>> contours;
vector<Vec4i> hireachy;
findContours(binary, contours, hireachy,RETR_EXTERNAL, CHAIN_APPROX_SIMPLE,Point(0,0));
if (contours.size() > 0)
{
double maxArea = 0.0;
for (size_t t = 0; t < contours.size(); t++)
{
double area = contourArea(contours[static_cast<int>(t)]);
if (area > maxArea)
{
maxArea = area;
rect = boundingRect(contours[static_cast<int>(t)]);
}
}
}
else
{
rect.x = rect.y = rect.width = rect.height = 0;
}
}
8. Single-target tracking from the camera
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
Rect roi;
void processFrame(Mat &binary, Rect &rect);
int main()
{
// load video
VideoCapture capture(0);//open the camera
//VideoCapture capture;
//capture.open("C:/Users/25503/Desktop/张教授项目/text.mp4");
if (!capture.isOpened())
{
printf("could not find video file....\n");
return -1;
}
Mat frame, mask;
Mat kernel1 = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
Mat kernel2 = getStructuringElement(MORPH_RECT, Size(5, 5), Point(-1, -1));
namedWindow("input video", CV_WINDOW_AUTOSIZE);
namedWindow("track mask", CV_WINDOW_AUTOSIZE);
while (capture.read(frame))
{
inRange(frame, Scalar(220, 220, 220), Scalar(255, 255, 255), mask);//filter by color range
imshow("track mask1", mask);
morphologyEx(mask, mask, MORPH_OPEN, kernel1, Point(-1, -1), 1);//opening
dilate(mask, mask, kernel2, Point(-1, -1), 4);//dilation
imshow("track mask2", mask);
processFrame(mask, roi);//find contours and locate the target
rectangle(frame, roi, Scalar(0, 0, 255), 3, 8, 0);//BGR
imshow("input video", frame);
//trigger exit
char c = waitKey(1);
if (c == 27)
{
break;
}
}
capture.release();
waitKey(0);
return 0;
}
void processFrame(Mat &binary, Rect &rect)
{
vector<vector<Point>> contours;
vector<Vec4i> hireachy;
findContours(binary, contours, hireachy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));
if (contours.size() > 0)
{
double maxArea = 0.0;
for (size_t t = 0; t < contours.size(); t++)
{
double area = contourArea(contours[static_cast<int>(t)]);
if (area > maxArea)
{
maxArea = area;
rect = boundingRect(contours[static_cast<int>(t)]);
}
}
}
else
{
rect.x = rect.y = rect.width = rect.height = 0;
}
}
9. Single-target tracking APIs: TrackerKCF is fast and generally works well; TrackerBoosting, TrackerMIL, TrackerMedianFlow, and TrackerTLD are alternatives
#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
VideoCapture capture;
capture.open("C:/Users/25503/Desktop/张教授项目/text.mp4");//read the video file
if (!capture.isOpened())
{
printf("could not load video data....\n");
return -1;
}
Mat frame;
namedWindow("tracker demo", CV_WINDOW_AUTOSIZE);
Ptr<Tracker> tracker = TrackerKCF::create();//KCF tracker (any of the tracker classes listed above could be used here)
capture.read(frame);
Rect2d roi = selectROI("tracker demo", frame);
if (roi.width == 0 || roi.height == 0)
{
return -1;
}
tracker->init(frame, roi);
while (capture.read(frame))
{
tracker->update(frame, roi);
rectangle(frame, roi, Scalar(255, 0, 0), 2, 8, 0);
imshow("track demo", frame);
char c = waitKey(20);
if (c == 27)
{
break;
}
}
capture.release();
waitKey(0);
return 0;
}
(2) Single-target tracking program (camera input)
#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
VideoCapture capture(0);//open the camera
if (!capture.isOpened())
{
printf("could not load video data....\n");
return -1;
}
Mat frame;
namedWindow("tracker demo", CV_WINDOW_AUTOSIZE);
Ptr<Tracker> tracker = TrackerKCF::create();//KCF tracker
capture.read(frame);
Rect2d roi = selectROI("tracker demo", frame);
if (roi.width == 0 || roi.height == 0)
{
return -1;
}
tracker->init(frame, roi);
while (capture.read(frame))
{
tracker->update(frame, roi);
rectangle(frame, roi, Scalar(255, 0, 0), 2, 8, 0);
imshow("track demo", frame);
char c = waitKey(20);
if (c == 27)
{
break;
}
}
capture.release();
waitKey(0);
return 0;
}
Multi-target tracking 1 ***** failed, because the OpenCV version used in the teaching video differs from my own; for the version that was finally debugged successfully, see (2) under P123
#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
VideoCapture capture;
capture.open("C:/Users/25503/Desktop/张教授项目/text.mp4");
if (!capture.isOpened())
{
printf("could not load data....\n");
return -1;
}
namedWindow("Multiple Objects Tracking", CV_WINDOW_AUTOSIZE);
MultiTracker trackers("KCF");//old-style MultiTracker constructed with the tracker name (pre-3.4 API)
vector<Rect2d> objects;
Mat frame, gray;
capture.read(frame);
selectROIs("Multiple Object Tracking", frame, objects);
if (objects.size() < 1)
{
return -1;
}
trackers.add(frame, objects);
while (capture.read(frame))
{
trackers.update(frame);
for (size_t t = 0; t < trackers.objects.size(); t++)
{
rectangle(frame, trackers.objects[t], Scalar(0, 0, 255), 2, 8, 0);
}
imshow("Multiple Objects Tracking", frame);
char c = waitKey(50);
if (c == 27)
{
break;
}
}
capture.release();
waitKey(0);
return -1;
}
Single-target tracking (CamShift)
#include <opencv2/opencv.hpp>
bool selectObject = false; //whether a target is currently being selected with the mouse
int trackObject = 0; // 1 means there is an object to track, 0 means there is none
//-1 means the object has been selected but its CamShift properties have not been computed yet
cv::Rect selection;//the region selected with the mouse
cv::Mat image;//buffer for the video frame being processed
/*
OpenCV defines the registered mouse callback as:
void onMouse(int event, int x, int y, int flag, void *param)
where the fourth argument, flag, carries extra state for the event and param is the user-supplied pointer
*/
void onMouse( int event, int x, int y, int, void* ) {
static cv::Point origin;
/*What the static keyword does:
1. Hiding: it allows functions and variables with the same name to be defined in different source files
2. It keeps the variable's value persistent between calls
*/
if(selectObject) {//while a selection is in progress, update it below
//compute the top-left corner and the width and height of the selected region
selection.x = MIN(x, origin.x);
selection.y = MIN(y, origin.y);
selection.width = std::abs(x - origin.x);
selection.height = std::abs(y - origin.y);
//the & operator is overloaded by cv::Rect
//it takes the intersection of the two rectangles, mainly to handle the mouse moving outside the frame while selecting
selection &= cv::Rect(0, 0, image.cols, image.rows);
}
switch(event) {
//handle the left mouse button being pressed
case CV_EVENT_LBUTTONDOWN:
origin = cv::Point(x, y);
selection = cv::Rect(x, y, 0, 0);
selectObject = true;
break;
//handle the left mouse button being released
case CV_EVENT_LBUTTONUP:
selectObject = false;
if( selection.width > 0 && selection.height > 0 )
trackObject = -1;//the target has been selected but its CamShift properties have not been computed yet
break;
}
}
int main( int argc, const char** argv )
{
cv::VideoCapture video("video.ogv");//read from a file
//to use the camera instead, the code would be
//cv::VideoCapture video(0);
cv::namedWindow( "test" );
//register the mouse callback; the third argument is the user data passed to the callback,
//i.e. the final param argument of the callback
cv::setMouseCallback( "test", onMouse, 0 );
/*
Containers for the captured frames: OpenCV Mat objects.
Mat (short for matrix) is OpenCV's central class;
OpenCV keeps the bitmap idea and describes an image made of pixels as a matrix.
*/
cv::Mat frame, hsv, hue, mask, hist, backproj;
cv::Rect trackWindow; //the tracked window
int hsize = 16;//needed to compute the histogram
float hranges[] = {0,180};//needed to compute the histogram
const float* phranges = hranges;//needed to compute the histogram
while(true) {
//write the next frame from video into frame;
//the >> operator here is overloaded by OpenCV
video >> frame;
if( frame.empty() )//exit the loop when there are no more frames to read
break;
//copy the frame into the global image as the buffer for CamShift
frame.copyTo(image);
//convert to the HSV color space
cv::cvtColor(image, hsv, cv::COLOR_BGR2HSV);
//process only when there is a target
if( trackObject ) {
// keep only pixels with H:0~180, S:30~256, V:10~256, filter out the rest and copy the result to mask
cv::inRange(hsv, cv::Scalar(0, 30, 10), cv::Scalar(180, 256, 256), mask);
//the next three lines split the H channel out of the HSV image
int ch[] = {0, 0};
hue.create(hsv.size(), hsv.depth());
cv::mixChannels(&hsv, 1, &hue, 1, ch, 1);
//if the object's properties have not been extracted yet, extract them from the selected region
if( trackObject < 0 ) {
//set the ROI of the H channel and of the mask image
cv::Mat roi(hue, selection), maskroi(mask, selection);
//compute the histogram of the ROI
calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
//normalize the histogram
normalize(hist, hist, 0, 255, CV_MINMAX);
//set the tracking window
trackWindow = selection;
//mark that the histogram properties of the target have been computed
trackObject = 1;
}
//back-project the histogram
calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
//take the intersection with the mask
backproj &= mask;
//call the CamShift algorithm
cv::RotatedRect trackBox = CamShift(backproj, trackWindow, cv::TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));
//handle the case where the tracked area becomes too small
if( trackWindow.area() <= 1 ) {
int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5)/6;
trackWindow = cv::Rect(trackWindow.x - r, trackWindow.y - r,
trackWindow.x + r, trackWindow.y + r) &
cv::Rect(0, 0, cols, rows);
}
//draw the tracked region
ellipse( image, trackBox, cv::Scalar(0,0,255), 3, CV_AA );
}
//if a target is currently being selected, draw the selection box
if( selectObject && selection.width > 0 && selection.height > 0 ) {
cv::Mat roi(image, selection);
bitwise_not(roi, roi);//invert the colors of the selected region
}
imshow( "test", image );//显示当前帧
// 录制视频帧率为 15, 等待 1000/15 保证视频播放流畅。
// waitKey(int delay) 是 OpenCV 提供的一个等待函数,
// 当运行到这个函数时会阻塞 delay 毫秒的时间来等待键盘输入
char c = (char)cv::waitKey(1000/15.0);
if( c == 27 )//exit the loop when ESC is pressed
break;
}
//release the resources that were acquired
cv::destroyAllWindows();
video.release();
return 0;
}
/*Steps to run this program:
OpenCV and OpenGL need to be installed.
Installing OpenCV on Linux:
sudo apt-get install libopencv-dev
Installing OpenGL on Linux:
//sudo apt-get update && sudo apt-get install freeglut3 freeglut3-dev
A screen-recording tool for Linux can be installed with:
sudo apt-get update && sudo apt-get install gtk-recordmydesktop
Compile command on Linux:
g++ main.cpp `pkg-config --cflags --libs opencv` -o main
*/
2. Multi-target tracking
// Opencv_MultiTracker.cpp : This file contains the "main" function. Program execution begins and ends here.
//
#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>
#include <iostream>
using namespace cv;
using namespace std;
vector<string> trackerTypes = { "BOOSTING", "MIL", "KCF", "TLD", "MEDIANFLOW", "GOTURN", "MOSSE", "CSRT" };
/**
* @brief Create a Tracker By Name object: initialize a tracker of the requested type
*
* @param trackerType
* @return Ptr<Tracker>
*/
Ptr<Tracker> createTrackerByName(string trackerType)
{
Ptr<Tracker> tracker;
if (trackerType == trackerTypes[0])
tracker = TrackerBoosting::create();
else if (trackerType == trackerTypes[1])
tracker = TrackerMIL::create();
else if (trackerType == trackerTypes[2])
tracker = TrackerKCF::create();
else if (trackerType == trackerTypes[3])
tracker = TrackerTLD::create();
else if (trackerType == trackerTypes[4])
tracker = TrackerMedianFlow::create();
else if (trackerType == trackerTypes[5])
tracker = TrackerGOTURN::create();
else if (trackerType == trackerTypes[6])
tracker = TrackerMOSSE::create();
else if (trackerType == trackerTypes[7])
tracker = TrackerCSRT::create();
else
{
cout << "Incorrect tracker name" << endl;
cout << "Available trackers are: " << endl;
for (vector<string>::iterator it = trackerTypes.begin(); it != trackerTypes.end(); ++it)
{
std::cout << " " << *it << endl;
}
}
return tracker;
}
/**
* @brief Get the Random Colors object: generate random colors
*
* @param colors
* @param numColors
*/
void getRandomColors(vector<Scalar> &colors, int numColors)
{
RNG rng(0);
for (int i = 0; i < numColors; i++)
{
colors.push_back(Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)));
}
}
int main(int argc, char *argv[])
{
// Set tracker type. Change this to try different trackers.
string trackerType = trackerTypes[7];
// set default values for tracking algorithm and video
//string videoPath = "video/run.mp4";
VideoCapture cap(0);//open the camera
// bounding boxes for the objects handed to the MultiTracker
vector<Rect> bboxes;
// create a video capture object to read videos
//cv::VideoCapture cap(videoPath);
Mat frame;
// quit if unable to read video file
if (!cap.isOpened())
{
printf("could not load video data....\n");
return -1;
}
// read the first frame
cap >> frame;
// draw bounding boxes over the objects in the first frame
/*
Draw a box on the image, then press ENTER to confirm it and draw the next one. Press ESC to finish selection and start the program.
*/
cout << "\n==========================================================\n";
cout << "OpenCV says press c to cancel objects selection process" << endl;
cout << "It doesn't work. Press Esc to exit selection process" << endl;
cout << "\n==========================================================\n";
cv::selectROIs("MultiTracker", frame, bboxes, false);
//alternatively, hard-code the detection boxes yourself
//x,y,width,height
//bboxes.push_back(Rect(388, 155, 30, 40));
//bboxes.push_back(Rect(492, 205, 50, 80));
// quit if there are no objects to track
if (bboxes.size() < 1)
{
return 0;
}
vector<Scalar> colors;
//assign a color to each box
getRandomColors(colors, bboxes.size());
// Create multitracker
Ptr<MultiTracker> multiTracker = cv::MultiTracker::create();
// initialize multitracker
for (int i = 0; i < bboxes.size(); i++)
{
multiTracker->add(createTrackerByName(trackerType), frame, Rect2d(bboxes[i]));
}
// process video and track objects
cout << "\n==========================================================\n";
cout << "Started tracking, press ESC to quit." << endl;
while (cap.isOpened())
{
// get the next frame from the video, processing frame by frame
cap >> frame;
// stop the program if reached end of video
if (frame.empty())
{
break;
}
//update the tracking result with the new frame
bool ok = multiTracker->update(frame);
if (ok == true)
{
cout << "Tracking success" << endl;
}
else
{
cout << "Tracking failure" << endl;
}
// draw the tracked objects
for (unsigned i = 0; i < multiTracker->getObjects().size(); i++)
{
rectangle(frame, multiTracker->getObjects()[i], colors[i], 2, 1);
}
// show frame
imshow("MultiTracker", frame);
// quit on ESC key
if (waitKey(1) == 27)
{
break;
}
}
waitKey(0);
return 0;
}
P09 Image blurring // 2021/1/5 mean blur and Gaussian blur
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
Mat src, dst;
src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");
if (!src.data)
{
printf("could not load image...\n");
return -1;
}
char input_title[] = "input image";
char output_title[] = "blur image";
namedWindow(input_title, CV_WINDOW_AUTOSIZE);
namedWindow(output_title, CV_WINDOW_AUTOSIZE);
imshow(input_title, src);
blur(src, dst, Size(9, 3), Point(-1, -1));//mean (box) blur
imshow(output_title, dst);
Mat gblur;
GaussianBlur(src, gblur, Size(9, 3), 11, 11);//Gaussian blur
imshow("gaussian blur", gblur);
waitKey(0);
return 0;
}
P10 Median filtering (very good at suppressing salt-and-pepper noise) and Gaussian bilateral filtering (preserves image edge information well) ***
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
Mat src, dst;
src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");
if (!src.data)
{
printf("could not load image...\n");
return -1;
}
namedWindow("input_title", CV_WINDOW_AUTOSIZE);
imshow("input_title", src);
//medianBlur(src, dst, 3);//median filtering (commented out; see the sketch after this program)
bilateralFilter(src, dst, 15, 100, 5);
namedWindow("BiBlur Filter result", CV_WINDOW_AUTOSIZE);
imshow("BiBlur Filter result", dst);
Mat resultImg;
Mat kernel = (Mat_<int>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);//3x3 sharpening kernel
filter2D(dst, resultImg, -1, kernel, Point(-1, -1), 0);
imshow("Final result", resultImg);
waitKey(0);
return 0;
}
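The medianBlur call in the program above is commented out; the following is a minimal sketch (not from the video) of how it could be used on its own. The kernel size 5 and the image path are assumptions.
#include <opencv2/opencv.hpp>
using namespace cv;
int main(int argc, char** argv)
{
Mat src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");//assumed path
if (src.empty())
{
printf("could not load image...\n");
return -1;
}
Mat median;
medianBlur(src, median, 5);//replace each pixel with the median of its 5x5 neighborhood; ksize must be odd and greater than 1
imshow("input image", src);
imshow("median blur", median);//salt-and-pepper noise is removed while edges stay sharper than with a box blur
waitKey(0);
return 0;
}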
P11 Dilation and erosion
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
Mat src, dst;
char OUTPUT_WIN[] = "output image";
int element_size = 3;
int max_size = 21;
void CallBack_Demo(int, void*);
int main(int argc, char** argv)
{
src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");
if (!src.data)
{
printf("could not load image...\n");
return -1;
}
namedWindow("input_title", CV_WINDOW_AUTOSIZE);
imshow("input_title", src);
namedWindow(OUTPUT_WIN, CV_WINDOW_AUTOSIZE);
createTrackbar("Element Size:", OUTPUT_WIN, &element_size, max_size, CallBack_Demo);
CallBack_Demo(0, 0);
waitKey(0);
return 0;
}
void CallBack_Demo(int, void*)
{
int s = element_size * 2 + 1;
Mat structureElement = getStructuringElement(MORPH_RECT, Size(s, s), Point(-1, -1));//the element size follows the trackbar value s
dilate(src, dst, structureElement, Point(-1, -1), 1);//dilation
//erode(src, dst, structureElement);//erosion
imshow(OUTPUT_WIN, dst);
return;
}
P12 Morphological operations 2021/1/6
Opening (open), closing (close), morphological gradient, top-hat (top_hat), black-hat
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
Mat src, dst;
src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");
if (!src.data)
{
printf("could not load image...\n");
return -1;
}
namedWindow("input_title", CV_WINDOW_AUTOSIZE);
imshow("input_title", src);
char out_title[] = "morphology demo";
namedWindow(out_title, CV_WINDOW_AUTOSIZE);
Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
//morphologyEx(src, dst, CV_MOP_OPEN, kernel);//opening: erode then dilate; removes small objects, assuming the objects are the foreground on a black background
//morphologyEx(src, dst, CV_MOP_CLOSE, kernel);//closing: dilate then erode; fills small holes, assuming the objects are the foreground on a black background
//morphologyEx(src, dst, CV_MOP_GRADIENT, kernel);//morphological gradient: dilation minus erosion
//morphologyEx(src, dst, CV_MOP_TOPHAT, kernel);//top-hat: the difference between the source image and its opening
morphologyEx(src, dst, CV_MOP_BLACKHAT, kernel);//black-hat: the difference between the closing and the source image
imshow(out_title, dst);
waitKey(0);
return 0;
}
P13 Morphological operations: extracting horizontal lines, vertical lines, and characters *******
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
Mat src, dst;
src = imread("C:/Users/25503/Desktop/124.png");
if (!src.data)
{
printf("could not load image...\n");
return -1;
}
char INPUT_WIN[] = "input image";
char OUTPUT_WIN[] = "result image";
namedWindow(INPUT_WIN, CV_WINDOW_AUTOSIZE);
imshow(INPUT_WIN, src);
Mat gray_src;
cvtColor(src, gray_src, CV_BGR2GRAY);//convert the color image to grayscale
imshow("gray image", gray_src);
Mat binImg;
adaptiveThreshold(gray_src, binImg, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 15, -2);//convert the grayscale image to a binary image
imshow("binary image", binImg);
Mat hline = getStructuringElement(MORPH_RECT, Size(src.cols / 16, 1), Point(-1, -1));//structuring element for extracting horizontal lines
Mat vline = getStructuringElement(MORPH_RECT, Size(1, src.rows / 16), Point(-1, -1));//structuring element for extracting vertical lines
Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));//small rectangular element for extracting the characters
/*extract horizontal lines*/
Mat temp_h;
//erode(binImg, temp_h, hline);//erosion
//dilate(temp_h, dst, hline);//dilation
morphologyEx(binImg, dst, CV_MOP_OPEN, hline);//opening is equivalent to erosion followed by dilation
bitwise_not(dst, dst);//invert the image so the white background becomes black (and vice versa)
blur(dst, dst, Size(3, 3), Point(-1, -1));//smooth the result so it looks nicer
imshow("Final Result hline", dst);//horizontal lines extracted
/*extract vertical lines*/
Mat temp_v;
erode(binImg, temp_v, vline);//erosion
dilate(temp_v, dst, vline);//dilation
bitwise_not(dst, dst);//invert the image so the white background becomes black (and vice versa)
blur(dst, dst, Size(3, 3), Point(-1, -1));//smooth the result so it looks nicer
imshow("Final Result vline", dst);//vertical lines extracted
/*extract the characters with the rectangular element*/
Mat temp_JuXing;
erode(binImg, temp_JuXing, kernel);//erosion
dilate(temp_JuXing, dst, kernel);//dilation
bitwise_not(dst, dst);//invert the image so the white background becomes black (and vice versa)
//blur(dst, dst, Size(3, 3), Point(-1, -1));//smooth the result so it looks nicer
imshow("Final Result Juxing", dst);//characters extracted with the 3x3 rectangular element
waitKey(0);
return 0;
}
P14 Image pyramids: upsampling and downsampling
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
Mat src, dst;
src = imread("C:/Users/25503/Desktop/124.png");
if (!src.data)
{
printf("could not load image...\n");
return -1;
}
char INPUT_WIN[] = "input image";
char OUTPUT_WIN[] = "result image";
namedWindow(INPUT_WIN, CV_WINDOW_AUTOSIZE);
namedWindow(OUTPUT_WIN, CV_WINDOW_AUTOSIZE);
imshow(INPUT_WIN, src);
//upsampling makes the image 2x (or more) larger in width and height than the original
pyrUp(src, dst, Size(src.cols * 2, src.rows * 2));
imshow(OUTPUT_WIN, dst);
//downsampling makes the image 2x (or more) smaller in width and height than the original
Mat s_down;
pyrDown(src, s_down, Size(src.cols / 2, src.rows / 2));
imshow("sample down", s_down);
//Difference of Gaussians (DoG)******
Mat gray_src, g1, g2, dogImg;
cvtColor(src, gray_src, CV_BGR2GRAY);//convert the BGR color image to grayscale
GaussianBlur(gray_src, g1, Size(3, 3), 0, 0);//Gaussian blur gives g1
GaussianBlur(g1, g2, Size(3, 3), 0, 0);//blurring g1 again gives g2
subtract(g1, g2, dogImg, Mat());//subtract g2 from g1 to get dogImg
normalize(dogImg, dogImg, 255, 0, NORM_MINMAX);
imshow("DOG Image", dogImg);
waitKey(0);
return 0;
}
P15 Basic threshold operations
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
Mat src, gray_src, dst;
int threshold_value = 127;
int threshold_max = 255;
const char* output_title = "binary image";
void Threshold_Demo(int, void*);
int main(int argc, char** argv)
{
src = imread("C:/Users/25503/Desktop/124.png");
if (!src.data)
{
printf("could not load image...\n");
return -1;
}
namedWindow("input image", CV_WINDOW_AUTOSIZE);
namedWindow(output_title, CV_WINDOW_AUTOSIZE);
imshow("input image", src);
cvtColor(src, gray_src, CV_BGR2GRAY);
createTrackbar("Threshold Value:", output_title, &threshold_value, threshold_max, Threshold_Demo);
Threshold_Demo(0, 0);
waitKey(0);
return 0;
}
void Threshold_Demo(int, void*)
{
cvtColor(src, gray_src, CV_BGR2GRAY);//convert the color image to grayscale
threshold(gray_src, dst, threshold_value, threshold_max, THRESH_BINARY);//plain binary thresholding (white background); other threshold types exist, see the sketch after this program
//threshold(gray_src, dst, threshold_value, threshold_max, THRESH_BINARY_INV);//inverse binary thresholding (black background); uncommenting this would override the line above
imshow(output_title, dst);
}
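The comment in Threshold_Demo notes that other threshold types exist. As a minimal sketch added to these notes (not part of the original program), THRESH_OTSU picks the threshold automatically from the histogram and THRESH_TRUNC clips values instead of binarizing; the image path is the same assumed test file.
#include <opencv2/opencv.hpp>
using namespace cv;
int main(int argc, char** argv)
{
Mat src = imread("C:/Users/25503/Desktop/124.png");//assumed path
if (src.empty())
{
printf("could not load image...\n");
return -1;
}
Mat gray, otsu_img, trunc_img;
cvtColor(src, gray, CV_BGR2GRAY);
threshold(gray, otsu_img, 0, 255, THRESH_BINARY | THRESH_OTSU);//the supplied threshold (0) is ignored; Otsu's method computes one from the histogram
threshold(gray, trunc_img, 127, 255, THRESH_TRUNC);//values above 127 are clipped to 127, the rest are left unchanged
imshow("otsu binary", otsu_img);
imshow("truncate", trunc_img);
waitKey(0);
return 0;
}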
P16 Custom linear filters (Robert operator, Sobel operator, Laplacian operator, custom convolution blur)
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
Mat src, dst;
Mat kernel;
int ksize = 0;
Mat s_down;
src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");
if (!src.data)
{
printf("could not load image...\n");
return -1;
}
char INPUT_WIN[] = "input image";
char OUTPUT_WIN[] = "result image";
namedWindow(INPUT_WIN, CV_WINDOW_AUTOSIZE);
namedWindow(OUTPUT_WIN, CV_WINDOW_AUTOSIZE);
pyrDown(src, s_down, Size(src.cols / 2, src.rows / 2));//shrink the display window
imshow(INPUT_WIN, s_down);
//Robert operator, X direction
Mat kernel_x = (Mat_<int>(2, 2) << 1, 0, 0, -1);
filter2D(src, dst, -1, kernel_x, Point(-1, -1), 0.0);
pyrDown(dst, s_down, Size(src.cols / 2, src.rows / 2));//shrink the display window
imshow("Robert X", s_down);
//Robert operator, Y direction
Mat ying;
Mat kernel_y = (Mat_<int>(2, 2) << 0, 1, -1, 0);
filter2D(src, ying, -1, kernel_y, Point(-1, -1), 0.0);
pyrDown(ying, s_down, Size(src.cols / 2, src.rows / 2));//shrink the display window
imshow("Robert Y", s_down);
//Sobel operator, X direction
Mat sobel_x;
Mat kernel_xx = (Mat_<int>(3, 3) << -1, 0, 1, -2, 0, 2, -1, 0, 1);
filter2D(src, sobel_x, -1, kernel_xx, Point(-1, -1), 0.0);
pyrDown(sobel_x, s_down, Size(src.cols / 2, src.rows / 2));//shrink the display window
imshow("Sobel X", s_down);
//Sobel operator, Y direction
Mat sobel_y;
Mat kernel_yy = (Mat_<int>(3, 3) << -1, -2, -1, 0, 0, 0, 1, 2, 1);
filter2D(src, sobel_y, -1, kernel_yy, Point(-1, -1), 0.0);
pyrDown(sobel_y, s_down, Size(src.cols / 2, src.rows / 2));//shrink the display window
imshow("Sobel Y", s_down);
//Laplacian operator
Mat LOG_y;
Mat kernel_LOG = (Mat_<int>(3, 3) << 0, -1, 0, -1, 4, -1, 0, -1, 0);
filter2D(src, LOG_y, -1, kernel_LOG, Point(-1, -1), 0.0);
pyrDown(LOG_y, s_down, Size(src.cols / 2, src.rows / 2));//shrink the display window
imshow("Laplacian", s_down);
/* //custom box-blur filter with a growing kernel
int c = 0;
int index = 0;
Mat dst_z;
while (true)
{
c = waitKey(500);
if ((char)c == 27)//ESC key
{
break;
}
ksize = 4 + (index % 8) * 2 + 1;
Mat kernel_z = Mat::ones(Size(ksize, ksize), CV_32F) / (float)(ksize *ksize);
filter2D(src, dst_z, -1, kernel_z, Point(-1, -1));
index++;
//imshow("custom filter", dst_z);
}
*/
waitKey(0);
return 0;
}
P17 Image border effects
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
Mat src, dst;
src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");
if (!src.data)
{
printf("could not load image...\n");
return -1;
}
char INPUT_WIN[] = "input image";
char OUTPUT_WIN[] = "Border Demo";
namedWindow(INPUT_WIN, CV_WINDOW_AUTOSIZE);
namedWindow(OUTPUT_WIN, CV_WINDOW_AUTOSIZE);
imshow(INPUT_WIN, src);
int top = (int)(0.05*src.rows);
int bottom = (int)(0.05*src.rows);
int left = (int)(0.05*src.cols);
int right = (int)(0.05*src.cols);
RNG rng(12345);
int borderType = BORDER_DEFAULT;
int c = 0;
while (true)
{
c = waitKey(500);
if ((char)c == 27)//ESC key
{
break;
}
if ((char)c == 'r')//'r' key
{
borderType = BORDER_REPLICATE;
}
else if ((char)c == 'v')//'v' key
{
borderType = BORDER_WRAP;
}
else if ((char)c == 'c')//'c' key, gives a nice-looking result
{
borderType = BORDER_CONSTANT;
}
Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
copyMakeBorder(src, dst, top, bottom, left, right, borderType, color);
imshow(OUTPUT_WIN, dst);
}
waitKey(0);
return 0;
}
P18 Edge detection with the Sobel and Scharr operators
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
int main(int argc, char** argv)
{
Mat src, dst;
Mat s_down;
src = imread("C:/Users/25503/Desktop/张教授项目/1.jpg");
if (!src.data)
{
printf("could not load image...\n");
return -1;
}
char INPUT_WIN[] = "input image";
char OUTPUT_WIN[] = "sobel-Demo";
namedWindow(INPUT_WIN, CV_WINDOW_AUTOSIZE);
//namedWindow(OUTPUT_WIN, CV_WINDOW_AUTOSIZE);
pyrDown(src, s_down, Size(src.cols / 2, src.rows / 2));//shrink the display window
imshow(INPUT_WIN, s_down);
Mat gray_src;
GaussianBlur(src, dst, Size(3, 3), 0, 0);
cvtColor(dst, gray_src, CV_BGR2GRAY);//convert the color image to grayscale
pyrDown(gray_src, s_down, Size(src.cols / 2, src.rows / 2));//shrink the display window
imshow("gray image", s_down);
Mat xgrad, ygrad;//Sobel edge detection in the x and y directions
Sobel(gray_src, xgrad, CV_16S, 1, 0, 3);
Sobel(gray_src, ygrad, CV_16S, 0, 1, 3);
convertScaleAbs(xgrad, xgrad);
convertScaleAbs(ygrad, ygrad);
pyrDown(xgrad, s_down, Size(src.cols / 2, src.rows / 2));//shrink the display window
imshow("Sobel X result", s_down);
pyrDown(ygrad, s_down, Size(src.cols / 2, src.rows / 2));//shrink the display window
imshow("Sobel Y result", s_down);
Mat xygrad = Mat(xgrad.size(), xgrad.type());
int width = xgrad.cols;
int height = ygrad.rows;
for (int row = 0; row < height; row++)
{
for (int col = 0; col < width; col++)
{
int xg = xgrad.at<uchar>(row, col);
int yg = ygrad.at<uchar>(row, col);
int xy = xg + yg;
xygrad.at<uchar>(row, col) = saturate_cast<uchar>(xy);
}
}
addWeighted(xgrad, 0.5, ygrad, 0.5, 0, xygrad);
pyrDown(xygrad, s_down, Size(src.cols / 2, src.rows / 2));//shrink the display window