[OpenCvSharp] Notes on the opencvsharp_samples SamplesCore example code
Date: 2023-06-23 02:07:00
Source: https://github.com/shimat/opencvsharp_samples
SamplesCore
Notes on the C# (.NET Core / .NET Framework) samples
#1. Face detection with cascade classifiers

using OpenCvSharp;
using SampleBase;

namespace SamplesCore
{
    ///
    /// Human face detection
    /// http://docs.opencv.org/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.html
    ///
    class FaceDetection : ConsoleTestBase
    {
        public override void RunTest()
        {
            // load the cascade classifiers
            using var haarCascade = new CascadeClassifier(TextPath.HaarCascade);
            using var lbpCascade = new CascadeClassifier(TextPath.LbpCascade);

            // detect faces
            Mat haarResult = DetectFace(haarCascade);
            Mat lbpResult = DetectFace(lbpCascade);

            Cv2.ImShow("Faces by Haar", haarResult);
            Cv2.ImShow("Faces by LBP", lbpResult);
            Cv2.WaitKey(0);
            Cv2.DestroyAllWindows();
        }

        // detect faces and draw an ellipse around each one
        private Mat DetectFace(CascadeClassifier cascade)
        {
            Mat result;
            using (var src = new Mat(ImagePath.Yalta, ImreadModes.Color))
            using (var gray = new Mat())
            {
                result = src.Clone();
                Cv2.CvtColor(src, gray, ColorConversionCodes.BGR2GRAY);

                // multi-scale detection
                Rect[] faces = cascade.DetectMultiScale(
                    gray, 1.08, 2, HaarDetectionTypes.ScaleImage, new Size(30, 30));

                // draw all the detected faces
                foreach (Rect face in faces)
                {
                    var center = new Point
                    {
                        X = (int)(face.X + face.Width * 0.5),
                        Y = (int)(face.Y + face.Height * 0.5)
                    };
                    var axes = new Size
                    {
                        Width = (int)(face.Width * 0.5),
                        Height = (int)(face.Height * 0.5)
                    };
                    Cv2.Ellipse(result, center, axes, 0, 0, 360, new Scalar(255, 0, 255), 4);
                }
            }
            return result;
        }
    }
}

#2. Face detection with a DNN (Caffe model)

using System;
using OpenCvSharp;
using OpenCvSharp.Dnn;
using SampleBase;

namespace SamplesCore
{
    ///
    /// To run this example first download the face model available here: https://github.com/spmallick/learnopencv/tree/master/FaceDetectionComparison/models
    /// Add the files to the bin folder.
    /// You should also prepare the input image (faces.jpg) yourself.
    ///
    internal class FaceDetectionDNN : ConsoleTestBase
    {
        const string configFile = "deploy.prototxt";                              // network definition
        const string faceModel = "res10_300x300_ssd_iter_140000_fp16.caffemodel"; // network weights
        const string image = "faces.jpg";                                         // image to process

        public override void RunTest()
        {
            // read the sample image
            using var frame = Cv2.ImRead(image);
            int frameHeight = frame.Rows;
            int frameWidth = frame.Cols;

            using var faceNet = CvDnn.ReadNetFromCaffe(configFile, faceModel); // load the network

            // Mat BlobFromImage(Mat image, double scaleFactor = 1, Size size = default, Scalar mean = default, bool swapRB = true, bool crop = true);
            // image:       input image (1, 3 or 4 channels)
            // scaleFactor: multiplier applied to each channel value
            // size:        spatial size of the output blob, e.g. size=(200,300) means height h=300, width w=200
            // mean:        per-channel values to subtract, reducing lighting effects (e.g. for a BGR image,
            //              mean=[104.0, 177.0, 123.0] subtracts 104 from B, 177 from G, 123 from R)
            // swapRB:      swap the R and B channels; false here because cv2.imread loads BGR
            // crop:        if true, resize preserving aspect ratio and crop the center to `size`; false = plain resize
            // ddepth:      depth of the output blob, CV_32F or CV_8U
            using var blob = CvDnn.BlobFromImage(frame, 1.0, new Size(300, 300), new Scalar(104, 117, 123), false, false);
            faceNet.SetInput(blob, "data"); // set the network input

            // Forward() returns the first output blob of the named layer; here we need "detection_out"
            using var detection = faceNet.Forward("detection_out");
            using var detectionMat = new Mat(detection.Size(2), detection.Size(3), MatType.CV_32F, detection.Ptr(0));
            for (int i = 0; i < detectionMat.Rows; i++) // iterate over the rows of the detection matrix
            {
                float confidence = detectionMat.At<float>(i, 2); // column 2 holds the confidence
                if (confidence > 0.7)
                {
                    // columns 3-6 of the detection matrix hold xmin, ymin, xmax, ymax
                    int x1 = (int)(detectionMat.At<float>(i, 3) * frameWidth);
                    int y1 = (int)(detectionMat.At<float>(i, 4) * frameHeight);
                    int x2 = (int)(detectionMat.At<float>(i, 5) * frameWidth);
                    int y2 = (int)(detectionMat.At<float>(i, 6) * frameHeight);
                    // draw a green rectangle
                    Cv2.Rectangle(frame, new Point(x1, y1), new Point(x2, y2), new Scalar(0, 255, 0), 2, LineTypes.Link4);
                }
            }
            // show the image
            Window.ShowImages(frame);
        }
    }
}
#3. cv::FAST: corner detection with the FAST algorithm
using OpenCvSharp;
using SampleBase;
namespace SamplesCore
{
///
/// cv::FAST: corner detection with the FAST algorithm
///
class FASTSample : ConsoleTestBase
{
public override void RunTest()
{
using Mat imgSrc = new Mat(ImagePath.Lenna, ImreadModes.Color);
using Mat imgGray = new Mat();
using Mat imgDst = imgSrc.Clone();
Cv2.CvtColor(imgSrc, imgGray, ColorConversionCodes.BGR2GRAY, 0);
//imgGray: grayscale image in which to detect keypoints (corners)
//50: threshold on the intensity difference between the center pixel and the pixels on a circle around it
//true: if true, apply non-maximum suppression to the detected corners (a comparison sketch follows this sample)
KeyPoint[] keypoints = Cv2.FAST(imgGray, 50, true);
foreach (KeyPoint kp in keypoints) // iterate over the keypoints detected in the image
{
imgDst.Circle((Point)kp.Pt, 3, Scalar.Red, -1, LineTypes.AntiAlias, 0); // draw a dot
}
Cv2.ImShow("FAST", imgDst);
Cv2.WaitKey(0);
Cv2.DestroyAllWindows();
}
}
}
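A quick way to see what the non-maximum suppression flag does is to run the detector both ways and compare the counts. A minimal sketch, assuming any grayscale test image (the path is a placeholder, not part of the sample):

using System;
using OpenCvSharp;

using var gray = Cv2.ImRead("test.png", ImreadModes.Grayscale); // placeholder path
KeyPoint[] withNms = Cv2.FAST(gray, 50, true);     // clusters collapsed to single corners
KeyPoint[] withoutNms = Cv2.FAST(gray, 50, false); // raw responses, usually many more
Console.WriteLine($"with NMS: {withNms.Length}, without NMS: {withoutNms.Length}");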
#4. cv::flann: FLANN, the Fast Library for Approximate Nearest Neighbors
//A collection of optimized algorithms for nearest-neighbor search on large datasets and high-dimensional features; on large datasets it performs better than BFMatcher.
//To match with FLANN in the Python API you pass in two dictionaries that select the algorithm and its parameters; a C# equivalent is sketched below, and the full Index-based sample follows it.
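A minimal sketch of FLANN descriptor matching through FlannBasedMatcher with Lowe's ratio test. The image paths and the 0.7 ratio are placeholder assumptions; KAZE is used because FLANN expects float (CV_32F) descriptors:

using System;
using System.Collections.Generic;
using OpenCvSharp;

using var img1 = Cv2.ImRead("query.png", ImreadModes.Grayscale); // placeholder paths
using var img2 = Cv2.ImRead("train.png", ImreadModes.Grayscale);
using var kaze = KAZE.Create();
using var desc1 = new Mat();
using var desc2 = new Mat();
kaze.DetectAndCompute(img1, null, out _, desc1);
kaze.DetectAndCompute(img2, null, out _, desc2);

using var matcher = new FlannBasedMatcher(); // KD-tree index by default
DMatch[][] knn = matcher.KnnMatch(desc1, desc2, 2); // 2 nearest neighbors per descriptor
var good = new List<DMatch>();
foreach (DMatch[] m in knn)
{
    // keep a match only when it is clearly better than the runner-up
    if (m.Length == 2 && m[0].Distance < 0.7f * m[1].Distance)
        good.Add(m[0]);
}
Console.WriteLine($"{good.Count} matches passed the ratio test");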
using System;
using OpenCvSharp;
using OpenCvSharp.Flann;
using SampleBase;
namespace SamplesCore
{
///
/// cv::flann
///
class FlannSample : ConsoleTestBase
{
public override void RunTest()
{
Console.WriteLine("===== FlannTest =====");
// create the dataset
using (var features = new Mat(10000, 2, MatType.CV_32FC1)) // 10,000 points
{
var rand = new Random(); // random number generator
for (int i = 0; i < features.Rows; i++)
{
features.Set<float>(i, 0, rand.Next(10000)); // random x coordinate
features.Set<float>(i, 1, rand.Next(10000)); // random y coordinate
}
// the query point, in Mat form
var queryPoint = new Point2f(7777, 7777); // point to search for
var queries = new Mat(1, 2, MatType.CV_32FC1); // Mat form
queries.Set<float>(0, 0, queryPoint.X); // set the Mat elements
queries.Set<float>(0, 1, queryPoint.Y);
Console.WriteLine("query:({0}, {1})", queryPoint.X, queryPoint.Y);
Console.WriteLine("-----");
// K-nearest-neighbor search: KnnSearch
// features: a CV_32F matrix containing the features (points) to index; its size is num_features x feature_dimensionality
using var nnIndex = new OpenCvSharp.Flann.Index(features, new KDTreeIndexParams(4)); // builds a nearest-neighbor search index for the given dataset
const int Knn = 1; // number of neighbors
// queries: the query points, one per row
// indices: indices of the nearest neighbors found
// dists:   distances to the nearest neighbors
// Knn:     number of nearest neighbors to search for
// SearchParams(int checks = 32, float eps = 0, bool sorted = true): search parameters
nnIndex.KnnSearch(queries, out int[] indices, out float[] dists, Knn, new SearchParams(32));
for (int i = 0; i < Knn; i++) // iterate over the neighbors
{
int index = indices[i]; // index
float dist = dists[i]; // distance
var pt = new Point2f(features.Get<float>(index, 0), features.Get<float>(index, 1)); // the neighbor point
Console.Write("No.{0}\t", i);
Console.Write("index:{0}", index);
Console.Write(" distance:{0}", dist);
Console.Write(" data:({0}, {1})", pt.X, pt.Y);
Console.WriteLine();
}
}
Console.Read();
}
}
}
#5. FREAK: retrieving keypoints with the FREAK algorithm
using OpenCvSharp;
using OpenCvSharp.XFeatures2D;
using SampleBase;
namespace SamplesCore
{
///
/// Retrieves keypoints using the FREAK algorithm.
///
class FREAKSample : ConsoleTestBase
{
public override void RunTest()
{
using var gray = new Mat(ImagePath.Lenna, ImreadModes.Grayscale); // grayscale image
using var dst = new Mat(ImagePath.Lenna, ImreadModes.Color); // result image
// ORB keypoint detection
using var orb = ORB.Create(1000);
KeyPoint[] keypoints = orb.Detect(gray); // detect ORB keypoints on the grayscale image as FREAK's initial keypoints
// FREAK
using var freak = FREAK.Create();
Mat freakDescriptors = new Mat(); // FREAK descriptors
freak.Compute(gray, ref keypoints, freakDescriptors); // compute the FREAK keypoints and descriptors
if (keypoints != null)
{
var color = new Scalar(0, 255, 0); // green
foreach (KeyPoint kpt in keypoints) // iterate over the keypoints
{
float r = kpt.Size / 2;
Cv2.Circle(dst, (Point)kpt.Pt, (int)r, color); // draw the keypoint
// draw a cross
Cv2.Line(dst,
(Point)new Point2f(kpt.Pt.X + r, kpt.Pt.Y + r),
(Point)new Point2f(kpt.Pt.X - r, kpt.Pt.Y - r),
color);
Cv2.Line(dst,
(Point)new Point2f(kpt.Pt.X - r, kpt.Pt.Y + r),
(Point)new Point2f(kpt.Pt.X + r, kpt.Pt.Y - r),
color);
}
}
using (new Window("FREAK", dst)) // show the image
{
Cv2.WaitKey();
}
}
}
}
#6. Hand pose (HandPose) detection
using System.Collections.Generic;
using OpenCvSharp;
using OpenCvSharp.Dnn;
using SampleBase;
namespace SamplesCore
{
///
/// To run this example first download the hand model available here: http://posefs1.perception.cs.cmu.edu/OpenPose/models/hand/pose_iter_102000.caffemodel
/// Or also available here https://github.com/CMU-Perceptual-Computing-Lab/openpose/tree/master/models
/// Add the files to the bin folder
///
internal class HandPose : ConsoleTestBase
{
public override void RunTest()
{
const string model = "pose_iter_102000.caffemodel"; // hand-pose network weights
const string modelTxt = "pose_deploy.prototxt"; // network definition
const string sampleImage = "hand.jpg"; // sample image
const string outputLoc = "Output_Hand.jpg"; // output image path
const int nPoints = 22; // number of keypoints
const double thresh = 0.01; // confidence threshold
// pairs of keypoints to connect
int[][] posePairs =
{
new[] {0, 1}, new[] {1, 2}, new[] {2, 3}, new[] {3, 4}, // thumb
new[] {0, 5}, new[] {5, 6}, new[] {6, 7}, new[] {7, 8}, // index finger
new[] {0, 9}, new[] {9, 10}, new[] {10, 11}, new[] {11, 12}, // middle finger
new[] {0, 13}, new[] {13, 14}, new[] {14, 15}, new[] {15, 16}, // ring finger
new[] {0, 17}, new[] {17, 18}, new[] {18, 19}, new[] {19, 20}, // little finger
};
using var frame = Cv2.ImRead(sampleImage); // read the sample image
using var frameCopy = frame.Clone(); // copy used for drawing the per-point results
int frameWidth = frame.Cols;
int frameHeight = frame.Rows;
float aspectRatio = frameWidth / (float) frameHeight; // aspect ratio
int inHeight = 368; // network input height
int inWidth = ((int) (aspectRatio * inHeight) * 8) / 8; // network input width
using var net = CvDnn.ReadNetFromCaffe(modelTxt, model); // load the network
// BlobFromImage preprocesses the image (mean subtraction, scaling, cropping, channel swapping, ...)
// and returns a 4-dimensional blob (simply an N-dimensional array used as the network input)
using var inpBlob = CvDnn.BlobFromImage(frame, 1.0 / 255, new Size(inWidth, inHeight),
new Scalar(0, 0, 0), false, false);
net.SetInput(inpBlob); // set the network input
using var output = net.Forward(); // run inference
int H = output.Size(2); // height
int W = output.Size(3); // width
var points = new List<Point>();
for (int n = 0; n < nPoints; n++)
{
// probability map of the corresponding body part
using var probMap = new Mat(H, W, MatType.CV_32F, output.Ptr(0, n));
Cv2.Resize(probMap, probMap, new Size(frameWidth, frameHeight)); // scale up to the original image size
Cv2.MinMaxLoc(probMap, out _, out var maxVal, out _, out var maxLoc); // find the maximum value and its location
if (maxVal > thresh) // confidence above the threshold
{ // draw the most probable point
Cv2.Circle(frameCopy, maxLoc.X, maxLoc.Y, 8, new Scalar(0, 255, 255), -1,
LineTypes.Link8);
// point index
Cv2.PutText(frameCopy, Cv2.Format(n), new OpenCvSharp.Point(maxLoc.X, maxLoc.Y),
HersheyFonts.HersheyComplex, 1, new Scalar(0, 0, 255), 2, LineTypes.AntiAlias);
}
points.Add(maxLoc); // add to the point list
}
int nPairs = 20; //(POSE_PAIRS).Length / POSE_PAIRS[0].Length;
for (int n = 0; n < nPairs; n++)
{
// lookup 2 connected body/hand parts
Point partA = points[posePairs[n][0]];
Point partB = points[posePairs[n][1]];
if (partA.X <= 0 || partA.Y <= 0 || partB.X <= 0 || partB.Y <= 0)
continue;
// draw a line connecting the two parts
Cv2.Line(frame, partA, partB, new Scalar(0, 255, 255), 8);
Cv2.Circle(frame, partA.X, partA.Y, 8, new Scalar(0, 0, 255), -1);
Cv2.Circle(frame, partB.X, partB.Y, 8, new Scalar(0, 0, 255), -1);
}
var finalOutput = outputLoc;
Cv2.ImWrite(finalOutput, frame);
}
}
}
#7. Histogram sample
using System;
using OpenCvSharp;
using SampleBase;
namespace SamplesCore
{
///
/// Histogram sample
/// http://book.mynavi.jp/support/pc/opencv2/c3/opencv_img.html
///
class HistSample : ConsoleTestBase
{
public override void RunTest()
{
using var src = Cv2.ImRead(ImagePath.Lenna, ImreadModes.Grayscale);
// Histogram view
const int Width = 260, Height = 200;
using var render = new Mat(new Size(Width, Height), MatType.CV_8UC3, Scalar.All(255)); // white 260x200 background
// Calculate the histogram
var hist = new Mat();
int[] hdims = {256}; // histogram size for each dimension
Rangef[] ranges = { new Rangef(0,256), }; // min/max range
Cv2.CalcHist(
new Mat[]{src},
new int[]{0},
null,
hist, // the histogram
1,
hdims, // count the pixels into 256 bins
ranges); // compute the histogram
// Get the max value of the histogram
Cv2.MinMaxLoc(hist, out _, out double maxVal);
var color = Scalar.All(100); // bar color
// Scale and draw the histogram
hist = hist * (maxVal != 0 ? Height / maxVal : 0.0); // scale the histogram to the view height (see the note after this sample)
for (int j = 0; j < hdims[0]; ++j)
{
int binW = (int)((double)Width / hdims[0]); // width of each bar
render.Rectangle(
new Point(j * binW, render.Rows - (int)hist.Get<float>(j)),
new Point((j + 1) * binW, render.Rows),
color,
-1); // draw a filled bar
}
using (new Window("Image", src, WindowFlags.AutoSize | WindowFlags.FreeRatio))//显示原图像
using (new Window("Histogram", render, WindowFlags.AutoSize | WindowFlags.FreeRatio))//显示直方图
{
Cv2.WaitKey();
}
}
}
}
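A note on the scaling step above: the same stretch into [0, Height] can be done with a single call, reusing the sample's hist and Height (a sketch, not part of the original sample):

// stretch the histogram bin counts into [0, Height] in place
Cv2.Normalize(hist, hist, 0, Height, NormTypes.MinMax);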
#8. HOG sample
using System;
using System.Diagnostics;
using OpenCvSharp;
using SampleBase;
namespace SamplesCore
{
///
/// samples/c/peopledetect.c
///
internal class HOGSample : ConsoleTestBase
{
public HOGSample()
{
}
public override void RunTest()
{
using var img = Cv2.ImRead(ImagePath.Asahiyama, ImreadModes.Color); // read the sample image
using var hog = new HOGDescriptor();
hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector()); // set the SVM detector for HOG features
bool b = hog.CheckDetectorSize(); // sanity-check the detector size
Console.WriteLine("CheckDetectorSize: {0}", b);
var watch = Stopwatch.StartNew();
// run the detector with default parameters. to get a higher hit-rate
// (and more false alarms, respectively), decrease the hitThreshold and
// groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
//Rect[] DetectMultiScale(Mat img, double hitThreshold = 0, Size? winStride = null, Size? padding = null, double scale = 1.05, int groupThreshold = 2);
Rect[] found = hog.DetectMultiScale(img, 0, new Size(8, 8), new Size(24, 16), 1.05, 2); // multi-scale detection (see the parameter sketch after this sample)
watch.Stop();
Console.WriteLine("Detection time = {0}ms", watch.ElapsedMilliseconds); // detection time
Console.WriteLine("{0} region(s) found", found.Length); // number of regions found
foreach (Rect rect in found)
{
// the HOG detector returns slightly larger rectangles than the real objects,
// so we slightly shrink the rectangles to get a nicer output.
var r = new Rect
{
X = rect.X + (int)Math.Round(rect.Width * 0.1),
Y = rect.Y + (int)Math.Round(rect.Height * 0.1),
Width = (int)Math.Round(rect.Width * 0.8),
Height = (int)Math.Round(rect.Height * 0.8)
};
img.Rectangle(r.TopLeft, r.BottomRight, Scalar.Red, 3); // draw the rectangle
}
using var window = new Window("people detector", img, WindowFlags.Normal); // show the detection result
window.SetProperty(WindowPropertyFlags.Fullscreen, 1);
Cv2.WaitKey(0);
}
}
}
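The parameter sketch referenced above: a hedged variant of the DetectMultiScale call, reusing the sample's hog and img. The values are illustrative assumptions, not taken from the sample; raising hitThreshold and groupThreshold trades recall for fewer false alarms, and a larger winStride speeds up the sliding-window search at the cost of coarser coverage:

Rect[] strict = hog.DetectMultiScale(img,
    hitThreshold: 0.5,            // demand a larger SVM margin per window
    winStride: new Size(16, 16),  // coarser window step: faster, but may miss people
    padding: new Size(24, 16),
    scale: 1.1,                   // fewer pyramid levels
    groupThreshold: 3);           // require more overlapping raw detections per group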
#9. Hough transform sample / line detection with the Hough transform
using System;
using OpenCvSharp;
using SampleBase;
namespace SamplesCore
{
///
/// Hough transform sample / line detection with the Hough transform
///
/// http://opencv.jp/sample/special_transforms.html#hough_line
class HoughLinesSample : ConsoleTestBase
{
public override void RunTest()
{
SampleCpp();
}
///
/// sample of new C++ style wrapper
///
private void SampleCpp()
{
// (1) load the images
using var imgGray = new Mat(ImagePath.Goryokaku, ImreadModes.Grayscale); // grayscale image
using var imgStd = new Mat(ImagePath.Goryokaku, ImreadModes.Color); // result image for the standard transform
using var imgProb = imgStd.Clone(); // result image for the probabilistic transform
// (2) preprocess with Canny edge detection
Cv2.Canny(imgGray, imgGray, 50, 200, 3, false);
// (3) run the Standard Hough Transform
//HoughLines(InputArray image, double rho, double theta, int threshold, double srn = 0, double stn = 0);
//image:     8-bit single-channel binary source image; the function may modify it
//rho:       distance resolution of the accumulator, in pixels
//theta:     angle resolution of the accumulator, in radians
//threshold: accumulator threshold; only lines with enough votes (> threshold) are returned
//srn:       for the multi-scale Hough transform, divisor of the distance resolution rho [default 0]
//stn:       for the multi-scale Hough transform, divisor of the angle resolution theta [default 0]
// Output: each line is a two-element vector (rho, theta); rho is the distance from the origin (0,0),
// the top-left corner of the image, and theta is the line's rotation angle in radians
LineSegmentPolar[] segStd = Cv2.HoughLines(imgGray, 1, Math.PI / 180, 50, 0, 0); // find lines in the binary image with the standard Hough transform
int limit = Math.Min(segStd.Length, 10); // draw at most ten lines
for (int i = 0; i < limit; i++)
{
// draw the detected line
float rho = segStd[i].Rho;
float theta = segStd[i].Theta;
double a = Math.Cos(theta);
double b = Math.Sin(theta);
double x0 = a * rho; // x of the point on the line closest to the origin
double y0 = b * rho;
Point pt1 = new Point { X = (int)Math.Round(x0 + 1000 * (-b)), Y = (int)Math.Round(y0 + 1000 * (a)) }; // extend 1000 px in each direction to get the endpoints
Point pt2 = new Point { X = (int)Math.Round(x0 - 1000 * (-b)), Y = (int)Math.Round(y0 - 1000 * (a)) };
imgStd.Line(pt1, pt2, Scalar.Red, 3, LineTypes.AntiAlias, 0); // draw the line (see the note after this sample)
}
// (4) run the Probabilistic Hough Transform
//LineSegmentPoint[] HoughLinesP(InputArray image, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0);
//rho:           distance resolution of the accumulator, in pixels
//theta:         angle resolution of the accumulator, in radians
//threshold:     accumulator threshold; only lines with enough votes (> threshold) are returned
//minLineLength: segments shorter than this are rejected [default 0]
//maxLineGap:    maximum allowed gap between points on the same line to link them [default 0]
// Output: each line is a 4-element vector (x1, y1, x2, y2)
LineSegmentPoint[] segProb = Cv2.HoughLinesP(imgGray, 1, Math.PI / 180, 50, 50, 10);
foreach (LineSegmentPoint s in segProb)
{
imgProb.Line(s.P1, s.P2, Scalar.Red, 3, LineTypes.AntiAlias, 0); // draw the segment
}
// (5) show the results
using (new Window("Hough_line_standard", imgStd, WindowFlags.AutoSize))
using (new Window("Hough_line_probabilistic", imgProb, WindowFlags.AutoSize))
{
Window.WaitKey(0);
}
}
}
}
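For reference, step (3) above converts each (rho, theta) pair back to a drawable segment using the normal form of the line: the point closest to the origin is (x0, y0) = (rho*cos(theta), rho*sin(theta)), and every point on the line can be written as (x, y) = (x0 - t*sin(theta), y0 + t*cos(theta)). The sample evaluates t = +1000 and t = -1000 to get two endpoints well outside the image and draws the segment between them.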
#10. Inpainting (e.g. watermark removal). Inpainting is image interpolation; digital inpainting algorithms are widely used in image interpolation, photo restoration, scaling, and super-resolution.
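Before the interactive sample, a minimal non-interactive sketch of the same API; the file names are placeholder assumptions. White (non-zero) pixels in the 8-bit mask mark the damaged area to be filled from the surrounding pixels:

using OpenCvSharp;

using var src = Cv2.ImRead("damaged.png", ImreadModes.Color);          // placeholder path
using var mask = Cv2.ImRead("defect_mask.png", ImreadModes.Grayscale); // non-zero = repair here
using var dst = new Mat();
Cv2.Inpaint(src, mask, dst, 3, InpaintMethod.Telea); // 3 px neighborhood radius
Cv2.ImWrite("restored.png", dst);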
using System;
using System.IO;
using OpenCvSharp;
using SampleBase;
namespace SamplesCore
{
///
/// Inpainting. The basic idea is simple: replace the marked bad pixels with their neighbors so the patch blends into its surroundings.
///
/// http://opencv.jp/sample/special_transforms.html#inpaint
public class InpaintSample : ConsoleTestBase
{
public override void RunTest()
{
// cvInpaint
Console.WriteLine(
"Hot keys: \n" +
"\tESC - quit the program\n" +
"\tr - restore the original image\n" +
"\ti or ENTER - run inpainting algorithm\n" +
"\t\t(before running it, paint something on the image)\n" +
"\ts - save the original image, mask image, original+mask image and inpainted image to desktop"
);
using var img0 = Cv2.ImRead(ImagePath.Fruits, ImreadModes.AnyDepth | ImreadModes.AnyColor); // original image
using var img = img0.Clone(); // working copy that is painted on
using var inpaintMask = new Mat(img0.Size(), MatType.CV_8U, Scalar.Black); // mask
using var inpainted = img0.EmptyClone();
using var wImage = new Window("image", img);
var prevPt = new Point(-1, -1);
// set the mouse callback
wImage.SetMouseCallback((MouseEventTypes ev, int x, int y, MouseEventFlags flags, IntPtr userdata) =>
{
if (ev == MouseEventTypes.LButtonUp || (flags & MouseEventFlags.LButton) == 0)
{
prevPt = new Point(-1, -1);
}
else if (ev == MouseEventTypes.LButtonDown)
{
prevPt = new Point(x, y);
}
else if (ev == MouseEventTypes.MouseMove && (flags & MouseEventFlags.LButton) != 0)
{
Point pt = new Point(x, y);
if (prevPt.X < 0)
{
prevPt = pt; // first point of the stroke
}
inpaintMask.Line(prevPt, pt, Scalar.White, 5, LineTypes.AntiAlias, 0); // draw the stroke on the mask
img.Line(prevPt, pt, Scalar.White, 5, LineTypes.AntiAlias, 0); // draw the stroke on the image
prevPt = pt; // update the stroke start point
wImage.ShowImage(img); // show the painted image
}
});
Window wInpaint1 = null;
Window wInpaint2 = null;
try
{
for (; ; )
{
switch ((char)Window.WaitKey(0))
{
case (char)27: // exit
return;
case 'r': // restore the original image
inpaintMask.SetTo(Scalar.Black); // reset the mask to black
img0.CopyTo(img);
wImage.ShowImage(img); // show the original image
break;
case 'i': // run inpainting
case '\r':
//src: input 8-bit 1-channel or 3-channel image
//inpaintMask: inpainting mask, an 8-bit 1-channel image; non-zero pixels mark the area to be inpainted
//dst: output image with the same size and type as src
//inpaintRadius: radius of the circular neighborhood of each point considered by the algorithm
//flags:
//INPAINT_NS: Navier-Stokes based method
//INPAINT_TELEA: method by Alexandru Telea
Cv2.Inpaint(img, inpaintMask, inpainted, 3, InpaintMethod.Telea);
wInpaint1 ??= new Window("inpainted image (algorithm by Alexandru Telea)", WindowFlags.AutoSize);
wInpaint1.ShowImage(inpainted);
Cv2.Inpaint(img, inpaintMask, inpainted, 3, InpaintMethod.NS);
wInpaint2 ??= new Window("inpainted image (algorithm by Navier-Stokes)", WindowFlags.AutoSize);
wInpaint2.ShowImage(inpainted);
break;
case 's': // save images
string desktop = Environment.GetFolderPath(Environment.SpecialFolder.Desktop); // desktop path
img0.SaveImage(Path.Combine(desktop, "original.png"));
inpaintMask.SaveImage(Path.Combine(desktop, "mask.png")); // inpainting mask
img.SaveImage(Path.Combine(desktop, "original+mask.png")); // original image + mask
inpainted.SaveImage(Path.Combine(desktop, "inpainted.png")); // inpainted image
break;
}
}
}
finally
{
wInpaint1?.Dispose();
wInpaint2?.Dispose();
Window.DestroyAllWindows();
}
}
}
}
#11. Retrieving keypoints with the KAZE and AKAZE algorithms
using System;
using System.Diagnostics;
using OpenCvSharp;
using SampleBase;
namespace SamplesCore
{
///
/// Retrieves keypoints using the KAZE and AKAZE algorithm.
///
internal class KAZESample : ConsoleTestBase
{
public override void RunTest()
{
var gray = new Mat(ImagePath.Lenna, ImreadModes.Grayscale); // grayscale image
// create KAZE and AKAZE
var kaze = KAZE.Create();
var akaze = AKAZE.Create();
// descriptor outputs
var kazeDescriptors = new Mat();
var akazeDescriptors = new Mat();
// keypoint outputs: kazeKeyPoints, akazeKeyPoints
KeyPoint[] kazeKeyPoints = null, akazeKeyPoints = null;
// detect and compute the keypoints and descriptors
var kazeTime = MeasureTime(() =>
kaze.DetectAndCompute(gray, null, out kazeKeyPoints, kazeDescriptors));
var akazeTime = MeasureTime(() =>
akaze.DetectAndCompute(gray, null, out akazeKeyPoints, akazeDescriptors));
// result images: dstKaze, dstAkaze
var dstKaze = new Mat();
var dstAkaze = new Mat();
// draw the keypoints
Cv2.DrawKeypoints(gray, kazeKeyPoints, dstKaze);
Cv2.DrawKeypoints(gray, akazeKeyPoints, dstAkaze);
// show the result images
using (new Window(String.Format("KAZE [{0:F2}ms]", kazeTime.TotalMilliseconds), dstKaze))
using (new Window(String.Format("AKAZE [{0:F2}ms]", akazeTime.TotalMilliseconds), dstAkaze))
{
Cv2.WaitKey();
}
}
// measure the elapsed time of an action
private TimeSpan MeasureTime(Action action)
{
var watch = Stopwatch.StartNew();
action();
watch.Stop();
return watch.Elapsed;
}
}
}
#12. Computing the homography matrix H when the keypoint counts do not match
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using OpenCvSharp;
using SampleBase;
namespace SamplesCore
{
///
/// https://github.com/shimat/opencvsharp/issues/176
/// FindHomography when the keypoint counts do not match
///
class KAZESample2 : ConsoleTestBase
{
public static Point2d Point2fToPoint2d(Point2f pf)
{
return new Point2d(((int) pf.X), ((int) pf.Y));
}
public override void RunTest()
{
// load the two images
using var img1 = new Mat(ImagePath.SurfBox);
using var img2 = new Mat(ImagePath.SurfBoxinscene);
using var descriptors1 = new Mat();
using var descriptors2 = new Mat();
// extract N and M feature vectors from the two images,
// match them to find the best correspondences,
// and finally draw the matched features
using var matcher = new BFMatcher(NormTypes.L2SQR); // Brute Force, a common way to match 2D feature points in OpenCV; BFMatcher tries every possible pairing, so it always finds the best match
using var kaze = KAZE.Create();
// compute the KAZE keypoints and descriptors
kaze.DetectAndCompute(img1, null, out var keypoints1, descriptors1);
kaze.DetectAndCompute(img2, null, out var keypoints2, descriptors2);
// match the descriptors
DMatch[][] matches = matcher.KnnMatch(descriptors1, descriptors2, 2);
using Mat mask = new Mat(matches.Length, 1, MatType.CV_8U); // n x 1 mask
mask.SetTo(new Scalar(255)); // all white
int nonZero = Cv2.CountNonZero(mask); // number of non-zero entries; can be used to check whether the mask is all black
VoteForUniqueness(matches, mask); // uniqueness vote
nonZero = Cv2.CountNonZero(mask); // number of matches with a unique best match
nonZero = VoteForSizeAndOrientation(keypoints2, keypoints1, matches, mask, 1.5f, 20); // number of non-zero mask entries after the size/orientation vote
List<Point2f> obj = new List<Point2f>(); // points on the object
List<Point2f> scene = new List<Point2f>(); // points in the scene
List<DMatch> goodMatchesList = new List<DMatch>(); // good matches
// iterate through the mask, only pulling out nonzero items because they're matches
for (int i = 0; i < mask.Rows; i++)
{
MatIndexer<byte> maskIndexer = mask.GetGenericIndexer<byte>();
if (maskIndexer[i] > 0)
{
obj.Add(keypoints1[matches[i][0].QueryIdx].Pt); // keypoint on the object
scene.Add(keypoints2[matches[i][0].TrainIdx].Pt); // keypoint in the scene
goodMatchesList.Add(matches[i][0]); // good match
}
}
// keypoints converted to doubles
List<Point2d> objPts = obj.ConvertAll(Point2fToPoint2d);
List<Point2d> scenePts = scene.ConvertAll(Point2fToPoint2d);
if (nonZero >= 4) // at least 4 matched points are needed
{ //https://blog.csdn.net/fengyeer20120/article/details/87798638
// computes the optimal homography matrix H (3x3) between the two sets of 2D points,
// using least mean squares or RANSAC
// i.e. finds the transformation matrix between two planes: https://blog.csdn.net/xull88619814/article/details/81587595
Mat homography = Cv2.FindHomography(objPts, scenePts, HomographyMethods.Ransac, 1.5, mask);
nonZero = Cv2.CountNonZero(mask);
if (homography != null)
{
Point2f[] objCorners = { new Point2f(0, 0),
new Point2f(img1.Cols, 0),
new Point2f(img1.Cols, img1.Rows),
new Point2f(0, img1.Rows) }; // the object's four corners
Point2d[] sceneCorners = MyPerspectiveTransform3(objCorners, homography); // the scene's four corners, computed by transforming the object's corners
// this is a good horizontal concatenation
using Mat img3 = new Mat(Math.Max(img1.Height, img2.Height), img2.Width + img1.Width, MatType.CV_8UC3);
using Mat left = new Mat(img3, new Rect(0, 0, img1.Width, img1.Height));
using Mat right = new Mat(img3, new Rect(img1.Width, 0, img2.Width, img2.Height));
img1.CopyTo(left);
img2.CopyTo(right);
mask.GetArray(out byte[] maskBytes);
// draw the matched point pairs
Cv2.DrawMatches(img1, keypoints1, img2, keypoints2, goodMatchesList, img3, Scalar.All(-1), Scalar.All(-1), maskBytes, DrawMatchesFlags.NotDrawSinglePoints);
List<List<Point>> listOfListOfPoint2D = new List<List<Point>>();
List<Point> listOfPoint2D = new List<Point>
{
new Point(sceneCorners[0].X + img1.Cols, sceneCorners[0].Y),
new Point(sceneCorners[1].X + img1.Cols, sceneCorners[1].Y),
new Point(sceneCorners[2].X + img1.Cols, sceneCorners[2].Y),
new Point(sceneCorners[3].X + img1.Cols, sceneCorners[3].Y)
}; // the scene's four corners shifted right by the width of img1
listOfListOfPoint2D.Add(listOfPoint2D);
img3.Polylines(listOfListOfPoint2D, true, Scalar.LimeGreen, 2); // draw the quadrilateral around the detected object
//This works too
//Cv2.Line(img3, scene_corners[0] + new Point2d(img1.Cols, 0), scene_corners[1] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
//Cv2.Line(img3, scene_corners[1] + new Point2d(img1.Cols, 0), scene_corners[2] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
//Cv2.Line(img3, scene_corners[2] + new Point2d(img1.Cols, 0), scene_corners[3] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
//Cv2.Line(img3, scene_corners[3] + new Point2d(img1.Cols, 0), scene_corners[0] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
img3.SaveImage("Kaze_Output.png"); // save the result
Window.ShowImages(img3); // show the result
}
}
}
// unused; kept to avoid an opencvsharp bug
static Point2d[] MyPerspectiveTransform1(Point2f[] yourData, Mat transformationMatrix)
{
using Mat src = new Mat(yourData.Length, 1, MatType.CV_32FC2, yourData);
using Mat dst = new Mat();
Cv2.PerspectiveTransform(src, dst, transformationMatrix);
dst.GetArray(out Point2f[] dstArray);
Point2d[] result = Array.ConvertAll(dstArray, Point2fToPoint2d);
return result;
}
// unused; fixed FromArray behavior
static Point2d[] MyPerspectiveTransform2(Point2f[] yourData, Mat transformationMatrix)
{
using var s = Mat<Point2f>.FromArray(yourData);
using var d = new Mat<Point2f>();
Cv2.PerspectiveTransform(s, d, transformationMatrix);
Point2f[] f = d.ToArray();
return f.Select(Point2fToPoint2d).ToArray();
}
// new API
static Point2d[] MyPerspectiveTransform3(Point2f[] yourData, Mat transformationMatrix)
{
Point2f[] ret = Cv2.PerspectiveTransform(yourData, transformationMatrix);
return ret.Select(Point2fToPoint2d).ToArray();
}
// entries in the mask that have a unique match are white (255)
static int VoteForSizeAndOrientation(KeyPoint[] modelKeyPoints, KeyPoint[] observedKeyPoints, DMatch[][] matches, Mat mask, float scaleIncrement, int rotationBins)
{
int idx = 0;
int nonZeroCount = 0;
byte[] maskMat = new byte[mask.Rows];
GCHandle maskHandle = GCHandle.Alloc(maskMat, GCHandleType.Pinned);
using (Mat m = new Mat(mask.Rows, 1, MatType.CV_8U, maskHandle.AddrOfPinnedObject()))
{
mask.CopyTo(m);
List<float> logScale = new List<float>(); // list of log scale ratios
List<float> rotations = new List<float>(); // list of rotation angles
double s, maxS, minS, r;
maxS = -1.0e-10f; minS = 1.0e10f;
//if you get an exception here, it's because you're passing the model and observed keypoints in backwards; just switch the order
for (int i = 0; i < maskMat.Length; i++) // iterate over the matches that passed the uniqueness vote
{
if (maskMat[i] > 0)
{
KeyPoint observedKeyPoint = observedKeyPoints[i]; // matched keypoint in the observed image
KeyPoint modelKeyPoint = modelKeyPoints[matches[i][0].TrainIdx]; // corresponding keypoint on the model
s = Math.Log10(observedKeyPoint.Size / modelKeyPoint.Size); // log of the scale ratio
logScale.Add((float)s);
maxS = s > maxS ? s : maxS;
minS = s < minS ? s : minS;
r = observedKeyPoint.Angle - modelKeyPoint.Angle;
r = r < 0.0f ? r + 360.0f : r;
rotations.Add((float)r);
}
}
int scaleBinSize = (int)Math.Ceiling((maxS - minS) / Math.Log10(scaleIncrement));
if (scaleBinSize < 2)
scaleBinSize = 2;
float[] scaleRanges = { (float)minS, (float)(minS + scaleBinSize + Math.Log10(scaleIncrement)) };
using var scalesMat = new Mat<float>(rows: logScale.Count, cols: 1, data: logScale.ToArray());
using var rotationsMat = new Mat<float>(rows: rotations.Count, cols: 1, data: rotations.ToArray());
using var flagsMat = new Mat<float>(logScale.Count, 1);
using Mat hist = new Mat();
flagsMat.SetTo(new Scalar(0.0f));
float[] flagsMatFloat1 = flagsMat.ToArray();
int[] histSize = { scaleBinSize, rotationBins };
float[] rotationRanges = { 0.0f, 360.0f };
int[] channels = { 0, 1 };
Rangef[] ranges = { new Rangef(scaleRanges[0], scaleRanges[1]), new Rangef(rotations.Min(), rotations.Max()) };
Mat[] arrs = { scalesMat, rotationsMat };
Cv2.CalcHist(arrs, channels, null, hist, 2, histSize, ranges);
Cv2.MinMaxLoc(hist, out double minVal, out double maxVal);
Cv2.Threshold(hist, hist, maxVal * 0.5, 0, ThresholdTypes.Tozero);
Cv2.CalcBackProject(arrs, channels, hist, flagsMat, ranges);
MatIndexer<float> flagsMatIndexer = flagsMat.GetIndexer();
for (int i = 0; i < maskMat.Length; i++)
{
if (maskMat[i] > 0)
{
if (flagsMatIndexer[idx++] != 0.0f)
{
nonZeroCount++;
}
else
maskMat[i] = 0;
}
}
m.CopyTo(mask);
}
maskHandle.Free();
return nonZeroCount;
}
// vote for uniqueness (nearest-neighbor distance ratio test)
private static void VoteForUniqueness(DMatch[][] matches, Mat mask, float uniqnessThreshold = 0.80f)
{
byte[] maskData = new byte[matches.Length];
GCHandle maskHandle = GCHandle.Alloc(maskData, GCHandleType.Pinned); // pin the managed array so the GC cannot move or collect it while its address is in use
using (Mat m = new Mat(matches.Length, 1, MatType.CV_8U, maskHandle.AddrOfPinnedObject()))
{
mask.CopyTo(m);
for (int i = 0; i < matches.Length; i++)
{
//this is also known as NNDR, the Nearest Neighbor Distance Ratio
if ((matches[i][0].Distance / matches[i][1].Distance) <= uniqnessThreshold)
maskData[i] = 255; // white: has a unique match
else
maskData[i] = 0; // no unique match
}
m.CopyTo(mask);
}
maskHandle.Free();
}
}
}
#13. Mat submatrix operations
using System;
using System.Threading.Tasks;
using OpenCvSharp;
using SampleBase;
namespace SamplesCore
{
///
///
///
class MatOperations : ConsoleTestBase
{
public override void RunTest()
{
SubMat();
RowColRangeOperation();
RowColOperation();
}
///
/// Submatrix operations
///
private void SubMat()
{
using var src = Cv2.ImRead(ImagePath.Lenna); // read the source image
// Assign small image to mat
using var small = new Mat();
Cv2.Resize(src, small, new Size(100, 100)); // shrink the source image
src[10, 110, 10, 110] = small; // paste the small image over part of the source
src[370, 470, 400, 500] = small.T(); // paste the transposed small image over another part
// ↑ this is equivalent to:
//small.T().CopyTo(src[370, 470, 400, 500]);
// Get a partial mat (similar to cvSetImageROI)
Mat part = src[200, 400, 200, 360];
// Invert the partial pixel values
Cv2.BitwiseNot(part, part);
// Fill the region (rows 50..100, cols 400..450) with (128, 0, 0)
part = src.SubMat(50, 100, 400, 450);
part.SetTo(128);
using (new Window("SubMat", src)) // show the result
{
Cv2.WaitKey();
}
part.Dispose();
}
///
/// Submatrix operations
///
private void RowColRangeOperation()
{
using var src = Cv2.ImRead(ImagePath.Lenna); // source image
Cv2.GaussianBlur(
src.RowRange(100, 200),
src.RowRange(200, 300),
new Size(7, 7), 20); // Gaussian-blur one row band into another
Cv2.GaussianBlur(
src.ColRange(200, 300),
src.ColRange(100, 200),
new Size(7, 7), 20);
using (new Window("RowColRangeOperation", src))
{
Cv2.WaitKey();
}
}
///
/// Submatrix expression operations
///
private void RowColOperation()
{
using var src = Cv2.ImRead(ImagePath.Lenna); // read the source image
var rand = new Random();
// randomly swap pairs of rows
for (int i = 0; i < 200; i++)
{
int c1 = rand.Next(100, 400);
int c2 = rand.Next(100, 400);
using Mat temp = src.Row(c1).Clone(); // copy of a random row
src.Row(c2).CopyTo(src.Row(c1));
temp.CopyTo(src.Row(c2));
}
((Mat)~src.ColRange(450, 500)).CopyTo(src.ColRange(0, 50)); // copy the inverted column range into another region
src.RowRange(450, 460).SetTo(new Scalar(0, 0, 255)); // set the row band to red
using (new Window("RowColOperation", src))
{
Cv2.WaitKey();
}
}
}
}
#14. Multidimensional scaling
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using OpenCvSharp;
using SampleBase;
namespace SamplesCore
{
///
/// Multidimensional Scaling
/// for C++ cv::Mat testing
///
class MDS : ConsoleTestBase
{
///
/// Distances between 10 cities in the USA
///
///
/// * The linear distance between Atlanta and Chicago is 587 miles.
///
static readonly double[,] CityDistance =
{
/*Atlanta*/ {0, 587, 1212, 701, 1936, 604, 748, 2139, 2182, 543},
/*Chicago*/ {587, 0, 920, 940, 1745, 1188, 713, 1858, 1737, 597},
/*Denver*/ {1212, 920, 0, 879, 831, 1726, 1631, 949, 1021, 1494},
/*Houston*/ {701, 940, 879, 0, 1734, 968, 1420, 1645, 1891, 1220},
/*Los Angeles*/ {1936, 1745, 831, 1734, 0, 2339, 2451, 347, 959, 2300},
/*Miami*/ {604, 1188, 1726, 968, 2339, 0, 1092, 2594, 2734, 923},
/*New York*/ {748, 713, 1631, 1420, 2451, 1092, 0, 2571, 2408, 205},
/*San Francisco*/ {2139, 1858, 949, 1645, 347, 2594, 2571, 0, 678, 2442},
/*Seattle*/ {2182, 1737, 1021, 1891, 959, 2734, 2408, 678, 0, 2329},
/*Washington D.C.*/ {543, 597, 1494, 1220, 2300, 923, 205, 2442, 2329, 0}
};
///
/// City names
///
static readonly string[] CityNames =
{
"Atlanta","Chicago","Denver","Houston","Los Angeles","Miami","New York","San Francisco","Seattle","Washington D.C."
};
///
/// Classical Multidimensional Scaling
///
public override void RunTest()
{
// create the distance matrix
int size = CityDistance.GetLength(0);
var t = new Mat(size, size, MatType.CV_64FC1, CityDistance);
// adds Torgerson's additive constant to t
double torgarson = Torgerson(t);
t += torgarson;
// squares all elements of t
t = t.Mul(t);
// centering matrix G
using var g = CenteringMatrix(size);
// calculates the inner product matrix B
using var b = g * t * g.T() * -0.5;
// calculates the eigenvalues and eigenvectors of B
using var values = new Mat();
using var vectors = new Mat();
Cv2.Eigen(b, values, vectors);
for (int r = 0; r < values.Rows; r++)
{
if (values.Get<double>(r) < 0)
values.Set<double>(r, 0);
}
//Console.WriteLine(values.Dump());
// multiplies each eigenvector by sqrt(eigenvalue)
using var result = vectors.RowRange(0, 2);
{
var at = result.GetGenericIndexer<double>();
for (int r = 0; r < result.Rows; r++)
{
for (int c = 0; c < result.Cols; c++)
{
at[r, c] *= Math.Sqrt(values.Get<double>(r));
}
}
}
// scaling
Cv2.Normalize(result, result, 0, 800, NormTypes.MinMax);
// opens a window
using (Mat img = Mat.Zeros(600, 800, MatType.CV_8UC3))
using (var window = new Window("City Location Estimation"))
{
var at = result.GetGenericIndexer<double>();
for (int c = 0; c < size; c++)
{
double x = at[0, c];
double y = at[1, c];
x = x * 0.7 + img.Width * 0.1;
y = y * 0.7 + img.Height * 0.1;
img.Circle((int)x, (int)y, 5, Scalar.Red, -1);
Point textPos = new Point(x + 5, y + 10);
img.PutText(CityNames[c], textPos, HersheyFonts.HersheySimplex, 0.5, Scalar.White);
}
window.Image = img;
Cv2.WaitKey();
}
}
///
/// Returns Torgerson's additive constant
///
///
///
private double Torgerson(Mat mat)
{
if (mat == null)
throw new ArgumentNullException();
if (mat.Rows != mat.Cols) // the matrix must be square
throw new ArgumentException();
int n = mat.Rows;
// additive constant in case of negative values
Cv2.MinMaxLoc(-mat, out _, out double max);
double c2 = max;
// additive constant from the triangle inequality
double c1 = 0;
var at = mat.GetGenericIndexer<double>(); // typed indexer with getters/setters for each matrix element
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
for (int k = 0; k < n; k++)
{
double v = at[i, k] - at[i, j] - at[j, k];
if (v > c1)
{
c1 = v;
}
}
}
}
return Math.Max(Math.Max(c1, c2), 0);
}
///
/// Returns centering matrix
///
/// Size of matrix
///
private Mat CenteringMatrix(int n)
{
using var eye = Mat.Eye(n, n, MatType.CV_64FC1) ;
return (eye - 1.0 / n);
}
}
}
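In equation form, RunTest above implements classical (Torgerson) MDS. Writing D for the distance matrix after adding Torgerson's constant, n for the number of cities, and J = I - (1/n)*11^T for the centering matrix (CenteringMatrix in the code), it computes B = -(1/2) * J * D2 * J, where D2 is the elementwise square of D (t = t.Mul(t)). It then takes the eigendecomposition of B, clamps negative eigenvalues to zero, and uses sqrt(lambda_i) * v_i for the two largest eigenpairs (lambda_i, v_i) as the 2-D city coordinates, finally normalizing them into the window.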
#15. Channel split/merge test
using OpenCvSharp;
using SampleBase;
namespace SamplesCore
{
class MergeSplitSample : ConsoleTestBase
{
public override void RunTest()
{
// Split/Merge Test
{
using var src = new Mat(ImagePath.Lenna, ImreadModes.Color); // source image
// Split each plane
Cv2.Split(src, out var planes); // split the channels
Cv2.ImShow("planes 0", planes[0]);
Cv2.ImShow("planes 1", planes[1]);
Cv2.ImShow("planes 2", planes[2]);
Cv2.WaitKey();
Cv2.DestroyAllWindows();
// Invert the G plane
Cv2.BitwiseNot(planes[1], planes[1]);
// Merge
using var merged = new Mat();
Cv2.Merge(planes, merged); // merge the channels back together
Cv2.ImShow("src", src);
Cv2.ImShow("merged", merged);
Cv2.WaitKey();
Cv2.DestroyAllWindows();
}
// MixChannels test
{
using var rgba = new Mat(300, 300, MatType.CV_8UC4, new Scalar(50, 100, 150, 200)); // solid-color image
using var bgr = new Mat(rgba.Rows, rgba.Cols, MatType.CV_8UC3);
using var alpha = new Mat(rgba.Rows, rgba.Cols, MatType.CV_8UC1);
Mat[] input = { rgba }; // 4 channels in
Mat[] output = { bgr, alpha }; // 3 + 1 channels out
// rgba[0] -> bgr[2], rgba[1] -> bgr[1],
// rgba[2] -> bgr[0], rgba[3] -> alpha[0]
int[] fromTo = { 0, 2, 1, 1, 2, 0, 3, 3 };
Cv2.MixChannels(input, output, fromTo); // reroute the channels
Cv2.ImShow("rgba", rgba);
Cv2.ImShow("bgr", bgr);
Cv2.ImShow("alpha", alpha);
Cv2.WaitKey();
Cv2.DestroyAllWindows();
}
}
}
}
#16. Morphological dilation
using OpenCvSharp;
using SampleBase;
using System.Threading.Tasks;
namespace SamplesCore
{
class MorphologySample : ConsoleTestBase
{
public override void RunTest()
{
using var gray = new Mat(ImagePath.Lenna, ImreadModes.Grayscale); // grayscale image
using var binary = new Mat(); // binary image
using var dilate1 = new Mat(); // dilation result 1
using var dilate2 = new Mat();
byte[] kernelValues = {0, 1, 0, 1, 1, 1, 0, 1, 0}; // cross-shaped (+) kernel values
using var kernel = new Mat(3, 3, MatType.CV_8UC1, kernelValues); // structuring element (see the note after this sample)
// binarize the grayscale image
Cv2.Threshold(gray, binary, 0, 255, ThresholdTypes.Otsu);
// empty kernel (OpenCV substitutes a 3x3 rectangle)
Cv2.Dilate(binary, dilate1, null);
// + kernel
Cv2.Dilate(binary, dilate2, kernel); // dilation
Cv2.ImShow("binary", binary);
Cv2.ImShow("dilate (kernel = null)", dilate1);
Cv2.ImShow("dilate (kernel = +)", dilate2);
Cv2.WaitKey(0);
Cv2.DestroyAllWindows();
}
}
}
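A note on the hand-written kernel above: OpenCV can also generate the same 3x3 cross shape; a one-line sketch:

// equivalent to the kernelValues array in the sample
using var cross = Cv2.GetStructuringElement(MorphShapes.Cross, new Size(3, 3));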
#17. MSER (Maximally Stable Extremal Regions), a segmentation-and-matching algorithm somewhat like watershed segmentation. It has an affine invariance that features such as SIFT, SURF and ORB lack, and in recent years it has been widely applied to image segmentation and matching.
//https://blog.csdn.net/hust_bochu_xuchao/article/details/52230694
//https://blog.csdn.net/qq_41685265/article/details/104096152
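Before the sample: its DetectRegions comment notes that the C++ API can bound the region size; in OpenCvSharp the same bounds are exposed through MSER.Create. A sketch with OpenCV's default values (the image path is a placeholder):

using System;
using OpenCvSharp;

using var gray = Cv2.ImRead("scene.png", ImreadModes.Grayscale); // placeholder path
using var mser = MSER.Create(5, 60, 14400); // delta, minArea, maxArea; shrink [minArea, maxArea] for fewer regions
mser.DetectRegions(gray, out Point[][] regions, out Rect[] boxes);
Console.WriteLine($"{regions.Length} stable regions detected");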
using OpenCvSharp;
using SampleBase;
namespace SamplesCore
{
///
/// Maximally Stable Extremal Regions
///
class MSERSample : ConsoleTestBase
{
public override void RunTest()
{
using Mat src = new Mat(ImagePath.Distortion, ImreadModes.Color); // source image
using Mat gray = new Mat();
using Mat dst = src.Clone();
Cv2.CvtColor(src, gray, ColorConversionCodes.BGR2GRAY); // grayscale
CppStyleMSER(gray, dst); // C++ style
using (new Window("MSER src", src))
using (new Window("MSER gray", gray))
using (new Window("MSER dst", dst))
{
Cv2.WaitKey();
}
}
///
/// Extracts MSER by C++-style code (cv::MSER)
///
///
///
private void CppStyleMSER(Mat gray, Mat dst)
{
MSER mser = MSER.Create();
mser.DetectRegions(gray, out Point[][] contours, out _); // extract the stable regions; the C++ API also lets you bound the minimum and maximum region size to limit the number of detected features
foreach (Point[] pts in contours)
{
Scalar color = Scalar.RandomColor(); // random color
foreach (Point p in pts)
{
dst.Circle(p, 1, color); // draw the contour point
}
}
}
}
}
#18. Thresholding plain .NET arrays
using System;
using System.Collections.Generic;
using OpenCvSharp;
using SampleBase;
namespace SamplesCore
{
class NormalArrayOperations : ConsoleTestBase
{
public override void RunTest()
{
Threshold1();
Threshold2();
Threshold3();
}
///
/// Run thresholding on a byte array
///
private void Threshold1()
{
const int T = 3; // threshold
const int Max = 5; // max value
byte[] input = {1, 2, 3, 4, 5, };
var output = new List<byte>();
Cv2.Threshold(InputArray.Create(input), OutputArray.Create(output),
T, Max, ThresholdTypes.Binary); // threshold the byte array
Console.WriteLine("Threshold: {0}", T);
Console.WriteLine("input: {0}", string.Join(",", input));
Console.WriteLine("output: {0}", string.Join(",", output));
}
///
/// Run thresholding on a short array
///
private void Threshold2()
{
const int T = 150;
const int Max = 250;
short[] input = { 50, 100, 150, 200, 250, };
var output = new List<short>();
Cv2.Threshold(InputArray.Create(input), OutputArray.Create(output),
T, Max, ThresholdTypes.Binary);
Console.WriteLine("Threshold: {0}", T);
Console.WriteLine("input: {0}", string.Join(",", input));
Console.WriteLine("output: {0}", string.Join(",", output));
}
///
/// Run thresholding on a struct array
///
private void Threshold3()
{
const double T = 2000;
const double Max = 5000;
// Threshold does not support Point (int), so run thresholding on an array of Point2f structs
Point2f[] input = {
new Point2f(1000, 1500),
new Point2f(2000, 2001),
new Point2f(500, 5000),
};
var output = new List<Point2f>();
Cv2.Threshold(InputArray.Create(input), OutputArray.Create(output),
T, Max, ThresholdTypes.Binary);
Console.WriteLine("Threshold: {0}", T);
Console.WriteLine("input: {0}", string.Join(",", input));
Console.WriteLine("output: {0}", string.Join(",", output));
}
}
}
#19. OpenVINO deep learning https://zhuanlan.zhihu.com/p/91882515
using System.Collections.Generic;
using OpenCvSharp;
using OpenCvSharp.Dnn;
using SampleBase;
namespace SamplesCore
{
///
/// To run this example you first need to compile OpenCV with Intel OpenVINO.
/// Download the face detection model available here: https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-adas-0001
/// Add the files to the bin folder.
///
internal class OpenVinoFaceDetection : ConsoleTestBase
{
const string modelFace = "face-detection-adas-0001.bin"; // network weights
const string modelFaceTxt = "face-detection-adas-0001.xml"; // network topology description
const string sampleImage = "sample.jpg";
const string outputLoc = "sample_output.jpg";
public override void RunTest()
{
using var frame = Cv2.ImRead(sampleImage); // image to process
int frameHeight = frame.Rows;
int frameWidth = frame.Cols;
using var netFace = CvDn