
How to Integrate a YOLOv3 Model into C# with OpenCvSharp

行走在理想边缘 · 2022-06-24 · Published in Sichuan

Contents

Step 1: Convert the trained .h5 file to a .weights file

Step 2: Modify the cfg file

Step 3: Copy the weights, class-names, and cfg files into the C# program's bin folder

Step 4: Download OpenCvSharp

Step 5: Write the C# code


Step 1: Convert the trained .h5 file to a .weights file

The model is trained with the TensorFlow + Keras framework, which saves its parameters as an .h5 file containing the convolution and pooling layer weights produced by YOLO training.
OpenCV's deep neural network (dnn) module can load Darknet-format models: it imports a .weights file together with the network architecture defined in a .cfg file. So, to use a model trained with the Keras framework in OpenCV, the yolo.h5 file has to be converted into a yolo.weights file. Run the following code:


from tensorflow.keras.models import load_model
import configparser
import io
from collections import defaultdict
import numpy as np
from yolo import YOLO


def unique_config_sections(config_file):
    """Convert all config sections to have unique names.

    Adds unique suffixes to config sections for compatibility with configparser.
    """
    section_counters = defaultdict(int)
    output_stream = io.StringIO()
    with open(config_file) as fin:
        for line in fin:
            if line.startswith('['):
                section = line.strip().strip('[]')
                _section = section + '_' + str(section_counters[section])
                section_counters[section] += 1
                line = line.replace(section, _section)
            output_stream.write(line)
    output_stream.seek(0)
    return output_stream


def _main():
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    # Darknet header: major, minor, revision = [0, 2, 0], seen = 32013312
    m_revision = [0, 2, 0]
    seen = [32013312]
    # convert the header to bytes
    m_revision_const = np.array(m_revision, dtype=np.int32)
    m_revision_bytes = m_revision_const.tobytes()
    seen_const = np.array(seen, dtype=np.int64)
    seen_bytes = seen_const.tobytes()
    print('write revision information\n')
    weight_file.write(m_revision_bytes)
    weight_file.write(seen_bytes)

    # conv2d and batch_normalize layers
    b = 0  # counts convolutional layers that have no batch normalization
    print('start write weights\n')
    for section in cfg_parser.sections():
        if section.startswith('convolutional'):
            # section name is 'convolutional_<index>'
            num = int(section.split('_')[-1]) + 1
            batch_normalize = 'batch_normalize' in cfg_parser[section]
            if batch_normalize:
                # write order: conv bias, batch-norm weights, conv kernel
                batch_weight_name = 'batch_normalization_' + str(num - b)
                bn_weight_list_layer = model.get_layer(batch_weight_name)
                bn_weight_list = bn_weight_list_layer.get_weights()
                # bn_weight_list = [gamma, beta, moving_mean, moving_variance]
                conv_bias = bn_weight_list[1]
                bn_weight = [bn_weight_list[0], bn_weight_list[2], bn_weight_list[3]]
                # extract conv_weight from the conv2d layer
                conv2d_weight_name = 'conv2d_' + str(num)
                print(conv2d_weight_name, '\n')
                print(batch_weight_name, '\n')
                conv2d_weight_name_layer = model.get_layer(conv2d_weight_name)
                conv_weight = conv2d_weight_name_layer.get_weights()[0]
                # transpose to Darknet layout (out, in, h, w)
                conv_weight = np.transpose(conv_weight, [3, 2, 0, 1])

                bias_weight = np.array(conv_bias, dtype=np.float32)
                bytes_bias_weight = bias_weight.tobytes()
                weight_file.write(bytes_bias_weight)
                print(bias_weight.shape, '\n')

                # convert bn_weight to bytes, then write to file
                bn_weight_array = np.array(bn_weight, dtype=np.float32)
                bytes_bn_weight = bn_weight_array.tobytes()
                weight_file.write(bytes_bn_weight)
                print(bn_weight_array.shape, '\n')

                conv_weight_array = np.array(conv_weight, dtype=np.float32)
                bytes_conv_weight = conv_weight_array.tobytes()
                weight_file.write(bytes_conv_weight)
                print(conv_weight_array.shape, '\n')
            else:
                # no batch_normalize layer: write only conv bias and conv kernel
                # b offsets the batch_normalization layer numbering used above
                b += 1
                print('\n')
                conv2d_weight_name = 'conv2d_' + str(num)
                print('disorder', conv2d_weight_name, '\n\n')
                conv2d_weight_name_layer = model.get_layer(conv2d_weight_name)
                conv_weights = conv2d_weight_name_layer.get_weights()
                # extract conv_bias and conv_weight
                conv_bias = conv_weights[-1]
                conv_weight = np.array(conv_weights[0])
                # transpose to Darknet layout (out, in, h, w)
                conv_weight = np.transpose(conv_weight, [3, 2, 0, 1])
                # write conv_bias first, then conv_weight
                bias_weight = np.array(conv_bias, dtype=np.float32)
                bytes_bias_weight = bias_weight.tobytes()
                weight_file.write(bytes_bias_weight)
                print(bias_weight.shape)
                conv_weight_array = np.array(conv_weight, dtype=np.float32)
                bytes_conv_weight = conv_weight_array.tobytes()
                weight_file.write(bytes_conv_weight)
                print(conv_weight_array.shape)
    weight_file.close()
    print("convert success!\n")


if __name__ == '__main__':
    model_path = "weight/yolov3.h5"              # Keras YOLOv3 .h5 model file
    config_path = 'yolov3.cfg'                   # .cfg file path
    weight_file = open('yolov3.weights', 'wb')   # output Darknet yolov3 weights file
    # The default keras-yolo3 project (https://github.com/qqwweee/keras-yolo3/blob/master/train.py)
    # saves the trained model with model.save_weights(log_dir + 'trained_weights_final.h5'),
    # i.e. it saves only the weights; YOLO() below rebuilds the model structure and loads them.
    yoloobj = YOLO()
    model = yoloobj.yolo_model
    _main()

After the script finishes, you have the yolov3.weights file.
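
Before moving on to C#, you can optionally sanity-check the converted file with OpenCV's dnn importer. Below is a minimal sketch using OpenCV's Python bindings (cv2), which are assumed to be installed; the file names are the ones produced above.

import cv2

# Load the converted model with OpenCV's Darknet importer; this fails
# if the .cfg architecture and the .weights file are inconsistent.
net = cv2.dnn.readNetFromDarknet('yolov3.cfg', 'yolov3.weights')
print('output layers:', net.getUnconnectedOutLayersNames())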

Step 2: Modify the cfg file

Open yolov3.cfg and search for "yolo" (note there are 3 [yolo] sections). For each of them, the places to modify are:
1. filters = 3 * (5 + classes), in the [convolutional] section immediately before each [yolo] section
2. classes = n (n is the number of classes you trained on)

3. anchors: replace them with the values obtained by clustering during training.
For example, this article uses two classes, so filters = 21 and classes = 2; see the snippet below.
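
For reference, here is a sketch of what the region around one of the three [yolo] sections could look like after editing for two classes. The anchors line shows the stock YOLOv3 values only as a placeholder (replace them with your own clustering results), and the mask values differ between the three [yolo] sections:

[convolutional]
size=1
stride=1
pad=1
# filters = 3 * (5 + classes) = 3 * (5 + 2)
filters=21
activation=linear

[yolo]
mask = 6,7,8
# replace with the anchors obtained from clustering on your own data
anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
classes=2
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1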

Step 3: Copy the weights, class-names, and cfg files into the C# program's bin folder

Create a new WinForms project, create a yolov3 folder under bin/Debug, and put the weights file, the class-names file, and the cfg file into it, as sketched below.
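
The resulting layout under the build output folder looks roughly like this; the file names are taken from the paths used in the C# code in Step 5 and must match them:

bin/Debug/
    yolov3/
        yolov3.cfg
        yolov3.weights
        coco_classes.txt     (class-names file, one class name per line)
        aircraft_8.jpg       (test image loaded by the demo code)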

Step 4: Download OpenCvSharp

Download link: https://github.com/shimat/opencvsharp/releases?after=4.5.1.20201226

I used version 4.1.0.

After extracting the package, copy OpenCvSharp.Blob.dll, OpenCvSharp.dll, OpenCvSharp.Extensions.dll, and OpenCvSharpExtern.dll into the Debug folder, then add references to them in the project (OpenCvSharpExtern.dll is the native library; it only needs to sit next to the executable and is not added as a managed reference).

Step 5: Write the C# code

First, the form layout: the demo form contains two PictureBox controls (pictureBox2 for the input image, pictureBox1 for the detection result) and a Button (button1) that triggers detection.

The code:


using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Threading;
using System.IO;
using OpenCvSharp;
using OpenCvSharp.Dnn;
using OpenCvSharp.Extensions;

namespace yolov4_detect
{
    public partial class Form1 : Form
    {
        static string Cfg = "../../bin/Debug/yolov3/yolov3.cfg";
        static string Weight = "../../bin/Debug/yolov3/yolov3.weights";
        static string Names = "../../bin/Debug/yolov3/coco_classes.txt";

        string[] Labels = File.ReadAllLines(Names).ToArray();
        // one random color per class (two classes in this example)
        Scalar[] Colors = Enumerable.Repeat(false, 2).Select(x => Scalar.RandomColor()).ToArray();
        Net net;

        public Form1()
        {
            InitializeComponent();
            net = CvDnn.ReadNetFromDarknet(Cfg, Weight);
        }

        private Mat imagein;
        private Mat imageout;

        private void Form1_Load(object sender, EventArgs e)
        {
            imagein = new Mat();
            imageout = new Mat();
            imagein = Cv2.ImRead(@"yolov3/aircraft_8.jpg");
            pictureBox2.Image = imagein.ToBitmap();
        }

        private void button1_Click(object sender, EventArgs e)
        {
            imageout = yolov3_model(imagein);
            pictureBox1.Image = imageout.ToBitmap();
        }

        private Mat yolov3_model(Mat imgSrc)
        {
            Mat org = new Mat();
            org = imgSrc;
            const float threshold = 0.5f;       // confidence threshold
            const float nmsThreshold = 0.3f;    // threshold for NMS
            var blob = CvDnn.BlobFromImage(org, 1.0 / 255, new OpenCvSharp.Size(416, 416), new Scalar(), true, false);
            net.SetInput(blob);
            var outNames = net.GetUnconnectedOutLayersNames();
            var outs = outNames.Select(_ => new Mat()).ToArray();
            net.Forward(outs, outNames);
            org = GetResult(outs, org, threshold, nmsThreshold);
            return org;
        }

        private Mat GetResult(IEnumerable<Mat> output, Mat image, float threshold, float nmsThreshold, bool nms = true)
        {
            // lists used for NMS
            var classIds = new List<int>();
            var confidences = new List<float>();
            var probabilities = new List<float>();
            var boxes = new List<Rect2d>();
            var w = image.Width;
            var h = image.Height;
            /*
             YOLOv3 output row layout:
             0 1 : box center x/y    2 3 : box w/h
             4   : objectness confidence
             5.. : class probabilities (one column per class)
            */
            const int prefix = 5;   // skip columns 0~4
            foreach (var prob in output)
            {
                for (var i = 0; i < prob.Rows; i++)
                {
                    var confidence = prob.At<float>(i, 4);
                    if (confidence > threshold)
                    {
                        // find the class with the highest probability
                        double maxVal, minVal;
                        OpenCvSharp.Point min, max;
                        Cv2.MinMaxLoc(prob.Row[i].ColRange(prefix, prob.Cols), out minVal, out maxVal, out min, out max);
                        var classes = max.X;
                        var probability = prob.At<float>(i, classes + prefix);
                        if (probability > threshold) // extra accuracy check; you can remove it
                        {
                            // get center and width/height
                            var centerX = prob.At<float>(i, 0) * w;
                            var centerY = prob.At<float>(i, 1) * h;
                            var width = prob.At<float>(i, 2) * w;
                            var height = prob.At<float>(i, 3) * h;
                            if (!nms)
                            {
                                // draw the result directly (when NMSBoxes is not used)
                                Draw(image, classes, confidence, probability, centerX, centerY, width, height);
                                continue;
                            }
                            // collect data for NMSBoxes
                            classIds.Add(classes);
                            confidences.Add(confidence);
                            probabilities.Add(probability);
                            boxes.Add(new Rect2d(centerX, centerY, width, height));
                        }
                    }
                }
            }
            if (!nms) return null;

            // use non-maximum suppression to remove overlapping low-confidence boxes
            int[] indices;
            CvDnn.NMSBoxes(boxes, confidences, threshold, nmsThreshold, out indices);
            foreach (var i in indices)
            {
                var box = boxes[i];
                Draw(image, classIds[i], confidences[i], probabilities[i], box.X, box.Y, box.Width, box.Height);
            }
            return image;
        }

        private void Draw(Mat image, int classes, float confidence, float probability, double centerX, double centerY, double width, double height)
        {
            //var label = $"{Labels[classes]} {probability * 100:0.00}%";
            var label = Labels[classes] + "_" + probability.ToString("f2");
            var x1 = (centerX - width / 2) < 0 ? 0 : centerX - width / 2; // keep the left edge inside the image
            // draw the bounding box
            image.Rectangle(new OpenCvSharp.Point(x1, centerY - height / 2), new OpenCvSharp.Point(centerX + width / 2, centerY + height / 2), Colors[classes], 2);
            // draw the label background and text
            int baseline = 0;
            var textSize = Cv2.GetTextSize(label, HersheyFonts.HersheyTriplex, 0.5, 1, out baseline);
            Cv2.Rectangle(image, new OpenCvSharp.Rect(new OpenCvSharp.Point(x1, centerY - height / 2 - textSize.Height - baseline),
                new OpenCvSharp.Size(textSize.Width, textSize.Height + baseline)), Colors[classes], Cv2.FILLED);
            var textColor = Cv2.Mean(Colors[classes]).Val0 < 70 ? Scalar.White : Scalar.Black;
            Cv2.PutText(image, label, new OpenCvSharp.Point(x1, centerY - height / 2 - baseline), HersheyFonts.HersheyTriplex, 0.5, textColor);
        }
    }
}

Running result: clicking the button runs detection and displays the image with the predicted bounding boxes and class labels in pictureBox1.
