Java(Eclipse) + OpenCV3.2.0 でWebカメラ画像の特徴点検出(Mac)

Java(Eclipse) + OpenCV3.2.0 でWebカメラ画像の特徴点検出(Mac)

これまで,Java + OpenCVを使って,2枚の画像の特徴点検出(SURFやSIFT)やWebカメラの画像表示をしてきました.
今回はそれを合わせて,Webカメラの画像を使って特徴点検出です.
 
 

import java.awt.*;
import java.awt.image.BufferedImage;
import java.util.Arrays;
import java.util.Comparator;

import javax.swing.*;

import org.opencv.core.Core;
import org.opencv.core.DMatch;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Size;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.Features2d;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.videoio.VideoCapture;


/**
 * Grabs frames from the default webcam, matches SIFT features against a fixed
 * reference image ({@code ./resources/test01s.jpg}), and displays the
 * side-by-side match visualization in a Swing panel.
 *
 * <p>Not thread-safe: {@code setimage}/{@code getimage} are called from the
 * capture loop on the main thread while Swing paints on the EDT; this mirrors
 * the original design and is acceptable for a demo.
 */
public class imagematch extends JPanel {
    private static final long serialVersionUID = 1L;

    // Most recently produced match visualization; drawn by paintComponent().
    private BufferedImage image;

    public imagematch() {
        super();
    }

    private BufferedImage getimage() {
        return image;
    }

    private void setimage(BufferedImage newimage) {
        image = newimage;
    }

    /**
     * Converts/writes a Mat into a BufferedImage.
     *
     * @param matrix Mat of type CV_8UC3 or CV_8UC1
     * @return BufferedImage of type TYPE_3BYTE_BGR or TYPE_BYTE_GRAY,
     *         or {@code null} for unsupported channel counts
     */
    public static BufferedImage matToBufferedImage(Mat matrix) {
        int cols = matrix.cols();
        int rows = matrix.rows();
        int elemSize = (int) matrix.elemSize();
        byte[] data = new byte[cols * rows * elemSize];
        matrix.get(0, 0, data);
        int type;
        switch (matrix.channels()) {
        case 1:
            type = BufferedImage.TYPE_BYTE_GRAY;
            break;
        case 3:
            type = BufferedImage.TYPE_3BYTE_BGR;
            // OpenCV stores pixels B,G,R; swap B and R in place so the raw
            // bytes line up with what setDataElements expects for this type.
            for (int i = 0; i < data.length; i += 3) {
                byte b = data[i];
                data[i] = data[i + 2];
                data[i + 2] = b;
            }
            break;
        default:
            return null; // unsupported channel count (e.g. 4-channel BGRA)
        }
        BufferedImage image2 = new BufferedImage(cols, rows, type);
        image2.getRaster().setDataElements(0, 0, cols, rows, data);
        return image2;
    }

    @Override
    public void paintComponent(Graphics g) {
        // FIX: call super first so stale pixels from the previous (possibly
        // larger) frame are cleared before the new image is drawn.
        super.paintComponent(g);
        BufferedImage temp = getimage();
        if (temp != null) {
            g.drawImage(temp, 10, 10, temp.getWidth(), temp.getHeight(), this);
        }
    }

    /**
     * Converts a BGR frame to a normalized grayscale image.
     * FIX: imread()/VideoCapture produce 3-channel BGR, so COLOR_BGR2GRAY is
     * the correct conversion code (the original used COLOR_BGRA2GRAY).
     */
    private static Mat toNormalizedGray(Mat bgr) {
        Mat gray = new Mat();
        Imgproc.cvtColor(bgr, gray, Imgproc.COLOR_BGR2GRAY);
        Core.normalize(gray, gray, 0, 255, Core.NORM_MINMAX);
        return gray;
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        JFrame frame = new JFrame("BasicPanel");
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        frame.setSize(1200, 600);
        imagematch panel = new imagematch();
        frame.setContentPane(panel);
        frame.setVisible(true);

        // 比較画像01 — fixed reference image.
        Mat image01 = Imgcodecs.imread("./resources/test01s.jpg");
        if (image01.empty()) {
            // FIX: the original proceeded with an empty Mat and crashed inside
            // cvtColor; fail fast with a clear message instead.
            System.out.println(" --(!) Could not read ./resources/test01s.jpg -- ");
            return;
        }

        // ------ SIFTの処理: loop-invariant setup ------
        // FIX: detector/extractor/matcher and the reference image's keypoints
        // and descriptors never change, so compute them once instead of once
        // per frame as the original did.
        FeatureDetector siftDetector = FeatureDetector.create(FeatureDetector.SIFT);
        DescriptorExtractor siftExtractor = DescriptorExtractor.create(DescriptorExtractor.SIFT);
        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);

        Mat grayImage01 = toNormalizedGray(image01);
        MatOfKeyPoint keyPoint01 = new MatOfKeyPoint();
        siftDetector.detect(grayImage01, keyPoint01);
        Mat descripters01 = new Mat();
        siftExtractor.compute(grayImage01, keyPoint01, descripters01);

        // 比較画像02 — live webcam frames.
        Mat webcam_image = new Mat();
        VideoCapture capture = new VideoCapture(0);
        try {
            if (!capture.isOpened()) {
                System.out.println(" --(!) Could not open webcam -- ");
                return;
            }
            while (true) {
                capture.read(webcam_image);
                if (webcam_image.empty()) {
                    System.out.println(" --(!) No captured frame -- ");
                    continue;
                }

                Mat grayImage02 = toNormalizedGray(webcam_image);
                MatOfKeyPoint keyPoint02 = new MatOfKeyPoint();
                siftDetector.detect(grayImage02, keyPoint02);
                Mat descripters02 = new Mat();
                siftExtractor.compute(grayImage02, keyPoint02, descripters02);

                MatOfDMatch matchs = new MatOfDMatch();
                matcher.match(descripters01, descripters02, matchs);

                // 上位50点以外の点を除去する.
                // FIX: the original copied the FIRST 50 matches without sorting
                // (so they were not the "top" 50) and threw
                // ArrayIndexOutOfBoundsException whenever fewer than 50 matches
                // existed. Sort by distance (smaller = better) and clamp.
                DMatch[] all = matchs.toArray();
                Arrays.sort(all, Comparator.comparingDouble(m -> m.distance));
                int keep = Math.min(50, all.length);
                matchs.fromArray(Arrays.copyOf(all, keep));

                Mat matchedImage = new Mat();
                Features2d.drawMatches(image01, keyPoint01, webcam_image, keyPoint02, matchs, matchedImage);

                // 出力画像 at SIFT — show the match visualization.
                // (The original also resized the raw webcam frame into an
                // unused BufferedImage; that dead code is removed.)
                BufferedImage matchedImage02 = matToBufferedImage(matchedImage);
                frame.setSize(matchedImage.width() + 300, matchedImage.height() + 60);
                panel.setimage(matchedImage02);
                panel.repaint();
            }
        } finally {
            // FIX: release the camera handle; the original never released it.
            capture.release();
        }
    }
}

 
 
特徴点検出のアルゴリズムを変更したい場合は,`FeatureDetector.create(...)` と `DescriptorExtractor.create(...)` に渡している `SIFT` をその他のアルゴリズム(SURFやAKAZEなど)に変更してください.
 
 
結果
 
SIFT

 
SURF

 
AKAZE

 
 
動画からの検出になるとAKAZEのハイスピードが顕著でした.検出結果も比較もかなり正確だし,AKAZEいいですね.