diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/samples/widget_pose.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/samples/widget_pose.cpp new file mode 100644 index 00000000..130044f1 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/samples/widget_pose.cpp @@ -0,0 +1,99 @@ +/** + * @file widget_pose.cpp + * @brief Setting pose of a widget + * @author Ozan Cagri Tonkal + */ + +#include +#include +#include + +using namespace cv; +using namespace std; + +/** + * @function help + * @brief Display instructions to use this tutorial program + */ +static void help() +{ + cout + << "--------------------------------------------------------------------------" << endl + << "This program shows how to visualize a cube rotated around (1,1,1) and shifted " + << "using Rodrigues vector." << endl + << "Usage:" << endl + << "./widget_pose" << endl + << endl; +} + +/** + * @function main + */ +int main() +{ + help(); + + /// Create a window + viz::Viz3d myWindow("Coordinate Frame"); + + /// Add coordinate axes + myWindow.showWidget("Coordinate Widget", viz::WCoordinateSystem()); + + /// Add line to represent (1,1,1) axis + viz::WLine axis(Point3f(-1.0f,-1.0f,-1.0f), Point3f(1.0f,1.0f,1.0f)); + axis.setRenderingProperty(viz::LINE_WIDTH, 4.0); + myWindow.showWidget("Line Widget", axis); + + /// Construct a cube widget + viz::WCube cube_widget(Point3f(0.5,0.5,0.0), Point3f(0.0,0.0,-0.5), true, viz::Color::blue()); + cube_widget.setRenderingProperty(viz::LINE_WIDTH, 4.0); + myWindow.showWidget("Cube Widget", cube_widget); + + /// Rodrigues vector + Mat rot_vec = Mat::zeros(1,3,CV_32F); + float translation_phase = 0.0, translation = 0.0; + + rot_vec.at(0, 0) += (float)CV_PI * 0.01f; + rot_vec.at(0, 1) += (float)CV_PI * 0.01f; + rot_vec.at(0, 2) += (float)CV_PI * 0.01f; + + /// Shift on (1,1,1) + translation_phase += (float)CV_PI * 0.01f; + translation = sin(translation_phase); + + Mat rot_mat; + Rodrigues(rot_vec, rot_mat); + cout << "rot_mat = " << rot_mat << 
endl; + /// Construct pose + Affine3f pose(rot_mat, Vec3f(translation, translation, translation)); + Affine3f pose2(pose.matrix); + cout << "pose = " << pose.matrix << endl; + cout << "pose = " << pose2.matrix << endl; + + + + while(!myWindow.wasStopped()) + { + /* Rotation using rodrigues */ + /// Rotate around (1,1,1) + rot_vec.at(0,0) += (float)CV_PI * 0.01f; + rot_vec.at(0,1) += (float)CV_PI * 0.01f; + rot_vec.at(0,2) += (float)CV_PI * 0.01f; + + /// Shift on (1,1,1) + translation_phase += (float)CV_PI * 0.01f; + translation = sin(translation_phase); + + Mat rot_mat1; + Rodrigues(rot_vec, rot_mat1); + + /// Construct pose + Affine3f pose1(rot_mat1, Vec3f(translation, translation, translation)); + + myWindow.setWidgetPose("Cube Widget", pose1); + + myWindow.spinOnce(1, true); + } + + return 0; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/clouds.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/clouds.cpp new file mode 100644 index 00000000..c9a13fcf --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/clouds.cpp @@ -0,0 +1,530 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Ozan Tonkal, ozantonkal@gmail.com +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "precomp.hpp" + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// Point Cloud Widget implementation + +cv::viz::WCloud::WCloud(InputArray cloud, InputArray colors) +{ + WCloud cloud_widget(cloud, colors, cv::noArray()); + *this = cloud_widget; +} + +cv::viz::WCloud::WCloud(InputArray cloud, const Color &color) +{ + WCloud cloud_widget(cloud, Mat(cloud.size(), CV_8UC3, color)); + *this = cloud_widget; +} + +cv::viz::WCloud::WCloud(InputArray cloud, const Color &color, InputArray normals) +{ + WCloud cloud_widget(cloud, Mat(cloud.size(), CV_8UC3, color), normals); + *this = cloud_widget; +} + +cv::viz::WCloud::WCloud(cv::InputArray cloud, cv::InputArray colors, cv::InputArray normals) +{ + CV_Assert(!cloud.empty() && !colors.empty()); + + vtkSmartPointer cloud_source = vtkSmartPointer::New(); + cloud_source->SetColorCloudNormals(cloud, colors, normals); + cloud_source->Update(); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, cloud_source->GetOutput()); + mapper->SetScalarModeToUsePointData(); +#if VTK_MAJOR_VERSION < 8 + mapper->ImmediateModeRenderingOff(); +#endif + mapper->SetScalarRange(0, 255); + mapper->ScalarVisibilityOn(); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->GetProperty()->SetInterpolationToFlat(); + actor->GetProperty()->BackfaceCullingOn(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WCloud cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// Painted Cloud Widget implementation + +cv::viz::WPaintedCloud::WPaintedCloud(InputArray cloud) +{ + vtkSmartPointer cloud_source = vtkSmartPointer::New(); + cloud_source->SetCloud(cloud); + cloud_source->Update(); + + Vec6d 
bounds(cloud_source->GetOutput()->GetPoints()->GetBounds()); + + vtkSmartPointer elevation = vtkSmartPointer::New(); + elevation->SetInputConnection(cloud_source->GetOutputPort()); + elevation->SetLowPoint(bounds[0], bounds[2], bounds[4]); + elevation->SetHighPoint(bounds[1], bounds[3], bounds[5]); + elevation->SetScalarRange(0.0, 1.0); + elevation->Update(); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, vtkPolyData::SafeDownCast(elevation->GetOutput())); +#if VTK_MAJOR_VERSION < 8 + mapper->ImmediateModeRenderingOff(); +#endif + mapper->ScalarVisibilityOn(); + mapper->SetColorModeToMapScalars(); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->GetProperty()->SetInterpolationToFlat(); + actor->GetProperty()->BackfaceCullingOn(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WPaintedCloud::WPaintedCloud(InputArray cloud, const Point3d& p1, const Point3d& p2) +{ + vtkSmartPointer cloud_source = vtkSmartPointer::New(); + cloud_source->SetCloud(cloud); + + vtkSmartPointer elevation = vtkSmartPointer::New(); + elevation->SetInputConnection(cloud_source->GetOutputPort()); + elevation->SetLowPoint(p1.x, p1.y, p1.z); + elevation->SetHighPoint(p2.x, p2.y, p2.z); + elevation->SetScalarRange(0.0, 1.0); + elevation->Update(); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, vtkPolyData::SafeDownCast(elevation->GetOutput())); +#if VTK_MAJOR_VERSION < 8 + mapper->ImmediateModeRenderingOff(); +#endif + mapper->ScalarVisibilityOn(); + mapper->SetColorModeToMapScalars(); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->GetProperty()->SetInterpolationToFlat(); + actor->GetProperty()->BackfaceCullingOn(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WPaintedCloud::WPaintedCloud(InputArray cloud, const Point3d& p1, const Point3d& p2, const Color& c1, const Color c2) +{ + vtkSmartPointer cloud_source = 
vtkSmartPointer::New(); + cloud_source->SetCloud(cloud); + + vtkSmartPointer elevation = vtkSmartPointer::New(); + elevation->SetInputConnection(cloud_source->GetOutputPort()); + elevation->SetLowPoint(p1.x, p1.y, p1.z); + elevation->SetHighPoint(p2.x, p2.y, p2.z); + elevation->SetScalarRange(0.0, 1.0); + elevation->Update(); + + Color vc1 = vtkcolor(c1), vc2 = vtkcolor(c2); + vtkSmartPointer color_transfer = vtkSmartPointer::New(); + color_transfer->SetColorSpaceToRGB(); + color_transfer->AddRGBPoint(0.0, vc1[0], vc1[1], vc1[2]); + color_transfer->AddRGBPoint(1.0, vc2[0], vc2[1], vc2[2]); + color_transfer->SetScaleToLinear(); + color_transfer->Build(); + + //if in future some need to replace color table with real scalars, then this can be done usine next calls: + //vtkDataArray *float_scalars = vtkPolyData::SafeDownCast(elevation->GetOutput())->GetPointData()->GetArray("Elevation"); + //vtkSmartPointer polydata = cloud_source->GetOutput(); + //polydata->GetPointData()->SetScalars(color_transfer->MapScalars(float_scalars, VTK_COLOR_MODE_DEFAULT, 0)); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, vtkPolyData::SafeDownCast(elevation->GetOutput())); +#if VTK_MAJOR_VERSION < 8 + mapper->ImmediateModeRenderingOff(); +#endif + mapper->ScalarVisibilityOn(); + mapper->SetColorModeToMapScalars(); + mapper->SetLookupTable(color_transfer); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->GetProperty()->SetInterpolationToFlat(); + actor->GetProperty()->BackfaceCullingOn(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WPaintedCloud cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// Cloud Collection Widget implementation + +cv::viz::WCloudCollection::WCloudCollection() +{ + vtkSmartPointer append_filter = 
vtkSmartPointer::New(); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + mapper->SetInputConnection(append_filter->GetOutputPort()); + mapper->SetScalarModeToUsePointData(); +#if VTK_MAJOR_VERSION < 8 + mapper->ImmediateModeRenderingOff(); +#endif + mapper->SetScalarRange(0, 255); + mapper->ScalarVisibilityOn(); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetNumberOfCloudPoints(1); + actor->GetProperty()->SetInterpolationToFlat(); + actor->GetProperty()->BackfaceCullingOn(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +void cv::viz::WCloudCollection::addCloud(InputArray cloud, InputArray colors, const Affine3d &pose) +{ + vtkSmartPointer source = vtkSmartPointer::New(); + source->SetColorCloud(cloud, colors); + + vtkSmartPointer polydata = VtkUtils::TransformPolydata(source->GetOutputPort(), pose); + + vtkSmartPointer actor = vtkLODActor::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("Correctness check." && actor); + + vtkSmartPointer producer = actor->GetMapper()->GetInputConnection(0, 0)->GetProducer(); + vtkSmartPointer append_filter = vtkAppendPolyData::SafeDownCast(producer); + VtkUtils::AddInputData(append_filter, polydata); + + actor->SetNumberOfCloudPoints(std::max(1, actor->GetNumberOfCloudPoints() + polydata->GetNumberOfPoints()/10)); +} + +void cv::viz::WCloudCollection::addCloud(InputArray cloud, const Color &color, const Affine3d &pose) +{ + addCloud(cloud, Mat(cloud.size(), CV_8UC3, color), pose); +} + +void cv::viz::WCloudCollection::finalize() +{ + vtkSmartPointer actor = vtkLODActor::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("Incompatible widget type." && actor); + + vtkSmartPointer mapper = vtkPolyDataMapper::SafeDownCast(actor->GetMapper()); + CV_Assert("Need to add at least one cloud." 
&& mapper); + + vtkSmartPointer producer = mapper->GetInputConnection(0, 0)->GetProducer(); + vtkSmartPointer append_filter = vtkAppendPolyData::SafeDownCast(producer); + append_filter->Update(); + + vtkSmartPointer polydata = append_filter->GetOutput(); + mapper->RemoveInputConnection(0, 0); + VtkUtils::SetInputData(mapper, polydata); +} + +template<> cv::viz::WCloudCollection cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// Cloud Normals Widget implementation + +cv::viz::WCloudNormals::WCloudNormals(InputArray _cloud, InputArray _normals, int level, double scale, const Color &color) +{ + Mat cloud = _cloud.getMat(); + Mat normals = _normals.getMat(); + + CV_Assert(cloud.type() == CV_32FC3 || cloud.type() == CV_64FC3 || cloud.type() == CV_32FC4 || cloud.type() == CV_64FC4); + CV_Assert(cloud.size() == normals.size() && cloud.type() == normals.type()); + + int sqlevel = (int)std::sqrt((double)level); + int ystep = (cloud.cols > 1 && cloud.rows > 1) ? sqlevel : 1; + int xstep = (cloud.cols > 1 && cloud.rows > 1) ? sqlevel : level; + + vtkSmartPointer points = vtkSmartPointer::New(); + points->SetDataType(cloud.depth() == CV_32F ? 
VTK_FLOAT : VTK_DOUBLE); + + vtkSmartPointer lines = vtkSmartPointer::New(); + + int s_chs = cloud.channels(); + int n_chs = normals.channels(); + int total = 0; + + for(int y = 0; y < cloud.rows; y += ystep) + { + if (cloud.depth() == CV_32F) + { + const float *srow = cloud.ptr(y); + const float *send = srow + cloud.cols * s_chs; + const float *nrow = normals.ptr(y); + + for (; srow < send; srow += xstep * s_chs, nrow += xstep * n_chs) + if (!isNan(srow) && !isNan(nrow)) + { + Vec3f endp = Vec3f(srow) + Vec3f(nrow) * (float)scale; + + points->InsertNextPoint(srow); + points->InsertNextPoint(endp.val); + + lines->InsertNextCell(2); + lines->InsertCellPoint(total++); + lines->InsertCellPoint(total++); + } + } + else + { + const double *srow = cloud.ptr(y); + const double *send = srow + cloud.cols * s_chs; + const double *nrow = normals.ptr(y); + + for (; srow < send; srow += xstep * s_chs, nrow += xstep * n_chs) + if (!isNan(srow) && !isNan(nrow)) + { + Vec3d endp = Vec3d(srow) + Vec3d(nrow) * (double)scale; + + points->InsertNextPoint(srow); + points->InsertNextPoint(endp.val); + + lines->InsertNextCell(2); + lines->InsertCellPoint(total++); + lines->InsertCellPoint(total++); + } + } + } + + vtkSmartPointer polydata = vtkSmartPointer::New(); + polydata->SetPoints(points); + polydata->SetLines(lines); + VtkUtils::FillScalars(polydata, color); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WCloudNormals cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// Mesh Widget implementation + +cv::viz::WMesh::WMesh(const Mesh &mesh) +{ + CV_Assert(mesh.cloud.rows == 1 && mesh.polygons.type() == CV_32SC1); + + vtkSmartPointer source 
= vtkSmartPointer::New(); + source->SetColorCloudNormalsTCoords(mesh.cloud, mesh.colors, mesh.normals, mesh.tcoords); + source->Update(); + + Mat lookup_buffer(1, (int)mesh.cloud.total(), CV_32SC1); + int *lookup = lookup_buffer.ptr(); + for(int y = 0, index = 0; y < mesh.cloud.rows; ++y) + { + int s_chs = mesh.cloud.channels(); + + if (mesh.cloud.depth() == CV_32F) + { + const float* srow = mesh.cloud.ptr(y); + const float* send = srow + mesh.cloud.cols * s_chs; + + for (; srow != send; srow += s_chs, ++lookup) + if (!isNan(srow[0]) && !isNan(srow[1]) && !isNan(srow[2])) + *lookup = index++; + } + + if (mesh.cloud.depth() == CV_64F) + { + const double* srow = mesh.cloud.ptr(y); + const double* send = srow + mesh.cloud.cols * s_chs; + + for (; srow != send; srow += s_chs, ++lookup) + if (!isNan(srow[0]) && !isNan(srow[1]) && !isNan(srow[2])) + *lookup = index++; + } + } + lookup = lookup_buffer.ptr(); + + vtkSmartPointer polydata = source->GetOutput(); + polydata->SetVerts(0); + + const int * polygons = mesh.polygons.ptr(); + vtkSmartPointer cell_array = vtkSmartPointer::New(); + + int idx = 0; + size_t polygons_size = mesh.polygons.total(); + for (size_t i = 0; i < polygons_size; ++idx) + { + int n_points = polygons[i++]; + + cell_array->InsertNextCell(n_points); + for (int j = 0; j < n_points; ++j, ++idx) + cell_array->InsertCellPoint(lookup[polygons[i++]]); + } + cell_array->GetData()->SetNumberOfValues(idx); + cell_array->Squeeze(); + polydata->SetStrips(cell_array); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + mapper->SetScalarModeToUsePointData(); +#if VTK_MAJOR_VERSION < 8 + mapper->ImmediateModeRenderingOff(); +#endif + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + //actor->SetNumberOfCloudPoints(std::max(1, polydata->GetNumberOfPoints() / 10)); + actor->GetProperty()->SetRepresentationToSurface(); + actor->GetProperty()->BackfaceCullingOff(); // Backface culling is off for higher efficiency + 
actor->GetProperty()->SetInterpolationToFlat(); + actor->GetProperty()->EdgeVisibilityOff(); + actor->GetProperty()->ShadingOff(); + actor->SetMapper(mapper); + + if (!mesh.texture.empty()) + { + vtkSmartPointer image_source = vtkSmartPointer::New(); + image_source->SetImage(mesh.texture); + + vtkSmartPointer texture = vtkSmartPointer::New(); + texture->SetInputConnection(image_source->GetOutputPort()); + actor->SetTexture(texture); + } + + WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WMesh::WMesh(InputArray cloud, InputArray polygons, InputArray colors, InputArray normals) +{ + Mesh mesh; + mesh.cloud = cloud.getMat(); + mesh.colors = colors.getMat(); + mesh.normals = normals.getMat(); + mesh.polygons = polygons.getMat(); + *this = WMesh(mesh); +} + +template<> CV_EXPORTS cv::viz::WMesh cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// Widget Merger implementation + +cv::viz::WWidgetMerger::WWidgetMerger() +{ + vtkSmartPointer append_filter = vtkSmartPointer::New(); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + mapper->SetInputConnection(append_filter->GetOutputPort()); + mapper->SetScalarModeToUsePointData(); +#if VTK_MAJOR_VERSION < 8 + mapper->ImmediateModeRenderingOff(); +#endif + mapper->SetScalarRange(0, 255); + mapper->ScalarVisibilityOn(); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->GetProperty()->SetInterpolationToFlat(); + actor->GetProperty()->BackfaceCullingOn(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +void cv::viz::WWidgetMerger::addWidget(const Widget3D& widget, const Affine3d &pose) +{ + vtkActor *widget_actor = vtkActor::SafeDownCast(WidgetAccessor::getProp(widget)); + CV_Assert("Widget is not 3D actor." 
&& widget_actor); + + vtkSmartPointer widget_mapper = vtkPolyDataMapper::SafeDownCast(widget_actor->GetMapper()); + CV_Assert("Widget doesn't have a polydata mapper" && widget_mapper); + widget_mapper->Update(); + + vtkSmartPointer actor = vtkActor::SafeDownCast(WidgetAccessor::getProp(*this)); + vtkSmartPointer producer = actor->GetMapper()->GetInputConnection(0, 0)->GetProducer(); + vtkSmartPointer append_filter = vtkAppendPolyData::SafeDownCast(producer); + CV_Assert("Correctness check" && append_filter); + + VtkUtils::AddInputData(append_filter, VtkUtils::TransformPolydata(widget_mapper->GetInput(), pose)); +} + +void cv::viz::WWidgetMerger::finalize() +{ + vtkSmartPointer actor = vtkActor::SafeDownCast(WidgetAccessor::getProp(*this)); + vtkSmartPointer producer = actor->GetMapper()->GetInputConnection(0, 0)->GetProducer(); + vtkSmartPointer append_filter = vtkAppendPolyData::SafeDownCast(producer); + CV_Assert("Correctness check" && append_filter); + append_filter->Update(); + + vtkSmartPointer mapper = vtkPolyDataMapper::SafeDownCast(actor->GetMapper()); + mapper->RemoveInputConnection(0, 0); + VtkUtils::SetInputData(mapper, append_filter->GetOutput()); + mapper->Modified(); +} + +template<> CV_EXPORTS cv::viz::WWidgetMerger cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/precomp.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/precomp.hpp new file mode 100644 index 00000000..4c4bf7c5 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/precomp.hpp @@ -0,0 +1,350 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Ozan Tonkal, ozantonkal@gmail.com +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#ifndef __OPENCV_VIZ_PRECOMP_HPP__ +#define __OPENCV_VIZ_PRECOMP_HPP__ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if !defined(_WIN32) || defined(__CYGWIN__) +# include /* unlink */ +#else +# include /* unlink */ +#endif + +#include "vtk/vtkOBJWriter.h" +#include "vtk/vtkXYZWriter.h" +#include "vtk/vtkXYZReader.h" +#include "vtk/vtkCloudMatSink.h" +#include "vtk/vtkCloudMatSource.h" +#include "vtk/vtkTrajectorySource.h" +#include "vtk/vtkImageMatSource.h" + +#if VTK_MAJOR_VERSION >= 9 +typedef vtkIdType const * CellIterT; +#else +typedef vtkIdType * CellIterT; +#endif + +#include +#include +#include +#include + + +namespace cv +{ + namespace viz + { + typedef std::map > WidgetActorMap; + + struct VizMap + { + typedef std::map type; + typedef type::iterator iterator; + + type m; + ~VizMap(); + void replace_clear(); + }; + + class VizStorage + { + public: + static void unregisterAll(); + + //! 
window names automatically have Viz - prefix even though not provided by the users + static String generateWindowName(const String &window_name); + + private: + VizStorage(); // Static + + static void add(const Viz3d& window); + static Viz3d& get(const String &window_name); + static void remove(const String &window_name); + static bool windowExists(const String &window_name); + static void removeUnreferenced(); + + static VizMap storage; + friend class Viz3d; + + static VizStorage init; + }; + + template inline _Tp normalized(const _Tp& v) { return v * 1/norm(v); } + + template inline bool isNan(const _Tp* data) + { + return isNan(data[0]) || isNan(data[1]) || isNan(data[2]); + } + + inline vtkSmartPointer getActor(const Widget3D& widget) + { + return vtkActor::SafeDownCast(WidgetAccessor::getProp(widget)); + } + + inline vtkSmartPointer getPolyData(const Widget3D& widget) + { + vtkSmartPointer mapper = getActor(widget)->GetMapper(); + return vtkPolyData::SafeDownCast(mapper->GetInput()); + } + + inline vtkSmartPointer vtkmatrix(const cv::Matx44d &matrix) + { + vtkSmartPointer vtk_matrix = vtkSmartPointer::New(); + vtk_matrix->DeepCopy(matrix.val); + return vtk_matrix; + } + + inline Color vtkcolor(const Color& color) + { + Color scaled_color = color * (1.0/255.0); + std::swap(scaled_color[0], scaled_color[2]); + return scaled_color; + } + + inline Vec3d get_random_vec(double from = -10.0, double to = 10.0) + { + RNG& rng = theRNG(); + return Vec3d(rng.uniform(from, to), rng.uniform(from, to), rng.uniform(from, to)); + } + + struct VtkUtils + { + template + static void SetInputData(vtkSmartPointer filter, vtkPolyData* polydata) + { + #if VTK_MAJOR_VERSION <= 5 + filter->SetInput(polydata); + #else + filter->SetInputData(polydata); + #endif + } + template + static void SetSourceData(vtkSmartPointer filter, vtkPolyData* polydata) + { + #if VTK_MAJOR_VERSION <= 5 + filter->SetSource(polydata); + #else + filter->SetSourceData(polydata); + #endif + } + + template + 
static void SetInputData(vtkSmartPointer filter, vtkImageData* polydata) + { + #if VTK_MAJOR_VERSION <= 5 + filter->SetInput(polydata); + #else + filter->SetInputData(polydata); + #endif + } + + template + static void AddInputData(vtkSmartPointer filter, vtkPolyData *polydata) + { + #if VTK_MAJOR_VERSION <= 5 + filter->AddInput(polydata); + #else + filter->AddInputData(polydata); + #endif + } + + static vtkSmartPointer FillScalars(size_t size, const Color& color) + { + Vec3b rgb = Vec3d(color[2], color[1], color[0]); + Vec3b* color_data = new Vec3b[size]; + std::fill(color_data, color_data + size, rgb); + + vtkSmartPointer scalars = vtkSmartPointer::New(); + scalars->SetName("Colors"); + scalars->SetNumberOfComponents(3); + scalars->SetNumberOfTuples((vtkIdType)size); + scalars->SetArray(color_data->val, (vtkIdType)(size * 3), 0, vtkUnsignedCharArray::VTK_DATA_ARRAY_DELETE); + return scalars; + } + + static vtkSmartPointer FillScalars(vtkSmartPointer polydata, const Color& color) + { + return polydata->GetPointData()->SetScalars(FillScalars(polydata->GetNumberOfPoints(), color)), polydata; + } + + static vtkSmartPointer ComputeNormals(vtkSmartPointer polydata) + { + vtkSmartPointer normals_generator = vtkSmartPointer::New(); + normals_generator->ComputePointNormalsOn(); + normals_generator->ComputeCellNormalsOff(); + normals_generator->SetFeatureAngle(0.1); + normals_generator->SetSplitting(0); + normals_generator->SetConsistency(1); + normals_generator->SetAutoOrientNormals(0); + normals_generator->SetFlipNormals(0); + normals_generator->SetNonManifoldTraversal(1); + VtkUtils::SetInputData(normals_generator, polydata); + normals_generator->Update(); + return normals_generator->GetOutput(); + } + + static vtkSmartPointer TransformPolydata(vtkSmartPointer algorithm_output_port, const Affine3d& pose) + { + vtkSmartPointer transform = vtkSmartPointer::New(); + transform->SetMatrix(vtkmatrix(pose.matrix)); + + vtkSmartPointer transform_filter = vtkSmartPointer::New(); 
+ transform_filter->SetTransform(transform); + transform_filter->SetInputConnection(algorithm_output_port); + transform_filter->Update(); + return transform_filter->GetOutput(); + } + + static vtkSmartPointer TransformPolydata(vtkSmartPointer polydata, const Affine3d& pose) + { + vtkSmartPointer transform = vtkSmartPointer::New(); + transform->SetMatrix(vtkmatrix(pose.matrix)); + + vtkSmartPointer transform_filter = vtkSmartPointer::New(); + VtkUtils::SetInputData(transform_filter, polydata); + transform_filter->SetTransform(transform); + transform_filter->Update(); + return transform_filter->GetOutput(); + } + }; + + vtkSmartPointer vtkCocoaRenderWindowInteractorNew(); + } +} + +#include "vtk/vtkVizInteractorStyle.hpp" +#include "vizimpl.hpp" + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/shapes.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/shapes.cpp new file mode 100644 index 00000000..399106dd --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/shapes.cpp @@ -0,0 +1,1107 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Ozan Tonkal, ozantonkal@gmail.com +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "precomp.hpp" + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// line widget implementation +cv::viz::WLine::WLine(const Point3d &pt1, const Point3d &pt2, const Color &color) +{ + vtkSmartPointer line = vtkSmartPointer::New(); + line->SetPoint1(pt1.x, pt1.y, pt1.z); + line->SetPoint2(pt2.x, pt2.y, pt2.z); + line->Update(); + + vtkSmartPointer polydata = line->GetOutput(); + VtkUtils::FillScalars(polydata, color); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WLine cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// sphere widget implementation + +cv::viz::WSphere::WSphere(const Point3d ¢er, double radius, int sphere_resolution, const Color &color) +{ + vtkSmartPointer sphere = vtkSmartPointer::New(); + sphere->SetRadius(radius); + sphere->SetCenter(center.x, center.y, center.z); + sphere->SetPhiResolution(sphere_resolution); + sphere->SetThetaResolution(sphere_resolution); + sphere->LatLongTessellationOff(); + sphere->Update(); + + vtkSmartPointer polydata = sphere->GetOutput(); + VtkUtils::FillScalars(polydata, color); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WSphere cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// plane widget implementation + 
+cv::viz::WPlane::WPlane(const Size2d& size, const Color &color) +{ + vtkSmartPointer plane = vtkSmartPointer::New(); + plane->SetOrigin(-0.5 * size.width, -0.5 * size.height, 0.0); + plane->SetPoint1( 0.5 * size.width, -0.5 * size.height, 0.0); + plane->SetPoint2(-0.5 * size.width, 0.5 * size.height, 0.0); + plane->Update(); + + vtkSmartPointer polydata = plane->GetOutput(); + VtkUtils::FillScalars(polydata, color); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + actor->GetProperty()->LightingOff(); + + WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WPlane::WPlane(const Point3d& center, const Vec3d& normal, const Vec3d& new_yaxis, const Size2d& size, const Color &color) +{ + Vec3d zvec = normalize(normal); + Vec3d xvec = normalize(new_yaxis.cross(zvec)); + Vec3d yvec = zvec.cross(xvec); + + WPlane plane(size, color); + plane.applyTransform(makeTransformToGlobal(xvec, yvec, zvec, center)); + *this = plane; +} + +template<> cv::viz::WPlane cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// arrow widget implementation + +cv::viz::WArrow::WArrow(const Point3d& pt1, const Point3d& pt2, double thickness, const Color &color) +{ + vtkSmartPointer arrow_source = vtkSmartPointer::New(); + arrow_source->SetShaftRadius(thickness); + arrow_source->SetTipRadius(thickness * 3.0); + arrow_source->SetTipLength(thickness * 10.0); + + Vec3d arbitrary = get_random_vec(); + Vec3d start_point(pt1.x, pt1.y, pt1.z), end_point(pt2.x, pt2.y, pt2.z); + + double length = norm(end_point - start_point); + + Vec3d xvec = normalized(end_point - start_point); + Vec3d zvec = normalized(xvec.cross(arbitrary)); + Vec3d yvec = zvec.cross(xvec); + + Matx33d R = makeTransformToGlobal(xvec, yvec, zvec).rotation(); + 
Affine3d transform_with_scale(R * length, start_point); + + vtkSmartPointer polydata = VtkUtils::TransformPolydata(arrow_source->GetOutputPort(), transform_with_scale); + VtkUtils::FillScalars(polydata, color); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WArrow cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// circle widget implementation + +cv::viz::WCircle::WCircle(double radius, double thickness, const Color &color) +{ + vtkSmartPointer disk = vtkSmartPointer::New(); + disk->SetCircumferentialResolution(30); + disk->SetInnerRadius(radius - thickness); + disk->SetOuterRadius(radius + thickness); + disk->Update(); + + vtkSmartPointer polydata = disk->GetOutput(); + VtkUtils::FillScalars(polydata, color); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->GetProperty()->LightingOff(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WCircle::WCircle(double radius, const Point3d& center, const Vec3d& normal, double thickness, const Color &color) +{ + Vec3d arbitrary = get_random_vec(); + Vec3d zvec = normalized(normal); + Vec3d xvec = normalized(zvec.cross(arbitrary)); + Vec3d yvec = zvec.cross(xvec); + + WCircle circle(radius, thickness, color); + circle.applyTransform(makeTransformToGlobal(xvec, yvec, zvec, center)); + *this = circle; +} + +template<> cv::viz::WCircle cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// WCone 
widget implementation + +cv::viz::WCone::WCone(double length, double radius, int resolution, const Color &color) +{ + vtkSmartPointer cone_source = vtkSmartPointer::New(); + cone_source->SetCenter(length*0.5, 0.0, 0.0); + cone_source->SetHeight(length); + cone_source->SetRadius(radius); + cone_source->SetResolution(resolution); + cone_source->Update(); + + vtkSmartPointer polydata = cone_source->GetOutput(); + VtkUtils::FillScalars(polydata, color); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WCone::WCone(double radius, const Point3d& center, const Point3d& tip, int resolution, const Color &color) +{ + Vec3d arbitrary = get_random_vec(); + Vec3d xvec = normalized(Vec3d(tip - center)); + Vec3d zvec = normalized(xvec.cross(arbitrary)); + Vec3d yvec = zvec.cross(xvec); + + WCone circle(norm(tip - center), radius, resolution, color); + circle.applyTransform(makeTransformToGlobal(xvec, yvec, zvec, center)); + *this = circle; +} + +template<> cv::viz::WCone cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// cylinder widget implementation + +cv::viz::WCylinder::WCylinder(const Point3d& axis_point1, const Point3d& axis_point2, double radius, int numsides, const Color &color) +{ + vtkSmartPointer line = vtkSmartPointer::New(); + line->SetPoint1(axis_point1.x, axis_point1.y, axis_point1.z); + line->SetPoint2(axis_point2.x, axis_point2.y, axis_point2.z); + + vtkSmartPointer tuber = vtkSmartPointer::New(); + tuber->SetInputConnection(line->GetOutputPort()); + tuber->SetNumberOfSides(numsides); + tuber->SetRadius(radius); + tuber->Update(); + + vtkSmartPointer polydata = tuber->GetOutput(); + VtkUtils::FillScalars(polydata, color); + + 
vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WCylinder cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// cylinder widget implementation + +cv::viz::WCube::WCube(const Point3d& min_point, const Point3d& max_point, bool wire_frame, const Color &color) +{ + double bounds[6]; + bounds[0] = std::min(min_point.x, max_point.x); + bounds[1] = std::max(min_point.x, max_point.x); + bounds[2] = std::min(min_point.y, max_point.y); + bounds[3] = std::max(min_point.y, max_point.y); + bounds[4] = std::min(min_point.z, max_point.z); + bounds[5] = std::max(min_point.z, max_point.z); + + vtkSmartPointer cube; + if (wire_frame) + { + cube = vtkSmartPointer::New(); + vtkOutlineSource::SafeDownCast(cube)->SetBounds(bounds); + } + else + { + cube = vtkSmartPointer::New(); + vtkCubeSource::SafeDownCast(cube)->SetBounds(bounds); + } + cube->Update(); + vtkSmartPointer polydata =cube->GetOutput(); + VtkUtils::FillScalars(polydata, color); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WCube cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// coordinate system widget implementation + +cv::viz::WCoordinateSystem::WCoordinateSystem(double scale) +{ + vtkSmartPointer axes = vtkSmartPointer::New(); + axes->SetOrigin(0, 0, 0); + axes->SetScaleFactor(scale); + axes->Update(); + + vtkSmartPointer colors = 
vtkSmartPointer::New(); + colors->SetNumberOfComponents(3); + colors->InsertNextTuple3(255, 0, 0); + colors->InsertNextTuple3(255, 0, 0); + colors->InsertNextTuple3(0, 255, 0); + colors->InsertNextTuple3(0, 255, 0); + colors->InsertNextTuple3(0, 0, 255); + colors->InsertNextTuple3(0, 0, 255); + + vtkSmartPointer polydata = axes->GetOutput(); + polydata->GetPointData()->SetScalars(colors); + + vtkSmartPointer tube_filter = vtkSmartPointer::New(); + VtkUtils::SetInputData(tube_filter, polydata); + tube_filter->SetRadius(axes->GetScaleFactor() / 50.0); + tube_filter->SetNumberOfSides(6); + tube_filter->Update(); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + mapper->SetScalarModeToUsePointData(); + VtkUtils::SetInputData(mapper, tube_filter->GetOutput()); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WCoordinateSystem cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// polyline widget implementation + +cv::viz::WPolyLine::WPolyLine(InputArray points, InputArray colors) +{ + vtkSmartPointer cloud_source = vtkSmartPointer::New(); + cloud_source->SetColorCloud(points, colors); + cloud_source->Update(); + + vtkSmartPointer polydata = cloud_source->GetOutput(); + + vtkSmartPointer cell_array = vtkSmartPointer::New(); + cell_array->Allocate(cell_array->EstimateSize(1, polydata->GetNumberOfPoints())); + cell_array->InsertNextCell(polydata->GetNumberOfPoints()); + for(vtkIdType i = 0; i < polydata->GetNumberOfPoints(); ++i) + cell_array->InsertCellPoint(i); + + polydata->SetLines(cell_array); + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + mapper->SetScalarRange(0, 255); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + 
WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WPolyLine::WPolyLine(InputArray points, const Color &color) +{ + WPolyLine polyline(points, Mat(points.size(), CV_8UC3, color)); + *this = polyline; +} + +template<> cv::viz::WPolyLine cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// grid widget implementation + + +cv::viz::WGrid::WGrid(const Vec2i &cells, const Vec2d &cells_spacing, const Color &color) +{ + vtkSmartPointer grid_data = vtkSmartPointer::New(); + + // Add 1 to dimensions because in ImageData dimensions is the number of lines + // - however here it means number of cells + grid_data->SetDimensions(cells[0]+1, cells[1]+1, 1); + grid_data->SetSpacing(cells_spacing[0], cells_spacing[1], 0.); + + // Set origin of the grid to be the middle of the grid + grid_data->SetOrigin(cells[0] * cells_spacing[0] * (-0.5), cells[1] * cells_spacing[1] * (-0.5), 0); + + // Extract the edges so we have the grid + vtkSmartPointer extract_edges = vtkSmartPointer::New(); + VtkUtils::SetInputData(extract_edges, grid_data); + extract_edges->Update(); + + vtkSmartPointer polydata = extract_edges->GetOutput(); + VtkUtils::FillScalars(polydata, color); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WGrid::WGrid(const Point3d& center, const Vec3d& normal, const Vec3d& new_yaxis, const Vec2i &cells, const Vec2d &cells_spacing, const Color &color) +{ + Vec3d zvec = normalize(normal); + Vec3d xvec = normalize(new_yaxis.cross(zvec)); + Vec3d yvec = zvec.cross(xvec); + + WGrid grid(cells, cells_spacing, color); + grid.applyTransform(makeTransformToGlobal(xvec, yvec, zvec, center)); + *this = grid; +} + +template<> cv::viz::WGrid 
cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// text3D widget implementation + +cv::viz::WText3D::WText3D(const String &text, const Point3d &position, double text_scale, bool face_camera, const Color &color) +{ + vtkSmartPointer textSource = vtkSmartPointer::New(); + textSource->SetText(text.c_str()); + textSource->Update(); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + mapper->SetInputConnection(textSource->GetOutputPort()); + + if (face_camera) + { + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + actor->SetPosition(position.x, position.y, position.z); + actor->SetScale(text_scale); + WidgetAccessor::setProp(*this, actor); + } + else + { + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + actor->SetPosition(position.x, position.y, position.z); + actor->SetScale(text_scale); + actor->GetProperty()->LightingOff(); + WidgetAccessor::setProp(*this, actor); + } + + setColor(color); +} + +void cv::viz::WText3D::setText(const String &text) +{ + vtkActor *actor = vtkActor::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("This widget does not support text." && actor); + + // Update text source + vtkPolyDataMapper *mapper = vtkPolyDataMapper::SafeDownCast(actor->GetMapper()); + vtkVectorText * textSource = vtkVectorText::SafeDownCast(mapper->GetInputConnection(0,0)->GetProducer()); + CV_Assert("This widget does not support text." && textSource); + + textSource->SetText(text.c_str()); + textSource->Modified(); + textSource->Update(); +} + +cv::String cv::viz::WText3D::getText() const +{ + vtkActor *actor = vtkActor::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("This widget does not support text." 
&& actor); + + vtkPolyDataMapper *mapper = vtkPolyDataMapper::SafeDownCast(actor->GetMapper()); + vtkVectorText * textSource = vtkVectorText::SafeDownCast(mapper->GetInputConnection(0,0)->GetProducer()); + CV_Assert("This widget does not support text." && textSource); + + return textSource->GetText(); +} + +template<> cv::viz::WText3D cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// text widget implementation + +cv::viz::WText::WText(const String &text, const Point &pos, int font_size, const Color &color) +{ + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetDisplayPosition(pos.x, pos.y); + actor->SetInput(text.c_str()); + + actor->GetProperty()->SetDisplayLocationToForeground(); + + vtkSmartPointer tprop = actor->GetTextProperty(); + tprop->SetFontSize(font_size); + tprop->SetFontFamilyToCourier(); + tprop->SetJustificationToLeft(); + tprop->BoldOn(); + + Color c = vtkcolor(color); + tprop->SetColor(c.val); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WText cv::viz::Widget::cast() const +{ + Widget2D widget = this->cast(); + return static_cast(widget); +} + +void cv::viz::WText::setText(const String &text) +{ + vtkTextActor *actor = vtkTextActor::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("This widget does not support text." && actor); + actor->SetInput(text.c_str()); +} + +cv::String cv::viz::WText::getText() const +{ + vtkTextActor *actor = vtkTextActor::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("This widget does not support text." 
&& actor); + return actor->GetInput(); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// image overlay widget implementation + +cv::viz::WImageOverlay::WImageOverlay(InputArray image, const Rect &rect) +{ + CV_Assert(!image.empty() && image.depth() == CV_8U); + vtkSmartPointer source = vtkSmartPointer::New(); + source->SetImage(image); + Size sz = image.size(); + + // Scale the image based on the Rect, and flip to match y-ais orientation + vtkSmartPointer transform = vtkSmartPointer::New(); + transform->Scale(sz.width/(double)rect.width, sz.height/(double)rect.height, 1.0); + transform->RotateX(180); + + vtkSmartPointer image_reslice = vtkSmartPointer::New(); + image_reslice->SetResliceTransform(transform); + image_reslice->SetInputConnection(source->GetOutputPort()); + image_reslice->SetOutputDimensionality(2); + image_reslice->InterpolateOn(); + image_reslice->AutoCropOutputOn(); + image_reslice->Update(); + + vtkSmartPointer image_mapper = vtkSmartPointer::New(); + image_mapper->SetInputConnection(image_reslice->GetOutputPort()); + image_mapper->SetColorWindow(255); // OpenCV color + image_mapper->SetColorLevel(127.5); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(image_mapper); + actor->SetPosition(rect.x, rect.y); + actor->GetProperty()->SetDisplayLocationToForeground(); + + WidgetAccessor::setProp(*this, actor); +} + +void cv::viz::WImageOverlay::setImage(InputArray image) +{ + CV_Assert(!image.empty() && image.depth() == CV_8U); + + vtkActor2D *actor = vtkActor2D::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("This widget does not support overlay image." && actor); + + vtkImageMapper *mapper = vtkImageMapper::SafeDownCast(actor->GetMapper()); + CV_Assert("This widget does not support overlay image." 
&& mapper); + \ + Vec6i extent; + mapper->GetInput()->GetExtent(extent.val); + Size size(extent[1], extent[3]); + + // Create the vtk image and set its parameters based on input image + vtkSmartPointer source = vtkSmartPointer::New(); + source->SetImage(image); + Size sz = image.size(); + + // Scale the image based on the Rect, and flip to match y-ais orientation + vtkSmartPointer transform = vtkSmartPointer::New(); + transform->Scale(sz.width/(double)size.width, sz.height/(double)size.height, 1.0); + transform->RotateX(180); + + vtkSmartPointer image_reslice = vtkSmartPointer::New(); + image_reslice->SetResliceTransform(transform); + image_reslice->SetInputConnection(source->GetOutputPort()); + image_reslice->SetOutputDimensionality(2); + image_reslice->InterpolateOn(); + image_reslice->AutoCropOutputOn(); + image_reslice->Update(); + + mapper->SetInputConnection(image_reslice->GetOutputPort()); +} + +template<> cv::viz::WImageOverlay cv::viz::Widget::cast() const +{ + Widget2D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// image 3D widget implementation + +cv::viz::WImage3D::WImage3D(InputArray image, const Size2d &size) +{ + CV_Assert(!image.empty() && image.depth() == CV_8U); + + vtkSmartPointer source = vtkSmartPointer::New(); + source->SetImage(image); + + vtkSmartPointer texture = vtkSmartPointer::New(); + texture->SetInputConnection(source->GetOutputPort()); + + vtkSmartPointer plane = vtkSmartPointer::New(); + plane->SetOrigin(-0.5 * size.width, -0.5 * size.height, 0.0); + plane->SetPoint1( 0.5 * size.width, -0.5 * size.height, 0.0); + plane->SetPoint2(-0.5 * size.width, 0.5 * size.height, 0.0); + + vtkSmartPointer textured_plane = vtkSmartPointer::New(); + textured_plane->SetInputConnection(plane->GetOutputPort()); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + mapper->SetInputConnection(textured_plane->GetOutputPort()); + + 
vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + actor->SetTexture(texture); + actor->GetProperty()->ShadingOff(); + actor->GetProperty()->LightingOff(); + + WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WImage3D::WImage3D(InputArray image, const Size2d &size, const Vec3d ¢er, const Vec3d &normal, const Vec3d &up_vector) +{ + CV_Assert(!image.empty() && image.depth() == CV_8U); + + // Compute the transformation matrix for drawing the camera frame in a scene + Vec3d n = normalize(normal); + Vec3d u = normalize(up_vector.cross(n)); + Vec3d v = n.cross(u); + Affine3d pose = makeTransformToGlobal(u, v, n, center); + + WImage3D image3d(image, size); + image3d.applyTransform(pose); + *this = image3d; +} + +void cv::viz::WImage3D::setImage(InputArray image) +{ + CV_Assert(!image.empty() && image.depth() == CV_8U); + + vtkActor *actor = vtkActor::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("This widget does not support 3D image." && actor); + + vtkSmartPointer source = vtkSmartPointer::New(); + source->SetImage(image); + + vtkSmartPointer texture = vtkSmartPointer::New(); + texture->SetInputConnection(source->GetOutputPort()); + + actor->SetTexture(texture); +} + +void cv::viz::WImage3D::setSize(const cv::Size& size) +{ + vtkSmartPointer actor = vtkActor::SafeDownCast(WidgetAccessor::getProp(*this)); + vtkSmartPointer mapper = vtkPolyDataMapper::SafeDownCast(actor->GetMapper()); + vtkSmartPointer textured_plane; + vtkSmartPointer plane; + #if VTK_MAJOR_VERSION <= 5 + textured_plane = vtkTextureMapToPlane::SafeDownCast(mapper->GetInputConnection(0,0)->GetProducer()); + plane = vtkPlaneSource::SafeDownCast(textured_plane->GetInputConnection(0,0)->GetProducer()); + #else + textured_plane = vtkTextureMapToPlane::SafeDownCast(mapper->GetInputAlgorithm()); + plane = vtkPlaneSource::SafeDownCast(textured_plane->GetInputAlgorithm()); + #endif + plane->SetOrigin(-0.5 * size.width, -0.5 * size.height, 0.0); + plane->SetPoint1( 0.5 * 
size.width, -0.5 * size.height, 0.0); + plane->SetPoint2(-0.5 * size.width, 0.5 * size.height, 0.0); +} + +template<> cv::viz::WImage3D cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// camera position widget implementation + +namespace cv { namespace viz { namespace +{ + struct CameraPositionUtils + { + static vtkSmartPointer createFrustum(double aspect_ratio, double fovy, double scale) + { + vtkSmartPointer camera = vtkSmartPointer::New(); + camera->SetViewAngle(fovy); + camera->SetPosition(0.0, 0.0, 0.0); + camera->SetViewUp(0.0, 1.0, 0.0); + camera->SetFocalPoint(0.0, 0.0, 1.0); + camera->SetClippingRange(1e-9, scale); + + double planes_array[24]; + camera->GetFrustumPlanes(aspect_ratio, planes_array); + + vtkSmartPointer planes = vtkSmartPointer::New(); + planes->SetFrustumPlanes(planes_array); + + vtkSmartPointer frustumSource = vtkSmartPointer::New(); + frustumSource->SetPlanes(planes); + + vtkSmartPointer extract_edges = vtkSmartPointer::New(); + extract_edges->SetInputConnection(frustumSource->GetOutputPort()); + extract_edges->Update(); + + return extract_edges->GetOutput(); + } + + static Mat ensureColorImage(InputArray image) + { + Mat color(image.size(), CV_8UC3); + if (image.channels() == 1) + { + Vec3b *drow = color.ptr(); + for(int y = 0; y < color.rows; ++y) + { + const unsigned char *srow = image.getMat().ptr(y); + const unsigned char *send = srow + color.cols; + for(;srow < send;) + *drow++ = Vec3b::all(*srow++); + } + } + else + image.copyTo(color); + return color; + } + }; +}}} + +cv::viz::WCameraPosition::WCameraPosition(double scale) +{ + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, getPolyData(WCoordinateSystem(scale))); + mapper->SetScalarModeToUsePointData(); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + 
WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WCameraPosition::WCameraPosition(const Matx33d &K, double scale, const Color &color) +{ + double f_x = K(0,0), f_y = K(1,1), c_y = K(1,2); + + // Assuming that this is an ideal camera (c_y and c_x are at the center of the image) + double fovy = 2.0 * atan2(c_y, f_y) * 180 / CV_PI; + double aspect_ratio = f_y / f_x; + + vtkSmartPointer polydata = CameraPositionUtils::createFrustum(aspect_ratio, fovy, scale); + VtkUtils::FillScalars(polydata, color); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WCameraPosition::WCameraPosition(const Vec2d &fov, double scale, const Color &color) +{ + double aspect_ratio = tan(fov[0] * 0.5) / tan(fov[1] * 0.5); + double fovy = fov[1] * 180 / CV_PI; + + vtkSmartPointer polydata = CameraPositionUtils::createFrustum(aspect_ratio, fovy, scale); + VtkUtils::FillScalars(polydata, color); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, polydata); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WCameraPosition::WCameraPosition(const Matx33d &K, InputArray _image, double scale, const Color &color) +{ + CV_Assert(!_image.empty() && _image.depth() == CV_8U); + Mat image = CameraPositionUtils::ensureColorImage(_image); + image.at(0, 0) = Vec3d(color.val); //workaround of VTK limitation + + double f_y = K(1,1), c_y = K(1,2); + // Assuming that this is an ideal camera (c_y and c_x are at the center of the image) + double fovy = 2.0 * atan2(c_y, f_y) * 180.0 / CV_PI; + double far_end_height = 2.00 * c_y * scale / f_y; + double aspect_ratio = image.cols/(double)image.rows; + double image_scale = far_end_height/image.rows; + + WImage3D image_widget(image, Size2d(image.size()) * 
image_scale); + image_widget.applyTransform(Affine3d().translate(Vec3d(0, 0, scale))); + vtkSmartPointer plane = getPolyData(image_widget); + + vtkSmartPointer frustum = CameraPositionUtils::createFrustum(aspect_ratio, fovy, scale); + + // Frustum needs to be textured or else it can't be combined with image + vtkSmartPointer frustum_texture = vtkSmartPointer::New(); + VtkUtils::SetInputData(frustum_texture, frustum); + frustum_texture->SetSRange(0.0, 0.0); // Texture mapping with only one pixel + frustum_texture->SetTRange(0.0, 0.0); // from the image to have constant color + + vtkSmartPointer append_filter = vtkSmartPointer::New(); + append_filter->AddInputConnection(frustum_texture->GetOutputPort()); + VtkUtils::AddInputData(append_filter, plane); + + vtkSmartPointer actor = getActor(image_widget); + actor->GetMapper()->SetInputConnection(append_filter->GetOutputPort()); + WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WCameraPosition::WCameraPosition(const Vec2d &fov, InputArray _image, double scale, const Color &color) +{ + CV_Assert(!_image.empty() && _image.depth() == CV_8U); + Mat image = CameraPositionUtils::ensureColorImage(_image); + image.at(0, 0) = Vec3d(color.val); //workaround of VTK limitation + + double fovy = fov[1] * 180.0 / CV_PI; + double far_end_height = 2.0 * scale * tan(fov[1] * 0.5); + double aspect_ratio = image.cols/(double)image.rows; + double image_scale = far_end_height/image.rows; + + WImage3D image_widget(image, Size2d(image.size()) * image_scale); + image_widget.applyTransform(Affine3d().translate(Vec3d(0, 0, scale))); + vtkSmartPointer plane = getPolyData(image_widget); + + vtkSmartPointer frustum = CameraPositionUtils::createFrustum(aspect_ratio, fovy, scale); + + // Frustum needs to be textured or else it can't be combined with image + vtkSmartPointer frustum_texture = vtkSmartPointer::New(); + VtkUtils::SetInputData(frustum_texture, frustum); + frustum_texture->SetSRange(0.0, 0.0); // Texture mapping with only one pixel + 
frustum_texture->SetTRange(0.0, 0.0); // from the image to have constant color + + vtkSmartPointer append_filter = vtkSmartPointer::New(); + append_filter->AddInputConnection(frustum_texture->GetOutputPort()); + VtkUtils::AddInputData(append_filter, plane); + + vtkSmartPointer actor = getActor(image_widget); + actor->GetMapper()->SetInputConnection(append_filter->GetOutputPort()); + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WCameraPosition cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// trajectory widget implementation + +cv::viz::WTrajectory::WTrajectory(InputArray _path, int display_mode, double scale, const Color &color) +{ + vtkSmartPointer append_filter = vtkSmartPointer::New(); + + // Bitwise and with 3 in order to limit the domain to 2 bits + if (display_mode & WTrajectory::PATH) + { + Mat points = vtkTrajectorySource::ExtractPoints(_path); + vtkSmartPointer polydata = getPolyData(WPolyLine(points, color)); + VtkUtils::AddInputData(append_filter, polydata); + } + + if (display_mode & WTrajectory::FRAMES) + { + vtkSmartPointer source = vtkSmartPointer::New(); + source->SetTrajectory(_path); + + vtkSmartPointer glyph = getPolyData(WCoordinateSystem(scale)); + + vtkSmartPointer tensor_glyph = vtkSmartPointer::New(); + tensor_glyph->SetInputConnection(source->GetOutputPort()); + VtkUtils::SetSourceData(tensor_glyph, glyph); + tensor_glyph->ExtractEigenvaluesOff(); // Treat as a rotation matrix, not as something with eigenvalues + tensor_glyph->ThreeGlyphsOff(); + tensor_glyph->SymmetricOff(); + tensor_glyph->ColorGlyphsOff(); + + append_filter->AddInputConnection(tensor_glyph->GetOutputPort()); + } + append_filter->Update(); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, append_filter->GetOutput()); + mapper->SetScalarModeToUsePointData(); + 
mapper->SetScalarRange(0, 255); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WTrajectory cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// WTrajectoryFrustums widget implementation + +cv::viz::WTrajectoryFrustums::WTrajectoryFrustums(InputArray _path, const Matx33d &K, double scale, const Color &color) +{ + vtkSmartPointer source = vtkSmartPointer::New(); + source->SetTrajectory(_path); + + vtkSmartPointer glyph = getPolyData(WCameraPosition(K, scale)); + VtkUtils::FillScalars(glyph, color); + + vtkSmartPointer tensor_glyph = vtkSmartPointer::New(); + tensor_glyph->SetInputConnection(source->GetOutputPort()); + VtkUtils::SetSourceData(tensor_glyph, glyph); + tensor_glyph->ExtractEigenvaluesOff(); // Treat as a rotation matrix, not as something with eigenvalues + tensor_glyph->ThreeGlyphsOff(); + tensor_glyph->SymmetricOff(); + tensor_glyph->ColorGlyphsOff(); + tensor_glyph->Update(); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, tensor_glyph->GetOutput()); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +cv::viz::WTrajectoryFrustums::WTrajectoryFrustums(InputArray _path, const Vec2d &fov, double scale, const Color &color) +{ + vtkSmartPointer source = vtkSmartPointer::New(); + source->SetTrajectory(_path); + + vtkSmartPointer glyph = getPolyData(WCameraPosition(fov, scale)); + VtkUtils::FillScalars(glyph, color); + + vtkSmartPointer tensor_glyph = vtkSmartPointer::New(); + tensor_glyph->SetInputConnection(source->GetOutputPort()); + VtkUtils::SetSourceData(tensor_glyph, glyph); + tensor_glyph->ExtractEigenvaluesOff(); // Treat as a rotation matrix, not as something with eigenvalues + 
tensor_glyph->ThreeGlyphsOff(); + tensor_glyph->SymmetricOff(); + tensor_glyph->ColorGlyphsOff(); + tensor_glyph->Update(); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + VtkUtils::SetInputData(mapper, tensor_glyph->GetOutput()); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WTrajectoryFrustums cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// WTrajectorySpheres widget implementation + +cv::viz::WTrajectorySpheres::WTrajectorySpheres(InputArray _path, double line_length, double radius, const Color &from, const Color &to) +{ + CV_Assert(_path.kind() == _InputArray::STD_VECTOR || _path.kind() == _InputArray::MAT); + CV_Assert(_path.type() == CV_32FC(16) || _path.type() == CV_64FC(16)); + + Mat path64; + _path.getMat().convertTo(path64, CV_64F); + Affine3d *traj = path64.ptr(); + size_t total = path64.total(); + + vtkSmartPointer append_filter = vtkSmartPointer::New(); + + for(size_t i = 0; i < total; ++i) + { + Vec3d curr = traj[i].translation(); + + vtkSmartPointer sphere_source = vtkSmartPointer::New(); + sphere_source->SetCenter(curr.val); + sphere_source->SetRadius( (i == 0) ? 
2 * radius : radius ); + sphere_source->Update(); + + double alpha = static_cast(i)/total; + Color c = from * (1 - alpha) + to * alpha; + + vtkSmartPointer polydata = sphere_source->GetOutput(); + polydata->GetCellData()->SetScalars(VtkUtils::FillScalars(polydata->GetNumberOfCells(), c)); + VtkUtils::AddInputData(append_filter, polydata); + + if (i > 0) + { + Vec3d prev = traj[i-1].translation(); + Vec3d lvec = prev - curr; + + if(norm(lvec) > line_length) + lvec = normalize(lvec) * line_length; + + Vec3d lend = curr + lvec; + + vtkSmartPointer line_source = vtkSmartPointer::New(); + line_source->SetPoint1(curr.val); + line_source->SetPoint2(lend.val); + line_source->Update(); + vtkSmartPointer polydata_ = line_source->GetOutput(); + polydata_->GetCellData()->SetScalars(VtkUtils::FillScalars(polydata_->GetNumberOfCells(), c)); + VtkUtils::AddInputData(append_filter, polydata_); + } + } + append_filter->Update(); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + mapper->SetScalarModeToUseCellData(); + VtkUtils::SetInputData(mapper, append_filter->GetOutput()); + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->SetMapper(mapper); + + WidgetAccessor::setProp(*this, actor); +} + +template<> cv::viz::WTrajectorySpheres cv::viz::Widget::cast() const +{ + Widget3D widget = this->cast(); + return static_cast(widget); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/types.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/types.cpp new file mode 100644 index 00000000..e9a470cf --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/types.cpp @@ -0,0 +1,230 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Ozan Tonkal, ozantonkal@gmail.com +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "precomp.hpp" + +//////////////////////////////////////////////////////////////////// +/// Events + +cv::viz::KeyboardEvent::KeyboardEvent(Action _action, const String& _symbol, unsigned char _code, int _modifiers) + : action(_action), symbol(_symbol), code(_code), modifiers(_modifiers) {} + +cv::viz::MouseEvent::MouseEvent(const Type& _type, const MouseButton& _button, const Point& _pointer, int _modifiers) + : type(_type), button(_button), pointer(_pointer), modifiers(_modifiers) {} + +//////////////////////////////////////////////////////////////////// +/// cv::viz::Mesh3d + +cv::viz::Mesh cv::viz::Mesh::load(const String& file, int type) +{ + vtkSmartPointer reader = vtkSmartPointer::New(); + switch (type) { + case LOAD_AUTO: + { + CV_Error(Error::StsError, "cv::viz::Mesh::LOAD_AUTO: Not implemented yet"); + } + case LOAD_PLY: + { + vtkSmartPointer ply_reader = vtkSmartPointer::New(); + ply_reader->SetFileName(file.c_str()); + ply_reader->Update(); + reader = ply_reader; + break; + } + case LOAD_OBJ: + { + vtkSmartPointer obj_reader = vtkSmartPointer::New(); + obj_reader->SetFileName(file.c_str()); + obj_reader->Update(); + reader = obj_reader; + break; + } + default: + CV_Error(Error::StsError, "cv::viz::Mesh::load: Unknown file type"); + } + + vtkSmartPointer polydata = reader->GetOutput(); + CV_Assert("File does not exist or file format is not supported." 
&& polydata); + + Mesh mesh; + vtkSmartPointer sink = vtkSmartPointer::New(); + sink->SetOutput(mesh.cloud, mesh.colors, mesh.normals, mesh.tcoords); + sink->SetInputConnection(reader->GetOutputPort()); + sink->Write(); + + // Now handle the polygons + vtkSmartPointer polygons = polydata->GetPolys(); + mesh.polygons.create(1, polygons->GetSize(), CV_32SC1); + mesh.polygons = 0; + int* poly_ptr = mesh.polygons.ptr(); + + polygons->InitTraversal(); + vtkIdType nr_cell_points; + CellIterT cell_points; + while (polygons->GetNextCell(nr_cell_points, cell_points)) + { + *poly_ptr++ = nr_cell_points; + for (vtkIdType i = 0; i < nr_cell_points; ++i) + *poly_ptr++ = (int)cell_points[i]; + } + + return mesh; +} + +//////////////////////////////////////////////////////////////////// +/// Camera implementation + +cv::viz::Camera::Camera(double fx, double fy, double cx, double cy, const Size &window_size) +{ + init(fx, fy, cx, cy, window_size); +} + +cv::viz::Camera::Camera(const Vec2d &fov, const Size &window_size) +{ + CV_Assert(window_size.width > 0 && window_size.height > 0); + setClip(Vec2d(0.01, 1000.01)); // Default clipping + setFov(fov); + window_size_ = window_size; + // Principal point at the center + principal_point_ = Vec2f(static_cast(window_size.width)*0.5f, static_cast(window_size.height)*0.5f); + focal_ = Vec2f(principal_point_[0] / tan(fov_[0]*0.5f), principal_point_[1] / tan(fov_[1]*0.5f)); +} + +cv::viz::Camera::Camera(const cv::Matx33d & K, const Size &window_size) +{ + double f_x = K(0,0); + double f_y = K(1,1); + double c_x = K(0,2); + double c_y = K(1,2); + init(f_x, f_y, c_x, c_y, window_size); +} + +cv::viz::Camera::Camera(const Matx44d &proj, const Size &window_size) +{ + CV_Assert(window_size.width > 0 && window_size.height > 0); + + double near = proj(2,3) / (proj(2,2) - 1.0); + double far = near * (proj(2,2) - 1.0) / (proj(2,2) + 1.0); + double left = near * (proj(0,2)-1) / proj(0,0); + double right = 2.0 * near / proj(0,0) + left; + double bottom 
= near * (proj(1,2)-1) / proj(1,1); + double top = 2.0 * near / proj(1,1) + bottom; + + double epsilon = 2.2204460492503131e-16; + + principal_point_[0] = fabs(left-right) < epsilon ? window_size.width * 0.5 : (left * window_size.width) / (left - right); + principal_point_[1] = fabs(top-bottom) < epsilon ? window_size.height * 0.5 : (top * window_size.height) / (top - bottom); + + focal_[0] = -near * principal_point_[0] / left; + focal_[1] = near * principal_point_[1] / top; + + setClip(Vec2d(near, far)); + fov_[0] = atan2(principal_point_[0], focal_[0]) + atan2(window_size.width-principal_point_[0], focal_[0]); + fov_[1] = atan2(principal_point_[1], focal_[1]) + atan2(window_size.height-principal_point_[1], focal_[1]); + + window_size_ = window_size; +} + +void cv::viz::Camera::init(double fx, double fy, double cx, double cy, const Size &window_size) +{ + CV_Assert(window_size.width > 0 && window_size.height > 0); + setClip(Vec2d(0.01, 1000.01));// Default clipping + + fov_[0] = atan2(cx, fx) + atan2(window_size.width - cx, fx); + fov_[1] = atan2(cy, fy) + atan2(window_size.height - cy, fy); + + principal_point_[0] = cx; + principal_point_[1] = cy; + + focal_[0] = fx; + focal_[1] = fy; + + window_size_ = window_size; +} + +void cv::viz::Camera::setWindowSize(const Size &window_size) +{ + CV_Assert(window_size.width > 0 && window_size.height > 0); + + // Get the scale factor and update the principal points + float scalex = static_cast(window_size.width) / static_cast(window_size_.width); + float scaley = static_cast(window_size.height) / static_cast(window_size_.height); + + principal_point_[0] *= scalex; + principal_point_[1] *= scaley; + focal_ *= scaley; + // Vertical field of view is fixed! 
Update horizontal field of view + fov_[0] = (atan2(principal_point_[0],focal_[0]) + atan2(window_size.width-principal_point_[0],focal_[0])); + + window_size_ = window_size; +} + +void cv::viz::Camera::computeProjectionMatrix(Matx44d &proj) const +{ + double top = clip_[0] * principal_point_[1] / focal_[1]; + double left = -clip_[0] * principal_point_[0] / focal_[0]; + double right = clip_[0] * (window_size_.width - principal_point_[0]) / focal_[0]; + double bottom = -clip_[0] * (window_size_.height - principal_point_[1]) / focal_[1]; + + double temp1 = 2.0 * clip_[0]; + double temp2 = 1.0 / (right - left); + double temp3 = 1.0 / (top - bottom); + double temp4 = 1.0 / (clip_[0] - clip_[1]); + + proj = Matx44d::zeros(); + proj(0,0) = temp1 * temp2; + proj(1,1) = temp1 * temp3; + proj(0,2) = (right + left) * temp2; + proj(1,2) = (top + bottom) * temp3; + proj(2,2) = (clip_[1]+clip_[0]) * temp4; + proj(3,2) = -1.0; + proj(2,3) = (temp1 * clip_[1]) * temp4; +} + +cv::viz::Camera cv::viz::Camera::KinectCamera(const Size &window_size) +{ + Matx33d K(525.0, 0.0, 320.0, 0.0, 525.0, 240.0, 0.0, 0.0, 1.0); + return Camera(K, window_size); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/viz3d.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/viz3d.cpp new file mode 100644 index 00000000..62e570e6 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/viz3d.cpp @@ -0,0 +1,156 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Ozan Tonkal, ozantonkal@gmail.com +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "precomp.hpp" + +cv::viz::Viz3d::Viz3d(const String& window_name) : impl_(0) { create(window_name); } + +cv::viz::Viz3d::Viz3d(const Viz3d& other) : impl_(other.impl_) +{ + if (impl_) + CV_XADD(&impl_->ref_counter, 1); +} + +cv::viz::Viz3d& cv::viz::Viz3d::operator=(const Viz3d& other) +{ + if (this != &other) + { + release(); + impl_ = other.impl_; + if (impl_) + CV_XADD(&impl_->ref_counter, 1); + } + return *this; +} + +cv::viz::Viz3d::~Viz3d() { release(); } + +void cv::viz::Viz3d::create(const String &window_name) +{ + if (impl_) + release(); + + if (VizStorage::windowExists(window_name)) + *this = VizStorage::get(window_name); + else + { + impl_ = new VizImpl(window_name); + impl_->ref_counter = 1; + + // Register the window + VizStorage::add(*this); + } +} + +void cv::viz::Viz3d::release() +{ + if (impl_ && CV_XADD(&impl_->ref_counter, -1) == 1) + { + delete impl_; + impl_ = 0; + } + + if (impl_ && impl_->ref_counter == 1) + VizStorage::removeUnreferenced(); + + impl_ = 0; +} + +void cv::viz::Viz3d::spin() { impl_->spin(); } +void cv::viz::Viz3d::spinOnce(int time, bool force_redraw) { impl_->spinOnce(time, force_redraw); } +void cv::viz::Viz3d::setOffScreenRendering() { impl_->setOffScreenRendering(); } +void cv::viz::Viz3d::removeAllLights() { impl_->removeAllLights(); } +void cv::viz::Viz3d::addLight(const Vec3d &position, const Vec3d &focalPoint, const Color &color, + const Color &diffuseColor, const Color &ambientColor, const Color &specularColor) +{ impl_->addLight(position, focalPoint, color, diffuseColor, ambientColor, specularColor); } +bool cv::viz::Viz3d::wasStopped() const { return impl_->wasStopped(); } +void cv::viz::Viz3d::close() { impl_->close(); } + +void cv::viz::Viz3d::registerKeyboardCallback(KeyboardCallback callback, void* cookie) +{ impl_->registerKeyboardCallback(callback, cookie); } + +void cv::viz::Viz3d::registerMouseCallback(MouseCallback callback, void* cookie) +{ 
impl_->registerMouseCallback(callback, cookie); } + +void cv::viz::Viz3d::showWidget(const String &id, const Widget &widget, const Affine3d &pose) { impl_->showWidget(id, widget, pose); } +void cv::viz::Viz3d::removeWidget(const String &id) { impl_->removeWidget(id); } +cv::viz::Widget cv::viz::Viz3d::getWidget(const String &id) const { return impl_->getWidget(id); } +void cv::viz::Viz3d::removeAllWidgets() { impl_->removeAllWidgets(); } + +void cv::viz::Viz3d::showImage(InputArray image, const Size& window_size) { impl_->showImage(image, window_size); } + +void cv::viz::Viz3d::setWidgetPose(const String &id, const Affine3d &pose) { impl_->setWidgetPose(id, pose); } +void cv::viz::Viz3d::updateWidgetPose(const String &id, const Affine3d &pose) { impl_->updateWidgetPose(id, pose); } +cv::Affine3d cv::viz::Viz3d::getWidgetPose(const String &id) const { return impl_->getWidgetPose(id); } + +void cv::viz::Viz3d::setCamera(const Camera &camera) { impl_->setCamera(camera); } +cv::viz::Camera cv::viz::Viz3d::getCamera() const { return impl_->getCamera(); } +void cv::viz::Viz3d::setViewerPose(const Affine3d &pose) { impl_->setViewerPose(pose); } +cv::Affine3d cv::viz::Viz3d::getViewerPose() const { return impl_->getViewerPose(); } + +void cv::viz::Viz3d::resetCameraViewpoint(const String &id) { impl_->resetCameraViewpoint(id); } +void cv::viz::Viz3d::resetCamera() { impl_->resetCamera(); } + +void cv::viz::Viz3d::convertToWindowCoordinates(const Point3d &pt, Point3d &window_coord) { impl_->convertToWindowCoordinates(pt, window_coord); } +void cv::viz::Viz3d::converTo3DRay(const Point3d &window_coord, Point3d &origin, Vec3d &direction) { impl_->converTo3DRay(window_coord, origin, direction); } + +cv::Size cv::viz::Viz3d::getWindowSize() const { return impl_->getWindowSize(); } +void cv::viz::Viz3d::setWindowSize(const Size &window_size) { impl_->setWindowSize(window_size); } +cv::String cv::viz::Viz3d::getWindowName() const { return impl_->getWindowName(); } +cv::Mat 
cv::viz::Viz3d::getScreenshot() const { return impl_->getScreenshot(); } +void cv::viz::Viz3d::saveScreenshot(const String &file) { impl_->saveScreenshot(file); } +void cv::viz::Viz3d::setWindowPosition(const Point& window_position) { impl_->setWindowPosition(window_position); } +void cv::viz::Viz3d::setFullScreen(bool mode) { impl_->setFullScreen(mode); } +void cv::viz::Viz3d::setBackgroundColor(const Color& color, const Color& color2) { impl_->setBackgroundColor(color, color2); } + +void cv::viz::Viz3d::setBackgroundTexture(InputArray image) { impl_->setBackgroundTexture(image); } +void cv::viz::Viz3d::setBackgroundMeshLab() {impl_->setBackgroundMeshLab(); } + +void cv::viz::Viz3d::setRenderingProperty(const String &id, int property, double value) { getWidget(id).setRenderingProperty(property, value); } +double cv::viz::Viz3d::getRenderingProperty(const String &id, int property) { return getWidget(id).getRenderingProperty(property); } + +void cv::viz::Viz3d::setRepresentation(int representation) { impl_->setRepresentation(representation); } + +void cv::viz::Viz3d::setGlobalWarnings(bool enabled) { vtkObject::SetGlobalWarningDisplay(enabled ? 1 : 0); } diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vizcore.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vizcore.cpp new file mode 100644 index 00000000..a0ca4980 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vizcore.cpp @@ -0,0 +1,352 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. 
+// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Ozan Tonkal, ozantonkal@gmail.com +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "precomp.hpp" + +cv::Affine3d cv::viz::makeTransformToGlobal(const Vec3d& axis_x, const Vec3d& axis_y, const Vec3d& axis_z, const Vec3d& origin) +{ + Affine3d::Mat3 R(axis_x[0], axis_y[0], axis_z[0], + axis_x[1], axis_y[1], axis_z[1], + axis_x[2], axis_y[2], axis_z[2]); + + return Affine3d(R, origin); +} + +cv::Affine3d cv::viz::makeCameraPose(const Vec3d& position, const Vec3d& focal_point, const Vec3d& y_dir) +{ + // Compute the transformation matrix for drawing the camera frame in a scene + Vec3d n = normalize(focal_point - position); + Vec3d u = normalize(y_dir.cross(n)); + Vec3d v = n.cross(u); + + return makeTransformToGlobal(u, v, n, position); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// VizStorage implementation + +#if defined(_WIN32) && !defined(__CYGWIN__) + + #include + + static BOOL WINAPI ConsoleHandlerRoutine(DWORD /*dwCtrlType*/) + { + vtkObject::GlobalWarningDisplayOff(); + return FALSE; + } + + static void register_console_handler() + { + HANDLE hOut = GetStdHandle(STD_OUTPUT_HANDLE); + CONSOLE_SCREEN_BUFFER_INFO hOutInfo; + if (GetConsoleScreenBufferInfo(hOut, &hOutInfo)) + SetConsoleCtrlHandler(ConsoleHandlerRoutine, TRUE); + } + +#else + + void register_console_handler(); + void register_console_handler() {} + +#endif + + +cv::viz::VizStorage cv::viz::VizStorage::init; +cv::viz::VizMap cv::viz::VizStorage::storage; + +void cv::viz::VizMap::replace_clear() { type().swap(m); } +cv::viz::VizMap::~VizMap() { replace_clear(); } + +cv::viz::VizStorage::VizStorage() +{ + register_console_handler(); +} +void cv::viz::VizStorage::unregisterAll() { storage.replace_clear(); } + +cv::viz::Viz3d& cv::viz::VizStorage::get(const String &window_name) +{ + String name = generateWindowName(window_name); + VizMap::iterator vm_itr = storage.m.find(name); + CV_Assert(vm_itr != storage.m.end()); + return vm_itr->second; +} + +void 
cv::viz::VizStorage::add(const Viz3d& window) +{ + String window_name = window.getWindowName(); + VizMap::iterator vm_itr = storage.m.find(window_name); + CV_Assert(vm_itr == storage.m.end()); + storage.m.insert(std::make_pair(window_name, window)); +} + +bool cv::viz::VizStorage::windowExists(const String &window_name) +{ + String name = generateWindowName(window_name); + return storage.m.find(name) != storage.m.end(); +} + +void cv::viz::VizStorage::removeUnreferenced() +{ + for(VizMap::iterator pos = storage.m.begin(); pos != storage.m.end();) + if(pos->second.impl_->ref_counter == 1) + storage.m.erase(pos++); + else + ++pos; +} + +cv::String cv::viz::VizStorage::generateWindowName(const String &window_name) +{ + String output = "Viz"; + // Already is Viz + if (window_name == output) + return output; + + String prefixed = output + " - "; + if (window_name.substr(0, prefixed.length()) == prefixed) + output = window_name; // Already has "Viz - " + else if (window_name.substr(0, output.length()) == output) + output = prefixed + window_name; // Doesn't have prefix + else + output = (window_name == "" ? output : prefixed + window_name); + + return output; +} + +cv::viz::Viz3d cv::viz::getWindowByName(const String &window_name) { return Viz3d (window_name); } +void cv::viz::unregisterAllWindows() { VizStorage::unregisterAll(); } + +cv::viz::Viz3d cv::viz::imshow(const String& window_name, InputArray image, const Size& window_size) +{ + Viz3d viz = getWindowByName(window_name); + viz.showImage(image, window_size); + return viz; +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// Read/write clouds. 
Supported formats: ply, stl, xyz, obj + +void cv::viz::writeCloud(const String& file, InputArray cloud, InputArray colors, InputArray normals, bool binary) +{ + CV_Assert(file.size() > 4 && "Extension is required"); + String extension = file.substr(file.size()-4); + + vtkSmartPointer source = vtkSmartPointer::New(); + source->SetColorCloudNormals(cloud, colors, normals); + + vtkSmartPointer writer; + if (extension == ".xyz") + { + writer = vtkSmartPointer::New(); + vtkXYZWriter::SafeDownCast(writer)->SetFileName(file.c_str()); + } + else if (extension == ".ply") + { + writer = vtkSmartPointer::New(); + vtkPLYWriter::SafeDownCast(writer)->SetFileName(file.c_str()); + vtkPLYWriter::SafeDownCast(writer)->SetFileType(binary ? VTK_BINARY : VTK_ASCII); + vtkPLYWriter::SafeDownCast(writer)->SetArrayName("Colors"); + } + else if (extension == ".obj") + { + writer = vtkSmartPointer::New(); + vtkOBJWriter::SafeDownCast(writer)->SetFileName(file.c_str()); + } + else + CV_Error(Error::StsError, "Unsupported format"); + + writer->SetInputConnection(source->GetOutputPort()); + writer->Write(); +} + +cv::Mat cv::viz::readCloud(const String& file, OutputArray colors, OutputArray normals) +{ + CV_Assert(file.size() > 4 && "Extension is required"); + String extension = file.substr(file.size()-4); + + vtkSmartPointer reader; + if (extension == ".xyz") + { + reader = vtkSmartPointer::New(); + vtkXYZReader::SafeDownCast(reader)->SetFileName(file.c_str()); + } + else if (extension == ".ply") + { + reader = vtkSmartPointer::New(); + CV_Assert(vtkPLYReader::CanReadFile(file.c_str())); + vtkPLYReader::SafeDownCast(reader)->SetFileName(file.c_str()); + } + else if (extension == ".obj") + { + reader = vtkSmartPointer::New(); + vtkOBJReader::SafeDownCast(reader)->SetFileName(file.c_str()); + } + else if (extension == ".stl") + { + reader = vtkSmartPointer::New(); + vtkSTLReader::SafeDownCast(reader)->SetFileName(file.c_str()); + } + else + CV_Error(Error::StsError, "Unsupported format"); + + 
cv::Mat cloud; + + vtkSmartPointer sink = vtkSmartPointer::New(); + sink->SetInputConnection(reader->GetOutputPort()); + sink->SetOutput(cloud, colors, normals); + sink->Write(); + + return cloud; +} + +cv::viz::Mesh cv::viz::readMesh(const String& file) { return Mesh::load(file); } + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// Read/write poses and trajectories + +bool cv::viz::readPose(const String& file, Affine3d& pose, const String& tag) +{ + FileStorage fs(file, FileStorage::READ); + if (!fs.isOpened()) + return false; + + Mat hdr(pose.matrix, false); + fs[tag] >> hdr; + if (hdr.empty() || hdr.cols != pose.matrix.cols || hdr.rows != pose.matrix.rows) + return false; + + hdr.convertTo(pose.matrix, CV_64F); + return true; +} + +void cv::viz::writePose(const String& file, const Affine3d& pose, const String& tag) +{ + FileStorage fs(file, FileStorage::WRITE); + fs << tag << Mat(pose.matrix, false); +} + +void cv::viz::readTrajectory(OutputArray _traj, const String& files_format, int start, int end, const String& tag) +{ + CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT); + + start = max(0, std::min(start, end)); + end = std::max(start, end); + + std::vector traj; + + for(int i = start; i < end; ++i) + { + Affine3d affine; + bool ok = readPose(cv::format(files_format.c_str(), i), affine, tag); + if (!ok) + break; + + traj.push_back(affine); + } + + Mat(traj).convertTo(_traj, _traj.depth()); +} + +void cv::viz::writeTrajectory(InputArray _traj, const String& files_format, int start, const String& tag) +{ + if (_traj.kind() == _InputArray::STD_VECTOR_MAT) + { +#if CV_MAJOR_VERSION < 3 + std::vector& v = *(std::vector*)_traj.obj; +#else + std::vector& v = *(std::vector*)_traj.getObj(); +#endif + + for(size_t i = 0, index = max(0, start); i < v.size(); ++i, ++index) + { + Affine3d affine; + Mat pose = v[i]; + CV_Assert(pose.type() == CV_32FC(16) || pose.type() == 
CV_64FC(16)); + pose.copyTo(affine.matrix); + writePose(cv::format(files_format.c_str(), index), affine, tag); + } + return; + } + + if (_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT) + { + CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16)); + + Mat traj = _traj.getMat(); + + if (traj.depth() == CV_32F) + for(size_t i = 0, index = max(0, start); i < traj.total(); ++i, ++index) + writePose(cv::format(files_format.c_str(), index), traj.at((int)i), tag); + + if (traj.depth() == CV_64F) + for(size_t i = 0, index = max(0, start); i < traj.total(); ++i, ++index) + writePose(cv::format(files_format.c_str(), index), traj.at((int)i), tag); + return; + } + + CV_Error(Error::StsError, "Unsupported array kind"); +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// Computing normals for mesh + +void cv::viz::computeNormals(const Mesh& mesh, OutputArray _normals) +{ + vtkSmartPointer polydata = getPolyData(WMesh(mesh)); + vtkSmartPointer with_normals = VtkUtils::ComputeNormals(polydata); + + vtkSmartPointer generic_normals = with_normals->GetPointData()->GetNormals(); + if(generic_normals) + { + Mat normals(1, generic_normals->GetNumberOfTuples(), CV_64FC3); + Vec3d *optr = normals.ptr(); + + for(int i = 0; i < generic_normals->GetNumberOfTuples(); ++i, ++optr) + generic_normals->GetTuple(i, optr->val); + + normals.convertTo(_normals, mesh.cloud.type()); + } + else + _normals.release(); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vizimpl.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vizimpl.cpp new file mode 100644 index 00000000..2c7ce997 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vizimpl.cpp @@ -0,0 +1,631 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Ozan Tonkal, ozantonkal@gmail.com +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "precomp.hpp" + + +///////////////////////////////////////////////////////////////////////////////////////////// +cv::viz::Viz3d::VizImpl::VizImpl(const String &name) : spin_once_state_(false), + window_position_(Vec2i(std::numeric_limits::min())), widget_actor_map_(new WidgetActorMap) +{ + renderer_ = vtkSmartPointer::New(); + window_name_ = VizStorage::generateWindowName(name); + + // Create render window + window_ = vtkSmartPointer::New(); + int * sz = window_->GetScreenSize(); + if (sz) + { + cv::Vec2i window_size = cv::Vec2i(sz) / 2; + window_->SetSize(window_size.val); + } + else + { + int new_sz[2] = { 640, 480 }; + window_->SetSize(new_sz); + } + window_->AddRenderer(renderer_); + + // Create the interactor style + style_ = vtkSmartPointer::New(); + style_->setWidgetActorMap(widget_actor_map_); + style_->UseTimersOn(); + + timer_callback_ = vtkSmartPointer::New(); + exit_callback_ = vtkSmartPointer::New(); + exit_callback_->viz = this; + + offScreenMode_ = false; + + setBackgroundMeshLab(); +} + +cv::viz::Viz3d::VizImpl::~VizImpl() { close(); } + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::TimerCallback::Execute(vtkObject* caller, unsigned long event_id, void* cookie) +{ + if (event_id == vtkCommand::TimerEvent && timer_id == *reinterpret_cast(cookie)) + { + vtkSmartPointer interactor = vtkRenderWindowInteractor::SafeDownCast(caller); + interactor->TerminateApp(); + } +} + +void cv::viz::Viz3d::VizImpl::ExitCallback::Execute(vtkObject*, unsigned long event_id, void*) +{ + if (event_id == vtkCommand::ExitEvent && viz->interactor_) + { + viz->interactor_->TerminateApp(); + viz->interactor_ = 0; + } +} + +///////////////////////////////////////////////////////////////////////////////////////////// + +bool cv::viz::Viz3d::VizImpl::wasStopped() const +{ + bool stopped = spin_once_state_ ? 
interactor_ == 0 : false; + spin_once_state_ &= !stopped; + return stopped; +} + +void cv::viz::Viz3d::VizImpl::close() +{ + if (!interactor_) + return; + interactor_->GetRenderWindow()->Finalize(); + interactor_->TerminateApp(); // This tends to close the window... + interactor_ = 0; +} + +void cv::viz::Viz3d::VizImpl::recreateRenderWindow() +{ +#if !defined _MSC_VER && !defined __APPLE__ + //recreating is workaround for Ubuntu -- a crash in x-server + Vec2i window_size(window_->GetSize()); + int fullscreen = window_->GetFullScreen(); + + window_->Finalize(); + window_ = vtkSmartPointer::New(); + if (window_position_[0] != std::numeric_limits::min()) //also workaround + window_->SetPosition(window_position_.val); + + window_->SetSize(window_size.val); + window_->SetFullScreen(fullscreen); + window_->AddRenderer(renderer_); +#endif +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::spin() +{ + recreateRenderWindow(); +#if defined __APPLE__ + interactor_ = vtkCocoaRenderWindowInteractorNew(); +#else + interactor_ = vtkSmartPointer::New(); +#endif + interactor_->SetRenderWindow(window_); + interactor_->SetInteractorStyle(style_); + window_->AlphaBitPlanesOff(); + window_->PointSmoothingOff(); + window_->LineSmoothingOff(); + window_->PolygonSmoothingOff(); + window_->SwapBuffersOn(); + window_->SetStereoTypeToAnaglyph(); + window_->Render(); + window_->SetWindowName(window_name_.c_str()); + interactor_->Start(); + interactor_ = 0; +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::spinOnce(int time, bool force_redraw) +{ + if (interactor_ == 0) + { + spin_once_state_ = true; + recreateRenderWindow(); +#if defined __APPLE__ + interactor_ = vtkCocoaRenderWindowInteractorNew(); +#else + interactor_ = vtkSmartPointer::New(); +#endif + interactor_->SetRenderWindow(window_); + 
interactor_->SetInteractorStyle(style_); + interactor_->AddObserver(vtkCommand::TimerEvent, timer_callback_); + interactor_->AddObserver(vtkCommand::ExitEvent, exit_callback_); + window_->AlphaBitPlanesOff(); + window_->PointSmoothingOff(); + window_->LineSmoothingOff(); + window_->PolygonSmoothingOff(); + window_->SwapBuffersOn(); + window_->SetStereoTypeToAnaglyph(); + window_->Render(); + window_->SetWindowName(window_name_.c_str()); + } + + vtkSmartPointer local = interactor_; + + if (force_redraw) + local->Render(); + + timer_callback_->timer_id = local->CreateRepeatingTimer(std::max(1, time)); + local->Start(); + local->DestroyTimer(timer_callback_->timer_id); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::setOffScreenRendering() +{ + window_->SetOffScreenRendering(1); + offScreenMode_ = true; +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::removeAllLights() +{ + renderer_->RemoveAllLights(); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::addLight(Vec3d position, Vec3d focalPoint, const Color &color, const Color &diffuseColor, + const Color &ambientColor, const Color &specularColor) +{ + Color color_ = vtkcolor(color); + Color diffuseColor_ = vtkcolor(diffuseColor); + Color ambientColor_ = vtkcolor(ambientColor); + Color specularColor_ = vtkcolor(specularColor); + + vtkSmartPointer light = vtkSmartPointer::New(); + light->SetPosition(position.val); + light->SetFocalPoint(focalPoint.val); + light->SetColor(color_.val); + light->SetDiffuseColor(diffuseColor_.val); + light->SetAmbientColor(ambientColor_.val); + light->SetSpecularColor(specularColor_.val); + + renderer_->AddLight(light); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void 
cv::viz::Viz3d::VizImpl::showWidget(const String &id, const Widget &widget, const Affine3d &pose) +{ + WidgetActorMap::iterator wam_itr = widget_actor_map_->find(id); + bool exists = wam_itr != widget_actor_map_->end(); + if (exists) + { + // Remove it if it exists and add it again + removeActorFromRenderer(wam_itr->second); + } + // Get the actor and set the user matrix + vtkProp3D *actor = vtkProp3D::SafeDownCast(WidgetAccessor::getProp(widget)); + if (actor) + { + // If the actor is 3D, apply pose + vtkSmartPointer matrix = vtkmatrix(pose.matrix); + actor->SetUserMatrix(matrix); + actor->Modified(); + } + // If the actor is a vtkFollower, then it should always face the camera + vtkFollower *follower = vtkFollower::SafeDownCast(actor); + if (follower) + { + follower->SetCamera(renderer_->GetActiveCamera()); + } + + renderer_->AddActor(WidgetAccessor::getProp(widget)); + (*widget_actor_map_)[id] = WidgetAccessor::getProp(widget); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::removeWidget(const String &id) +{ + WidgetActorMap::iterator wam_itr = widget_actor_map_->find(id); + bool exists = wam_itr != widget_actor_map_->end(); + CV_Assert("Widget does not exist." && exists); + CV_Assert("Widget could not be removed." && removeActorFromRenderer(wam_itr->second)); + widget_actor_map_->erase(wam_itr); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +cv::viz::Widget cv::viz::Viz3d::VizImpl::getWidget(const String &id) const +{ + WidgetActorMap::const_iterator wam_itr = widget_actor_map_->find(id); + bool exists = wam_itr != widget_actor_map_->end(); + CV_Assert("Widget does not exist." 
&& exists); + + Widget widget; + WidgetAccessor::setProp(widget, wam_itr->second); + return widget; +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::setWidgetPose(const String &id, const Affine3d &pose) +{ + WidgetActorMap::iterator wam_itr = widget_actor_map_->find(id); + bool exists = wam_itr != widget_actor_map_->end(); + CV_Assert("Widget does not exist." && exists); + + vtkProp3D *actor = vtkProp3D::SafeDownCast(wam_itr->second); + CV_Assert("Widget is not 3D." && actor); + + vtkSmartPointer matrix = vtkmatrix(pose.matrix); + actor->SetUserMatrix(matrix); + actor->Modified(); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::updateWidgetPose(const String &id, const Affine3d &pose) +{ + WidgetActorMap::iterator wam_itr = widget_actor_map_->find(id); + bool exists = wam_itr != widget_actor_map_->end(); + CV_Assert("Widget does not exist." && exists); + + vtkProp3D *actor = vtkProp3D::SafeDownCast(wam_itr->second); + CV_Assert("Widget is not 3D." && actor); + + vtkSmartPointer matrix = actor->GetUserMatrix(); + if (!matrix) + { + setWidgetPose(id, pose); + return ; + } + Affine3d updated_pose = pose * Affine3d(*matrix->Element); + matrix = vtkmatrix(updated_pose.matrix); + + actor->SetUserMatrix(matrix); + actor->Modified(); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +cv::Affine3d cv::viz::Viz3d::VizImpl::getWidgetPose(const String &id) const +{ + WidgetActorMap::const_iterator wam_itr = widget_actor_map_->find(id); + bool exists = wam_itr != widget_actor_map_->end(); + CV_Assert("Widget does not exist." && exists); + + vtkProp3D *actor = vtkProp3D::SafeDownCast(wam_itr->second); + CV_Assert("Widget is not 3D." 
&& actor); + + return Affine3d(*actor->GetUserMatrix()->Element); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::saveScreenshot(const String &file) { style_->saveScreenshot(file.c_str()); } + +///////////////////////////////////////////////////////////////////////////////////////////// +cv::Mat cv::viz::Viz3d::VizImpl::getScreenshot() const +{ + vtkSmartPointer windowToImageFilter = + vtkSmartPointer::New(); + windowToImageFilter->SetInput(window_); + windowToImageFilter->ReadFrontBufferOff(); // read from the back buffer + windowToImageFilter->Update(); + + vtkImageData *resultImage = windowToImageFilter->GetOutput(); + int * dim = resultImage->GetDimensions(); + cv::Mat image(dim[1], dim[0], CV_8UC3); + + Vec3b* dptr = reinterpret_cast(resultImage->GetScalarPointer()); + size_t elem_step = resultImage->GetIncrements()[1]/sizeof(Vec3b); + + for (int y = 0; y < image.rows; ++y) + { + const Vec3b* drow = dptr + elem_step * y; + unsigned char *srow = image.ptr(image.rows - y - 1); + for (int x = 0; x < image.cols; ++x, srow += image.channels()) + { + srow[0] = drow[x][2]; + srow[1] = drow[x][1]; + srow[2] = drow[x][0]; + } + } + + resultImage = 0; + + return image; +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::registerMouseCallback(MouseCallback callback, void* cookie) +{ style_->registerMouseCallback(callback, cookie); } + +void cv::viz::Viz3d::VizImpl::registerKeyboardCallback(KeyboardCallback callback, void* cookie) +{ style_->registerKeyboardCallback(callback, cookie); } + + +////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::removeAllWidgets() +{ + widget_actor_map_->clear(); + renderer_->RemoveAllViewProps(); +} +///////////////////////////////////////////////////////////////////////////////////////////// +void 
cv::viz::Viz3d::VizImpl::showImage(InputArray image, const Size& window_size) +{ + removeAllWidgets(); + if (window_size.width > 0 && window_size.height > 0) + setWindowSize(window_size); + + showWidget("showImage", WImageOverlay(image, Rect(Point(0,0), getWindowSize()))); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +bool cv::viz::Viz3d::VizImpl::removeActorFromRenderer(vtkSmartPointer actor) +{ + vtkPropCollection* actors = renderer_->GetViewProps(); + actors->InitTraversal(); + vtkProp* current_actor = NULL; + while ((current_actor = actors->GetNextProp()) != NULL) + if (current_actor == actor) + { + renderer_->RemoveActor(actor); + return true; + } + return false; +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::setBackgroundColor(const Color& color, const Color& color2) +{ + Color c = vtkcolor(color), c2 = vtkcolor(color2); + bool gradient = color2[0] >= 0 && color2[1] >= 0 && color2[2] >= 0; + + if (gradient) + { + renderer_->SetBackground(c2.val); + renderer_->SetBackground2(c.val); + renderer_->GradientBackgroundOn(); + } + else + { + renderer_->SetBackground(c.val); + renderer_->GradientBackgroundOff(); + } +} + +void cv::viz::Viz3d::VizImpl::setBackgroundMeshLab() +{ setBackgroundColor(Color(2, 1, 1), Color(240, 120, 120)); } + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::setBackgroundTexture(InputArray image) +{ + if (image.empty()) + { + renderer_->SetBackgroundTexture(0); + renderer_->TexturedBackgroundOff(); + return; + } + + vtkSmartPointer source = vtkSmartPointer::New(); + source->SetImage(image); + + vtkSmartPointer image_flip = vtkSmartPointer::New(); + image_flip->SetFilteredAxis(1); // Vertical flip + image_flip->SetInputConnection(source->GetOutputPort()); + + vtkSmartPointer texture = vtkSmartPointer::New(); + 
texture->SetInputConnection(image_flip->GetOutputPort()); + //texture->Update(); + + renderer_->SetBackgroundTexture(texture); + renderer_->TexturedBackgroundOn(); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::setCamera(const Camera &camera) +{ + vtkSmartPointer active_camera = renderer_->GetActiveCamera(); + + // Set the intrinsic parameters of the camera + window_->SetSize(camera.getWindowSize().width, camera.getWindowSize().height); + double aspect_ratio = static_cast(camera.getWindowSize().width)/static_cast(camera.getWindowSize().height); + + Matx44d proj_mat; + camera.computeProjectionMatrix(proj_mat); + + // Use the intrinsic parameters of the camera to simulate more realistically + vtkSmartPointer vtk_matrix = active_camera->GetProjectionTransformMatrix(aspect_ratio, -1.0, 1.0); + Matx44d old_proj_mat(*vtk_matrix->Element); + + // This is a hack around not being able to set Projection Matrix + vtkSmartPointer transform = vtkSmartPointer::New(); + transform->SetMatrix(vtkmatrix(proj_mat * old_proj_mat.inv())); + active_camera->SetUserTransform(transform); + + renderer_->ResetCameraClippingRange(); + renderer_->Render(); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +cv::viz::Camera cv::viz::Viz3d::VizImpl::getCamera() const +{ + vtkSmartPointer active_camera = renderer_->GetActiveCamera(); + + Size window_size(renderer_->GetRenderWindow()->GetSize()[0], + renderer_->GetRenderWindow()->GetSize()[1]); + double aspect_ratio = window_size.width / (double)window_size.height; + + vtkSmartPointer proj_matrix = active_camera->GetProjectionTransformMatrix(aspect_ratio, -1.0f, 1.0f); + return Camera(Matx44d(*proj_matrix->Element), window_size); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::setViewerPose(const Affine3d &pose) +{ + vtkCamera& camera 
= *renderer_->GetActiveCamera(); + + // Position = extrinsic translation + cv::Vec3d pos_vec = pose.translation(); + + // Rotate the view vector + cv::Matx33d rotation = pose.rotation(); + cv::Vec3d y_axis(0.0, -1.0, 0.0); // In Computer Vision Camera Y-axis is oriented down + cv::Vec3d up_vec(rotation * y_axis); + + // Compute the new focal point + cv::Vec3d z_axis(0.0, 0.0, 1.0); + cv::Vec3d focal_vec = pose * z_axis; + + camera.SetPosition(pos_vec.val); + camera.SetFocalPoint(focal_vec.val); + camera.SetViewUp(up_vec.val); + + renderer_->ResetCameraClippingRange(); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +cv::Affine3d cv::viz::Viz3d::VizImpl::getViewerPose() const +{ + vtkCamera& camera = *renderer_->GetActiveCamera(); + + Vec3d pos(camera.GetPosition()); + Vec3d view_up(camera.GetViewUp()); + Vec3d focal(camera.GetFocalPoint()); + + Vec3d y_axis = normalized(-view_up); // In Computer Vision Camera Y-axis is oriented down + Vec3d z_axis = normalized(focal - pos); + Vec3d x_axis = normalized(y_axis.cross(z_axis)); + + return makeTransformToGlobal(x_axis, y_axis, z_axis, pos); +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::convertToWindowCoordinates(const Point3d &pt, Point3d &window_coord) +{ + Vec3d window_pt; + vtkInteractorObserver::ComputeWorldToDisplay(renderer_, pt.x, pt.y, pt.z, window_pt.val); + window_coord = window_pt; +} + +///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::converTo3DRay(const Point3d &window_coord, Point3d &origin, Vec3d &direction) +{ + Vec4d world_pt; + vtkInteractorObserver::ComputeDisplayToWorld(renderer_, window_coord.x, window_coord.y, window_coord.z, world_pt.val); + Vec3d cam_pos(renderer_->GetActiveCamera()->GetPosition()); + origin = cam_pos; + direction = normalize(Vec3d(world_pt.val) - cam_pos); +} + 
+///////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::resetCameraViewpoint(const String &id) +{ + vtkSmartPointer camera_pose; + static WidgetActorMap::iterator it = widget_actor_map_->find(id); + if (it != widget_actor_map_->end()) + { + vtkProp3D *actor = vtkProp3D::SafeDownCast(it->second); + CV_Assert("Widget is not 3D." && actor); + camera_pose = actor->GetUserMatrix(); + } + else + return; + + // Prevent a segfault + if (!camera_pose) return; + + vtkSmartPointer cam = renderer_->GetActiveCamera(); + cam->SetPosition(camera_pose->GetElement(0, 3), + camera_pose->GetElement(1, 3), + camera_pose->GetElement(2, 3)); + + cam->SetFocalPoint(camera_pose->GetElement(0, 3) - camera_pose->GetElement(0, 2), + camera_pose->GetElement(1, 3) - camera_pose->GetElement(1, 2), + camera_pose->GetElement(2, 3) - camera_pose->GetElement(2, 2)); + + cam->SetViewUp(camera_pose->GetElement(0, 1), + camera_pose->GetElement(1, 1), + camera_pose->GetElement(2, 1)); + + renderer_->SetActiveCamera(cam); + renderer_->ResetCameraClippingRange(); + renderer_->ResetCamera(); + renderer_->Render(); +} + +/////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::resetCamera() +{ + renderer_->ResetCamera(); +} + +/////////////////////////////////////////////////////////////////////////////////// +void cv::viz::Viz3d::VizImpl::setRepresentation(int representation) +{ + vtkActorCollection * actors = renderer_->GetActors(); + actors->InitTraversal(); + vtkActor * actor; + switch (representation) + { + case REPRESENTATION_POINTS: + { + while ((actor = actors->GetNextActor()) != NULL) + actor->GetProperty()->SetRepresentationToPoints(); + break; + } + case REPRESENTATION_SURFACE: + { + while ((actor = actors->GetNextActor()) != NULL) + actor->GetProperty()->SetRepresentationToSurface(); + break; + } + case REPRESENTATION_WIREFRAME: + { + while ((actor = 
actors->GetNextActor()) != NULL) + actor->GetProperty()->SetRepresentationToWireframe(); + break; + } + } +} + +////////////////////////////////////////////////////////////////////////////////////////////// +cv::String cv::viz::Viz3d::VizImpl::getWindowName() const { return window_name_; } +void cv::viz::Viz3d::VizImpl::setFullScreen(bool mode) { window_->SetFullScreen(mode); } +void cv::viz::Viz3d::VizImpl::setWindowPosition(const Point& position) { window_position_ = position; window_->SetPosition(position.x, position.y); } +void cv::viz::Viz3d::VizImpl::setWindowSize(const Size& window_size) { window_->SetSize(window_size.width, window_size.height); } +cv::Size cv::viz::Viz3d::VizImpl::getWindowSize() const { return Size(Point(Vec2i(window_->GetSize()))); } diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vizimpl.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vizimpl.hpp new file mode 100644 index 00000000..b39bc497 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vizimpl.hpp @@ -0,0 +1,146 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Ozan Tonkal, ozantonkal@gmail.com +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#ifndef __OPENCV_VIZ_VIZ3D_IMPL_HPP__ +#define __OPENCV_VIZ_VIZ3D_IMPL_HPP__ + +struct cv::viz::Viz3d::VizImpl +{ +public: + typedef Viz3d::KeyboardCallback KeyboardCallback; + typedef Viz3d::MouseCallback MouseCallback; + + int ref_counter; + + VizImpl(const String &name); + virtual ~VizImpl(); + + bool wasStopped() const; + void close(); + + void spin(); + void spinOnce(int time = 1, bool force_redraw = false); + void setOffScreenRendering(); + + void removeAllLights(); + void addLight(Vec3d position, Vec3d focalPoint, const Color &color, const Color &diffuseColor, + const Color &ambientColor, const Color &specularColor); + + void showWidget(const String &id, const Widget &widget, const Affine3d &pose = Affine3d::Identity()); + void removeWidget(const String &id); + Widget getWidget(const String &id) const; + void removeAllWidgets(); + + void showImage(InputArray image, const Size& window_size); + + void setWidgetPose(const String &id, const Affine3d &pose); + void updateWidgetPose(const String &id, const Affine3d &pose); + Affine3d getWidgetPose(const String &id) const; + + void setRepresentation(int representation); + + void setCamera(const Camera &camera); + Camera getCamera() const; + + /** \brief Reset the camera to a given widget */ + void resetCameraViewpoint(const String& id); + void resetCamera(); + + void setViewerPose(const Affine3d &pose); + Affine3d getViewerPose() const; + + void convertToWindowCoordinates(const Point3d &pt, Point3d &window_coord); + void converTo3DRay(const Point3d &window_coord, Point3d &origin, Vec3d &direction); + + Mat getScreenshot() const; + void saveScreenshot(const String &file); + void setWindowPosition(const Point& position); + Size getWindowSize() const; + void setWindowSize(const Size& window_size); + void setFullScreen(bool mode); + String getWindowName() const; + void setBackgroundColor(const Color& color, const Color& color2); + void setBackgroundTexture(InputArray 
image); + void setBackgroundMeshLab(); + + void registerKeyboardCallback(KeyboardCallback callback, void* cookie = 0); + void registerMouseCallback(MouseCallback callback, void* cookie = 0); + +private: + struct TimerCallback : public vtkCommand + { + static TimerCallback* New() { return new TimerCallback; } + virtual void Execute(vtkObject* caller, unsigned long event_id, void* cookie); + int timer_id; + }; + + struct ExitCallback : public vtkCommand + { + static ExitCallback* New() { return new ExitCallback; } + virtual void Execute(vtkObject*, unsigned long event_id, void*); + VizImpl* viz; + }; + + mutable bool spin_once_state_; + vtkSmartPointer interactor_; + + vtkSmartPointer window_; + String window_name_; + Vec2i window_position_; + + vtkSmartPointer timer_callback_; + vtkSmartPointer exit_callback_; + + vtkSmartPointer renderer_; + vtkSmartPointer style_; + Ptr widget_actor_map_; + + bool offScreenMode_; + + bool removeActorFromRenderer(vtkSmartPointer actor); + void recreateRenderWindow(); +}; + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCloudMatSink.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCloudMatSink.cpp new file mode 100644 index 00000000..aa3d34ca --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCloudMatSink.cpp @@ -0,0 +1,174 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "../precomp.hpp" + +namespace cv { namespace viz +{ + vtkStandardNewMacro(vtkCloudMatSink); +}} + +cv::viz::vtkCloudMatSink::vtkCloudMatSink() {} +cv::viz::vtkCloudMatSink::~vtkCloudMatSink() {} + +void cv::viz::vtkCloudMatSink::SetOutput(OutputArray _cloud, OutputArray _colors, OutputArray _normals, OutputArray _tcoords) +{ + cloud = _cloud; + colors = _colors; + normals = _normals; + tcoords = _tcoords; +} + +void cv::viz::vtkCloudMatSink::WriteData() +{ + vtkPolyData *input = this->GetInput(); + if (!input) + return; + + vtkSmartPointer points_Data = input->GetPoints(); + + if (cloud.needed() && points_Data) + { + int vtktype = points_Data->GetDataType(); + CV_Assert(vtktype == VTK_FLOAT || vtktype == VTK_DOUBLE); + + cloud.create(1, points_Data->GetNumberOfPoints(), vtktype == VTK_FLOAT ? CV_32FC3 : CV_64FC3); + Vec3d *ddata = cloud.getMat().ptr(); + Vec3f *fdata = cloud.getMat().ptr(); + + if (cloud.depth() == CV_32F) + for(size_t i = 0; i < cloud.total(); ++i) + *fdata++ = Vec3d(points_Data->GetPoint((vtkIdType)i)); + + if (cloud.depth() == CV_64F) + for(size_t i = 0; i < cloud.total(); ++i) + *ddata++ = Vec3d(points_Data->GetPoint((vtkIdType)i)); + } + else + cloud.release(); + + vtkSmartPointer scalars_data = input->GetPointData() ? input->GetPointData()->GetScalars() : 0; + + if (colors.needed() && scalars_data) + { + int channels = scalars_data->GetNumberOfComponents(); + int vtktype = scalars_data->GetDataType(); + + CV_Assert((channels == 3 || channels == 4) && "Only 3- or 4-channel color data support is implemented"); + CV_Assert(cloud.total() == (size_t)scalars_data->GetNumberOfTuples()); + + Mat buffer(cloud.size(), CV_64FC(channels)); + Vec3d *cptr = buffer.ptr(); + for(size_t i = 0; i < buffer.total(); ++i) + *cptr++ = Vec3d(scalars_data->GetTuple((vtkIdType)i)); + + buffer.convertTo(colors, CV_8U, vtktype == VTK_FLOAT || VTK_FLOAT == VTK_DOUBLE ? 
255.0 : 1.0); + } + else + colors.release(); + + vtkSmartPointer normals_data = input->GetPointData() ? input->GetPointData()->GetNormals() : 0; + + if (normals.needed() && normals_data) + { + int channels = normals_data->GetNumberOfComponents(); + int vtktype = normals_data->GetDataType(); + + CV_Assert((vtktype == VTK_FLOAT || VTK_FLOAT == VTK_DOUBLE) && (channels == 3 || channels == 4)); + CV_Assert(cloud.total() == (size_t)normals_data->GetNumberOfTuples()); + + Mat buffer(cloud.size(), CV_64FC(channels)); + Vec3d *cptr = buffer.ptr(); + for(size_t i = 0; i < buffer.total(); ++i) + *cptr++ = Vec3d(normals_data->GetTuple((vtkIdType)i)); + + buffer.convertTo(normals, vtktype == VTK_FLOAT ? CV_32F : CV_64F); + } + else + normals.release(); + + vtkSmartPointer coords_data = input->GetPointData() ? input->GetPointData()->GetTCoords() : 0; + + if (tcoords.needed() && coords_data) + { + int vtktype = coords_data->GetDataType(); + + CV_Assert(vtktype == VTK_FLOAT || VTK_FLOAT == VTK_DOUBLE); + CV_Assert(cloud.total() == (size_t)coords_data->GetNumberOfTuples()); + + Mat buffer(cloud.size(), CV_64FC2); + Vec2d *cptr = buffer.ptr(); + for(size_t i = 0; i < buffer.total(); ++i) + *cptr++ = Vec2d(coords_data->GetTuple((vtkIdType)i)); + + buffer.convertTo(tcoords, vtktype == VTK_FLOAT ? 
CV_32F : CV_64F); + + } + else + tcoords.release(); +} + +void cv::viz::vtkCloudMatSink::PrintSelf(ostream& os, vtkIndent indent) +{ + Superclass::PrintSelf(os, indent); + os << indent << "Cloud: " << cloud.needed() << "\n"; + os << indent << "Colors: " << colors.needed() << "\n"; + os << indent << "Normals: " << normals.needed() << "\n"; +} + +int cv::viz::vtkCloudMatSink::FillInputPortInformation(int, vtkInformation *info) +{ + info->Set(vtkAlgorithm::INPUT_REQUIRED_DATA_TYPE(), "vtkPolyData"); + return 1; +} + +vtkPolyData* cv::viz::vtkCloudMatSink::GetInput() +{ + return vtkPolyData::SafeDownCast(this->Superclass::GetInput()); +} + +vtkPolyData* cv::viz::vtkCloudMatSink::GetInput(int port) +{ + return vtkPolyData::SafeDownCast(this->Superclass::GetInput(port)); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCloudMatSink.h b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCloudMatSink.h new file mode 100644 index 00000000..997e6984 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCloudMatSink.h @@ -0,0 +1,88 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. myname.mysurname <> mycompany.com +// +//M*/ + +#ifndef __vtkCloudMatSink_h +#define __vtkCloudMatSink_h + +#include +#include + +namespace cv +{ + namespace viz + { + class vtkCloudMatSink : public vtkWriter + { + public: + static vtkCloudMatSink *New(); + vtkTypeMacro(vtkCloudMatSink,vtkWriter) + void PrintSelf(ostream& os, vtkIndent indent); + + void SetOutput(OutputArray cloud, OutputArray colors = noArray(), OutputArray normals = noArray(), OutputArray tcoords = noArray()); + + // Description: + // Get the input to this writer. 
+ vtkPolyData* GetInput(); + vtkPolyData* GetInput(int port); + + protected: + vtkCloudMatSink(); + ~vtkCloudMatSink(); + + void WriteData(); + int FillInputPortInformation(int port, vtkInformation *info); + + _OutputArray cloud; //!< point coordinates of type CV_32FC3 or CV_64FC3 with only 1 row + _OutputArray colors; //!< point color of type CV_8UC3 or CV_8UC4 with only 1 row + _OutputArray normals; //!< point normal of type CV_32FC3, CV_32FC4, CV_64FC3 or CV_64FC4 with only 1 row + _OutputArray tcoords; //!< texture coordinates of type CV_32FC2 or CV_64FC2 with only 1 row + + private: + vtkCloudMatSink(const vtkCloudMatSink&); // Not implemented. + void operator=(const vtkCloudMatSink&); // Not implemented. + }; + } // end namespace viz +} // end namespace cv + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCloudMatSource.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCloudMatSource.cpp new file mode 100644 index 00000000..5f1cecbf --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCloudMatSource.cpp @@ -0,0 +1,286 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "../precomp.hpp" + +namespace cv { namespace viz +{ + vtkStandardNewMacro(vtkCloudMatSource); + + template struct VtkDepthTraits; + + template<> struct VtkDepthTraits + { + const static int data_type = VTK_FLOAT; + typedef vtkFloatArray array_type; + }; + + template<> struct VtkDepthTraits + { + const static int data_type = VTK_DOUBLE; + typedef vtkDoubleArray array_type; + }; +}} + +cv::viz::vtkCloudMatSource::vtkCloudMatSource() { SetNumberOfInputPorts(0); } +cv::viz::vtkCloudMatSource::~vtkCloudMatSource() {} + +int cv::viz::vtkCloudMatSource::SetCloud(InputArray _cloud) +{ + CV_Assert(_cloud.depth() == CV_32F || _cloud.depth() == CV_64F); + CV_Assert(_cloud.channels() == 3 || _cloud.channels() == 4); + + Mat cloud = _cloud.getMat(); + + int total = _cloud.depth() == CV_32F ? filterNanCopy(cloud) : filterNanCopy(cloud); + + vertices = vtkSmartPointer::New(); + vertices->Allocate(vertices->EstimateSize(1, total)); + vertices->InsertNextCell(total); + for(int i = 0; i < total; ++i) + vertices->InsertCellPoint(i); + + return total; +} + +int cv::viz::vtkCloudMatSource::SetColorCloud(InputArray _cloud, InputArray _colors) +{ + int total = SetCloud(_cloud); + + if (_colors.empty()) + return total; + + CV_Assert(_colors.depth() == CV_8U && _colors.channels() <= 4 && _colors.channels() != 2); + CV_Assert(_colors.size() == _cloud.size()); + + Mat cloud = _cloud.getMat(); + Mat colors = _colors.getMat(); + + if (cloud.depth() == CV_32F) + filterNanColorsCopy(colors, cloud, total); + else if (cloud.depth() == CV_64F) + filterNanColorsCopy(colors, cloud, total); + + return total; +} + +int cv::viz::vtkCloudMatSource::SetColorCloudNormals(InputArray _cloud, InputArray _colors, InputArray _normals) +{ + int total = SetColorCloud(_cloud, _colors); + + if (_normals.empty()) + return total; + + CV_Assert(_normals.depth() == CV_32F || _normals.depth() == CV_64F); + CV_Assert(_normals.channels() == 3 || _normals.channels() 
== 4); + CV_Assert(_normals.size() == _cloud.size()); + + Mat c = _cloud.getMat(); + Mat n = _normals.getMat(); + + if (n.depth() == CV_32F && c.depth() == CV_32F) + filterNanNormalsCopy(n, c, total); + else if (n.depth() == CV_32F && c.depth() == CV_64F) + filterNanNormalsCopy(n, c, total); + else if (n.depth() == CV_64F && c.depth() == CV_32F) + filterNanNormalsCopy(n, c, total); + else if (n.depth() == CV_64F && c.depth() == CV_64F) + filterNanNormalsCopy(n, c, total); + else + CV_Error(Error::StsError, "Unsupported normals/cloud type"); + + return total; +} + +int cv::viz::vtkCloudMatSource::SetColorCloudNormalsTCoords(InputArray _cloud, InputArray _colors, InputArray _normals, InputArray _tcoords) +{ + int total = SetColorCloudNormals(_cloud, _colors, _normals); + + if (_tcoords.empty()) + return total; + + CV_Assert(_tcoords.depth() == CV_32F || _tcoords.depth() == CV_64F); + CV_Assert(_tcoords.channels() == 2 && _tcoords.size() == _cloud.size()); + + Mat cl = _cloud.getMat(); + Mat tc = _tcoords.getMat(); + + if (tc.depth() == CV_32F && cl.depth() == CV_32F) + filterNanTCoordsCopy(tc, cl, total); + else if (tc.depth() == CV_32F && cl.depth() == CV_64F) + filterNanTCoordsCopy(tc, cl, total); + else if (tc.depth() == CV_64F && cl.depth() == CV_32F) + filterNanTCoordsCopy(tc, cl, total); + else if (tc.depth() == CV_64F && cl.depth() == CV_64F) + filterNanTCoordsCopy(tc, cl, total); + else + CV_Error(Error::StsError, "Unsupported tcoords/cloud type"); + + return total; +} + +int cv::viz::vtkCloudMatSource::RequestData(vtkInformation *vtkNotUsed(request), vtkInformationVector **vtkNotUsed(inputVector), vtkInformationVector *outputVector) +{ + vtkInformation *outInfo = outputVector->GetInformationObject(0); + vtkPolyData *output = vtkPolyData::SafeDownCast(outInfo->Get(vtkDataObject::DATA_OBJECT())); + + output->SetPoints(points); + output->SetVerts(vertices); + if (scalars) + output->GetPointData()->SetScalars(scalars); + + if (normals) + 
output->GetPointData()->SetNormals(normals); + + if (tcoords) + output->GetPointData()->SetTCoords(tcoords); + + return 1; +} + +template +int cv::viz::vtkCloudMatSource::filterNanCopy(const Mat& cloud) +{ + CV_DbgAssert(DataType<_Tp>::depth == cloud.depth()); + points = vtkSmartPointer::New(); + points->SetDataType(VtkDepthTraits<_Tp>::data_type); + points->Allocate((vtkIdType)cloud.total()); + points->SetNumberOfPoints((vtkIdType)cloud.total()); + + int s_chs = cloud.channels(); + int total = 0; + for (int y = 0; y < cloud.rows; ++y) + { + const _Tp* srow = cloud.ptr<_Tp>(y); + const _Tp* send = srow + cloud.cols * s_chs; + + for (; srow != send; srow += s_chs) + if (!isNan(srow)) + points->SetPoint(total++, srow); + } + points->SetNumberOfPoints(total); + points->Squeeze(); + return total; +} + +template +void cv::viz::vtkCloudMatSource::filterNanColorsCopy(const Mat& cloud_colors, const Mat& mask, int total) +{ + Vec3b* array = new Vec3b[total]; + Vec3b* pos = array; + + int s_chs = cloud_colors.channels(); + int m_chs = mask.channels(); + for (int y = 0; y < cloud_colors.rows; ++y) + { + const unsigned char* srow = cloud_colors.ptr(y); + const unsigned char* send = srow + cloud_colors.cols * s_chs; + const _Msk* mrow = mask.ptr<_Msk>(y); + + if (cloud_colors.channels() == 1) + { + for (; srow != send; srow += s_chs, mrow += m_chs) + if (!isNan(mrow)) + *pos++ = Vec3b(srow[0], srow[0], srow[0]); + } + else + for (; srow != send; srow += s_chs, mrow += m_chs) + if (!isNan(mrow)) + *pos++ = Vec3b(srow[2], srow[1], srow[0]); + + } + + scalars = vtkSmartPointer::New(); + scalars->SetName("Colors"); + scalars->SetNumberOfComponents(3); + scalars->SetNumberOfTuples(total); + scalars->SetArray(array->val, total * 3, 0, vtkUnsignedCharArray::VTK_DATA_ARRAY_DELETE); +} + +template +void cv::viz::vtkCloudMatSource::filterNanNormalsCopy(const Mat& cloud_normals, const Mat& mask, int total) +{ + normals = vtkSmartPointer< typename VtkDepthTraits<_Tn>::array_type >::New(); 
+ normals->SetName("Normals"); + normals->SetNumberOfComponents(3); + normals->SetNumberOfTuples(total); + + int s_chs = cloud_normals.channels(); + int m_chs = mask.channels(); + + int pos = 0; + for (int y = 0; y < cloud_normals.rows; ++y) + { + const _Tn* srow = cloud_normals.ptr<_Tn>(y); + const _Tn* send = srow + cloud_normals.cols * s_chs; + + const _Msk* mrow = mask.ptr<_Msk>(y); + + for (; srow != send; srow += s_chs, mrow += m_chs) + if (!isNan(mrow)) + normals->SetTuple(pos++, srow); + } +} + +template +void cv::viz::vtkCloudMatSource::filterNanTCoordsCopy(const Mat& _tcoords, const Mat& mask, int total) +{ + typedef Vec<_Tn, 2> Vec2; + tcoords = vtkSmartPointer< typename VtkDepthTraits<_Tn>::array_type >::New(); + tcoords->SetName("TextureCoordinates"); + tcoords->SetNumberOfComponents(2); + tcoords->SetNumberOfTuples(total); + + int pos = 0; + for (int y = 0; y < mask.rows; ++y) + { + const Vec2* srow = _tcoords.ptr(y); + const Vec2* send = srow + _tcoords.cols; + const _Msk* mrow = mask.ptr<_Msk>(y); + + for (; srow != send; ++srow, mrow += mask.channels()) + if (!isNan(mrow)) + tcoords->SetTuple(pos++, srow->val); + } +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCloudMatSource.h b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCloudMatSource.h new file mode 100644 index 00000000..56bd93e0 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCloudMatSource.h @@ -0,0 +1,96 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. 
+// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#ifndef __vtkCloudMatSource_h +#define __vtkCloudMatSource_h + +#include +#include +#include +#include +#include + +namespace cv +{ + namespace viz + { + class vtkCloudMatSource : public vtkPolyDataAlgorithm + { + public: + static vtkCloudMatSource *New(); + vtkTypeMacro(vtkCloudMatSource,vtkPolyDataAlgorithm) + + virtual int SetCloud(InputArray cloud); + virtual int SetColorCloud(InputArray cloud, InputArray colors); + virtual int SetColorCloudNormals(InputArray cloud, InputArray colors, InputArray normals); + virtual int SetColorCloudNormalsTCoords(InputArray cloud, InputArray colors, InputArray normals, InputArray tcoords); + + protected: + vtkCloudMatSource(); + ~vtkCloudMatSource(); + + int RequestData(vtkInformation *, vtkInformationVector **, vtkInformationVector *); + + vtkSmartPointer points; + vtkSmartPointer vertices; + vtkSmartPointer scalars; + vtkSmartPointer normals; + vtkSmartPointer tcoords; + private: + vtkCloudMatSource(const vtkCloudMatSource&); // Not implemented. + void operator=(const vtkCloudMatSource&); // Not implemented. + + template int filterNanCopy(const Mat& cloud); + template void filterNanColorsCopy(const Mat& cloud_colors, const Mat& mask, int total); + + template + void filterNanNormalsCopy(const Mat& cloud_normals, const Mat& mask, int total); + + template + void filterNanTCoordsCopy(const Mat& tcoords, const Mat& mask, int total); + }; + } +} + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCocoaInteractorFix.mm b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCocoaInteractorFix.mm new file mode 100644 index 00000000..294556fe --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkCocoaInteractorFix.mm @@ -0,0 +1,227 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +// This workaround code was taken from PCL library(www.pointclouds.org) +// +//M*/ + +#import +#include +#include +#include +#include +#include + +namespace cv { namespace viz { + vtkSmartPointer vtkCocoaRenderWindowInteractorNew(); +}} // namespace + +#if ((VTK_MAJOR_VERSION < 6) || ((VTK_MAJOR_VERSION == 6) && (VTK_MINOR_VERSION < 2))) + + +//---------------------------------------------------------------------------- +@interface vtkCocoaServerFix : NSObject +{ + vtkCocoaRenderWindow* renWin; +} + ++ (id)cocoaServerWithRenderWindow:(vtkCocoaRenderWindow*)inRenderWindow; + +- (void)start; +- (void)stop; +- (void)breakEventLoop; + +@end + +//---------------------------------------------------------------------------- +@implementation vtkCocoaServerFix + +//---------------------------------------------------------------------------- +- (id)initWithRenderWindow:(vtkCocoaRenderWindow *)inRenderWindow +{ + self = [super init]; + if (self) + renWin = inRenderWindow; + return self; +} + +//---------------------------------------------------------------------------- ++ (id)cocoaServerWithRenderWindow:(vtkCocoaRenderWindow *)inRenderWindow +{ + vtkCocoaServerFix *server = [[[vtkCocoaServerFix alloc] initWithRenderWindow:inRenderWindow] autorelease]; + return server; +} + +//---------------------------------------------------------------------------- +- (void)start +{ + // Retrieve the NSWindow. + NSWindow *win = nil; + if (renWin) + { + win = reinterpret_cast (renWin->GetRootWindow ()); + + // We don't want to be informed of every window closing, so check for nil. + if (win != nil) + { + // Register for the windowWillClose notification in order to stop the run loop if the window closes. 
+ NSNotificationCenter *nc = [NSNotificationCenter defaultCenter]; + [nc addObserver:self selector:@selector(windowWillClose:) name:NSWindowWillCloseNotification object:win]; + } + } + // Start the NSApplication's run loop + NSApplication* application = [NSApplication sharedApplication]; + [application run]; +} + +//---------------------------------------------------------------------------- +- (void)stop +{ + [self breakEventLoop]; +} + +//---------------------------------------------------------------------------- +- (void)breakEventLoop +{ + NSApplication* application = [NSApplication sharedApplication]; + [application stop:application]; + + NSEvent *event = [NSEvent otherEventWithType:NSApplicationDefined + location:NSMakePoint(0.0,0.0) + modifierFlags:0 + timestamp:0 + windowNumber:-1 + context:nil + subtype:0 + data1:0 + data2:0]; + [application postEvent:event atStart:YES]; +} + +//---------------------------------------------------------------------------- +- (void)windowWillClose:(NSNotification*)aNotification +{ + (void)aNotification; + + NSNotificationCenter *nc = [NSNotificationCenter defaultCenter]; + [nc removeObserver:self name:NSWindowWillCloseNotification object:nil]; + + if (renWin) + { + int windowCreated = renWin->GetWindowCreated (); + if (windowCreated) + { + [self breakEventLoop]; + + // The NSWindow is closing, so prevent anyone from accidentally using it + renWin->SetRootWindow(NULL); + } + } +} + +@end + +//---------------------------------------------------------------------------- + +namespace cv { namespace viz +{ + class vtkCocoaRenderWindowInteractorFix : public vtkCocoaRenderWindowInteractor + { + public: + static vtkCocoaRenderWindowInteractorFix *New (); + vtkTypeMacro (vtkCocoaRenderWindowInteractorFix, vtkCocoaRenderWindowInteractor) + + virtual void Start (); + virtual void TerminateApp (); + + protected: + vtkCocoaRenderWindowInteractorFix () {} + ~vtkCocoaRenderWindowInteractorFix () {} + + private: + 
vtkCocoaRenderWindowInteractorFix (const vtkCocoaRenderWindowInteractorFix&); // Not implemented. + void operator = (const vtkCocoaRenderWindowInteractorFix&); // Not implemented. + }; + + vtkStandardNewMacro (vtkCocoaRenderWindowInteractorFix) +}} + +void cv::viz::vtkCocoaRenderWindowInteractorFix::Start () +{ + vtkCocoaRenderWindow* renWin = vtkCocoaRenderWindow::SafeDownCast(this->GetRenderWindow ()); + if (renWin != NULL) + { + vtkCocoaServerFix *server = reinterpret_cast (this->GetCocoaServer ()); + if (!this->GetCocoaServer ()) + { + server = [vtkCocoaServerFix cocoaServerWithRenderWindow:renWin]; + this->SetCocoaServer (reinterpret_cast (server)); + } + + [server start]; + } +} + +void cv::viz::vtkCocoaRenderWindowInteractorFix::TerminateApp () +{ + vtkCocoaRenderWindow *renWin = vtkCocoaRenderWindow::SafeDownCast (this->RenderWindow); + if (renWin) + { + vtkCocoaServerFix *server = reinterpret_cast (this->GetCocoaServer ()); + [server stop]; + } +} + +vtkSmartPointer cv::viz::vtkCocoaRenderWindowInteractorNew() +{ + return vtkSmartPointer::New(); +} + + +#else + +vtkSmartPointer cv::viz::vtkCocoaRenderWindowInteractorNew() +{ + return vtkSmartPointer::New(); +} + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkImageMatSource.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkImageMatSource.cpp new file mode 100644 index 00000000..d9de698d --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkImageMatSource.cpp @@ -0,0 +1,143 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "../precomp.hpp" + +namespace cv { namespace viz +{ + vtkStandardNewMacro(vtkImageMatSource); +}} + +cv::viz::vtkImageMatSource::vtkImageMatSource() +{ + this->SetNumberOfInputPorts(0); + this->ImageData = vtkSmartPointer::New(); +} + +int cv::viz::vtkImageMatSource::RequestInformation(vtkInformation *, vtkInformationVector**, vtkInformationVector *outputVector) +{ + vtkInformation* outInfo = outputVector->GetInformationObject(0); + + outInfo->Set(vtkStreamingDemandDrivenPipeline::WHOLE_EXTENT(), this->ImageData->GetExtent(), 6); + outInfo->Set(vtkDataObject::SPACING(), 1.0, 1.0, 1.0); + outInfo->Set(vtkDataObject::ORIGIN(), 0.0, 0.0, 0.0); + + vtkDataObject::SetPointDataActiveScalarInfo(outInfo, this->ImageData->GetScalarType(), this->ImageData->GetNumberOfScalarComponents()); + return 1; +} + +int cv::viz::vtkImageMatSource::RequestData(vtkInformation*, vtkInformationVector**, vtkInformationVector *outputVector) +{ + vtkInformation *outInfo = outputVector->GetInformationObject(0); + + vtkImageData *output = vtkImageData::SafeDownCast(outInfo->Get(vtkDataObject::DATA_OBJECT()) ); + output->ShallowCopy(this->ImageData); + return 1; +} + +void cv::viz::vtkImageMatSource::SetImage(InputArray _image) +{ + CV_Assert(_image.depth() == CV_8U && (_image.channels() == 1 || _image.channels() == 3 || _image.channels() == 4)); + + Mat image = _image.getMat(); + + this->ImageData->SetDimensions(image.cols, image.rows, 1); +#if VTK_MAJOR_VERSION <= 5 + this->ImageData->SetNumberOfScalarComponents(image.channels()); + this->ImageData->SetScalarTypeToUnsignedChar(); + this->ImageData->AllocateScalars(); +#else + this->ImageData->AllocateScalars(VTK_UNSIGNED_CHAR, image.channels()); +#endif + + switch(image.channels()) + { + case 1: copyGrayImage(image, this->ImageData); break; + case 3: copyRGBImage (image, this->ImageData); break; + case 4: copyRGBAImage(image, this->ImageData); break; + } + this->ImageData->Modified(); +} 
+ +void cv::viz::vtkImageMatSource::copyGrayImage(const Mat &source, vtkSmartPointer output) +{ + unsigned char* dptr = reinterpret_cast(output->GetScalarPointer()); + size_t elem_step = output->GetIncrements()[1]/sizeof(unsigned char); + + for (int y = 0; y < source.rows; ++y) + { + unsigned char* drow = dptr + elem_step * y; + const unsigned char *srow = source.ptr(y); + for (int x = 0; x < source.cols; ++x) + drow[x] = *srow++; + } +} + +void cv::viz::vtkImageMatSource::copyRGBImage(const Mat &source, vtkSmartPointer output) +{ + Vec3b* dptr = reinterpret_cast(output->GetScalarPointer()); + size_t elem_step = output->GetIncrements()[1]/sizeof(Vec3b); + + for (int y = 0; y < source.rows; ++y) + { + Vec3b* drow = dptr + elem_step * y; + const unsigned char *srow = source.ptr(y); + for (int x = 0; x < source.cols; ++x, srow += source.channels()) + drow[x] = Vec3b(srow[2], srow[1], srow[0]); + } +} + +void cv::viz::vtkImageMatSource::copyRGBAImage(const Mat &source, vtkSmartPointer output) +{ + Vec4b* dptr = reinterpret_cast(output->GetScalarPointer()); + size_t elem_step = output->GetIncrements()[1]/sizeof(Vec4b); + + for (int y = 0; y < source.rows; ++y) + { + Vec4b* drow = dptr + elem_step * y; + const unsigned char *srow = source.ptr(y); + for (int x = 0; x < source.cols; ++x, srow += source.channels()) + drow[x] = Vec4b(srow[2], srow[1], srow[0], srow[3]); + } +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkImageMatSource.h b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkImageMatSource.h new file mode 100644 index 00000000..a7a41e08 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkImageMatSource.h @@ -0,0 +1,82 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "../precomp.hpp" + +#ifndef __vtkImageMatSource_h +#define __vtkImageMatSource_h + +namespace cv +{ + namespace viz + { + class vtkImageMatSource : public vtkImageAlgorithm + { + public: + static vtkImageMatSource *New(); + vtkTypeMacro(vtkImageMatSource,vtkImageAlgorithm); + + void SetImage(InputArray image); + + protected: + vtkImageMatSource(); + ~vtkImageMatSource() {} + + vtkSmartPointer ImageData; + + int RequestInformation(vtkInformation*, vtkInformationVector**, vtkInformationVector*); + int RequestData (vtkInformation*, vtkInformationVector**, vtkInformationVector*); + private: + vtkImageMatSource(const vtkImageMatSource&); // Not implemented. + void operator=(const vtkImageMatSource&); // Not implemented. + + static void copyGrayImage(const Mat &source, vtkSmartPointer output); + static void copyRGBImage (const Mat &source, vtkSmartPointer output); + static void copyRGBAImage(const Mat &source, vtkSmartPointer output); + }; + } +} + + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkOBJWriter.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkOBJWriter.cpp new file mode 100644 index 00000000..2e5764fc --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkOBJWriter.cpp @@ -0,0 +1,274 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "../precomp.hpp" + +namespace cv { namespace viz +{ + vtkStandardNewMacro(vtkOBJWriter); +}} + +cv::viz::vtkOBJWriter::vtkOBJWriter() +{ + std::ofstream fout; // only used to extract the default precision + this->DecimalPrecision = fout.precision(); + this->FileName = NULL; +} + +cv::viz::vtkOBJWriter::~vtkOBJWriter(){} + +void cv::viz::vtkOBJWriter::WriteData() +{ + vtkPolyData *input = this->GetInput(); + if (!input) + return; + + if (!this->FileName ) + { + vtkErrorMacro(<< "No FileName specified! Can't write!"); + this->SetErrorCode(vtkErrorCode::NoFileNameError); + return; + } + + vtkDebugMacro(<<"Opening vtk file for writing..."); + std::ostream *outfilep = new std::ofstream(this->FileName, ios::out); + if (outfilep->fail()) + { + vtkErrorMacro(<< "Unable to open file: "<< this->FileName); + this->SetErrorCode(vtkErrorCode::CannotOpenFileError); + delete outfilep; + return; + } + + std::ostream& outfile = *outfilep; + + //write header + outfile << "# wavefront obj file written by opencv viz module" << std::endl << std::endl; + outfile << "mtllib NONE" << std::endl << std::endl; + + // write out the points + for (int i = 0; i < input->GetNumberOfPoints(); i++) + { + Vec3d p; + input->GetPoint(i, p.val); + outfile << std::setprecision(this->DecimalPrecision) << "v " << p[0] << " " << p[1] << " " << p[2] << std::endl; + } + + const int idStart = 1; + + // write out the point data + vtkSmartPointer normals = input->GetPointData()->GetNormals(); + if(normals) + { + for (int i = 0; i < normals->GetNumberOfTuples(); i++) + { + Vec3d p; + normals->GetTuple(i, p.val); + outfile << std::setprecision(this->DecimalPrecision) << "vn " << p[0] << " " << p[1] << " " << p[2] << std::endl; + } + } + + vtkSmartPointer tcoords = input->GetPointData()->GetTCoords(); + if (tcoords) + { + for (int i = 0; i < tcoords->GetNumberOfTuples(); i++) + { + Vec2d p; + tcoords->GetTuple(i, p.val); + outfile << 
std::setprecision(this->DecimalPrecision) << "vt " << p[0] << " " << p[1] << std::endl; + } + } + + // write out a group name and material + outfile << std::endl << "g grp" << idStart << std::endl; + outfile << "usemtl mtlNONE" << std::endl; + + // write out verts if any + if (input->GetNumberOfVerts() > 0) + { + vtkIdType npts = 0; + CellIterT index = 0; + vtkCellArray *cells = input->GetVerts(); + for (cells->InitTraversal(); cells->GetNextCell(npts, index); ) + { + outfile << "p "; + for (int i = 0; i < npts; i++) + outfile << index[i] + idStart << " "; + outfile << std::endl; + } + } + + // write out lines if any + if (input->GetNumberOfLines() > 0) + { + vtkIdType npts = 0; + CellIterT index = 0; + vtkCellArray *cells = input->GetLines(); + for (cells->InitTraversal(); cells->GetNextCell(npts, index); ) + { + outfile << "l "; + if (tcoords) + { + for (int i = 0; i < npts; i++) + outfile << index[i] + idStart << "/" << index[i] + idStart << " "; + } + else + for (int i = 0; i < npts; i++) + outfile << index[i] + idStart << " "; + + outfile << std::endl; + } + } + + // write out polys if any + if (input->GetNumberOfPolys() > 0) + { + vtkIdType npts = 0; + CellIterT index = 0; + vtkCellArray *cells = input->GetPolys(); + for (cells->InitTraversal(); cells->GetNextCell(npts, index); ) + { + outfile << "f "; + for (int i = 0; i < npts; i++) + { + if (normals) + { + if (tcoords) + outfile << index[i] + idStart << "/" << index[i] + idStart << "/" << index[i] + idStart << " "; + else + outfile << index[i] + idStart << "//" << index[i] + idStart << " "; + } + else + { + if (tcoords) + outfile << index[i] + idStart << " " << index[i] + idStart << " "; + else + outfile << index[i] + idStart << " "; + } + } + outfile << std::endl; + } + } + + // write out tstrips if any + if (input->GetNumberOfStrips() > 0) + { + vtkIdType npts = 0; + CellIterT index = 0; + vtkCellArray *cells = input->GetStrips(); + for (cells->InitTraversal(); cells->GetNextCell(npts, index); ) + { + 
for (int i = 2, i1, i2; i < npts; ++i) + { + if (i % 2) + { + i1 = i - 1; + i2 = i - 2; + } + else + { + i1 = i - 1; + i2 = i - 2; + } + + if(normals) + { + if (tcoords) + { + outfile << "f " << index[i1] + idStart << "/" << index[i1] + idStart << "/" << index[i1] + idStart << " " + << index[i2]+ idStart << "/" << index[i2] + idStart << "/" << index[i2] + idStart << " " + << index[i] + idStart << "/" << index[i] + idStart << "/" << index[i] + idStart << std::endl; + } + else + { + outfile << "f " << index[i1] + idStart << "//" << index[i1] + idStart << " " << index[i2] + idStart + << "//" << index[i2] + idStart << " " << index[i] + idStart << "//" << index[i] + idStart << std::endl; + } + } + else + { + if (tcoords) + { + outfile << "f " << index[i1] + idStart << "/" << index[i1] + idStart << " " << index[i2] + idStart + << "/" << index[i2] + idStart << " " << index[i] + idStart << "/" << index[i] + idStart << std::endl; + } + else + outfile << "f " << index[i1] + idStart << " " << index[i2] + idStart << " " << index[i] + idStart << std::endl; + } + } /* for (int i = 2; i < npts; ++i) */ + } + } /* if (input->GetNumberOfStrips() > 0) */ + + vtkDebugMacro(<<"Closing vtk file\n"); + delete outfilep; + + // Delete the file if an error occurred + if (this->ErrorCode == vtkErrorCode::OutOfDiskSpaceError) + { + vtkErrorMacro("Ran out of disk space; deleting file: " << this->FileName); + unlink(this->FileName); + } +} + +void cv::viz::vtkOBJWriter::PrintSelf(ostream& os, vtkIndent indent) +{ + Superclass::PrintSelf(os, indent); + os << indent << "DecimalPrecision: " << DecimalPrecision << "\n"; +} + +int cv::viz::vtkOBJWriter::FillInputPortInformation(int, vtkInformation *info) +{ + info->Set(vtkAlgorithm::INPUT_REQUIRED_DATA_TYPE(), "vtkPolyData"); + return 1; +} + +vtkPolyData* cv::viz::vtkOBJWriter::GetInput() +{ + return vtkPolyData::SafeDownCast(this->Superclass::GetInput()); +} + +vtkPolyData* cv::viz::vtkOBJWriter::GetInput(int port) +{ + return 
vtkPolyData::SafeDownCast(this->Superclass::GetInput(port)); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkOBJWriter.h b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkOBJWriter.h new file mode 100644 index 00000000..7ad0f17b --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkOBJWriter.h @@ -0,0 +1,91 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. myname.mysurname <> mycompany.com +// +//M*/ + +#ifndef __vtkOBJWriter_h +#define __vtkOBJWriter_h + +#include + +namespace cv +{ + namespace viz + { + class vtkOBJWriter : public vtkWriter + { + public: + static vtkOBJWriter *New(); + vtkTypeMacro(vtkOBJWriter,vtkWriter) + void PrintSelf(ostream& os, vtkIndent indent); + + vtkGetMacro(DecimalPrecision, int) + vtkSetMacro(DecimalPrecision, int) + + // Description: + // Specify file name of data file to write. + vtkSetStringMacro(FileName) + vtkGetStringMacro(FileName) + + // Description: + // Get the input to this writer. + vtkPolyData* GetInput(); + vtkPolyData* GetInput(int port); + + protected: + vtkOBJWriter(); + ~vtkOBJWriter(); + + void WriteData(); + int FillInputPortInformation(int port, vtkInformation *info); + + int DecimalPrecision; + char *FileName; + + private: + vtkOBJWriter(const vtkOBJWriter&); // Not implemented. + void operator=(const vtkOBJWriter&); // Not implemented. 
+ }; + } +} + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkTrajectorySource.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkTrajectorySource.cpp new file mode 100644 index 00000000..d0e180a9 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkTrajectorySource.cpp @@ -0,0 +1,110 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. myname.mysurname <> mycompany.com +// +//M*/ + +#include "../precomp.hpp" + +namespace cv { namespace viz +{ + vtkStandardNewMacro(vtkTrajectorySource); +}} + +cv::viz::vtkTrajectorySource::vtkTrajectorySource() { SetNumberOfInputPorts(0); } +cv::viz::vtkTrajectorySource::~vtkTrajectorySource() {} + +void cv::viz::vtkTrajectorySource::SetTrajectory(InputArray _traj) +{ + CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT); + CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16)); + + Mat traj; + _traj.getMat().convertTo(traj, CV_64F); + const Affine3d* dpath = traj.ptr(); + size_t total = traj.total(); + + points = vtkSmartPointer::New(); + points->SetDataType(VTK_DOUBLE); + points->SetNumberOfPoints((vtkIdType)total); + + tensors = vtkSmartPointer::New(); + tensors->SetNumberOfComponents(9); + tensors->SetNumberOfTuples((vtkIdType)total); + + for(size_t i = 0; i < total; ++i, ++dpath) + { + Matx33d R = dpath->rotation().t(); // transposed because of + tensors->SetTuple((vtkIdType)i, R.val); // column major order + + Vec3d p = dpath->translation(); + points->SetPoint((vtkIdType)i, p.val); + } +} + +cv::Mat cv::viz::vtkTrajectorySource::ExtractPoints(InputArray _traj) +{ + CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT); + CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16)); 
+ + Mat points(1, (int)_traj.total(), CV_MAKETYPE(_traj.depth(), 3)); + const Affine3d* dpath = _traj.getMat().ptr(); + const Affine3f* fpath = _traj.getMat().ptr(); + + if (_traj.depth() == CV_32F) + for(int i = 0; i < points.cols; ++i) + points.at(i) = fpath[i].translation(); + + if (_traj.depth() == CV_64F) + for(int i = 0; i < points.cols; ++i) + points.at(i) = dpath[i].translation(); + + return points; +} + +int cv::viz::vtkTrajectorySource::RequestData(vtkInformation *vtkNotUsed(request), vtkInformationVector **vtkNotUsed(inputVector), vtkInformationVector *outputVector) +{ + vtkInformation *outInfo = outputVector->GetInformationObject(0); + vtkPolyData *output = vtkPolyData::SafeDownCast(outInfo->Get(vtkDataObject::DATA_OBJECT())); + output->SetPoints(points); + output->GetPointData()->SetTensors(tensors); + return 1; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkTrajectorySource.h b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkTrajectorySource.h new file mode 100644 index 00000000..f6c9c77b --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkTrajectorySource.h @@ -0,0 +1,84 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#ifndef __vtkTrajectorySource_h +#define __vtkTrajectorySource_h + +#include +#include +#include +#include +#include + +namespace cv +{ + namespace viz + { + class vtkTrajectorySource : public vtkPolyDataAlgorithm + { + public: + static vtkTrajectorySource *New(); + vtkTypeMacro(vtkTrajectorySource,vtkPolyDataAlgorithm) + + virtual void SetTrajectory(InputArray trajectory); + + static Mat ExtractPoints(InputArray trajectory); + + protected: + vtkTrajectorySource(); + ~vtkTrajectorySource(); + + vtkSmartPointer points; + vtkSmartPointer tensors; + + int RequestData(vtkInformation *, vtkInformationVector **, vtkInformationVector *); + private: + vtkTrajectorySource(const vtkTrajectorySource&); // Not implemented. + void operator=(const vtkTrajectorySource&); // Not implemented. + + }; + } +} + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkVizInteractorStyle.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkVizInteractorStyle.cpp new file mode 100644 index 00000000..e2d33806 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkVizInteractorStyle.cpp @@ -0,0 +1,1076 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Ozan Tonkal, ozantonkal@gmail.com +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "../precomp.hpp" + +namespace cv { namespace viz +{ + vtkStandardNewMacro(vtkVizInteractorStyle) +}} + +////////////////////////////////////////////////////////////////////////////////////////////// + +cv::viz::vtkVizInteractorStyle::vtkVizInteractorStyle() +{ + FlyMode = false; + MotionFactor = 10.0; + + keyboardCallback_ = 0; + keyboard_callback_cookie_ = 0; + + mouseCallback_ = 0; + mouse_callback_cookie_ = 0; + + // Set windows size (width, height) to unknown (-1) + win_size_ = Vec2i(-1, -1); + win_pos_ = Vec2i(0, 0); + max_win_size_ = Vec2i(-1, -1); + + stereo_anaglyph_redblue_ = true; + + //from fly + KeysDown = 0; + UseTimers = 1; + + DiagonalLength = 1.0; + MotionStepSize = 1.0/100.0; + MotionUserScale = 1.0; // +/- key adjustment + MotionAccelerationFactor = 10.0; + AngleStepSize = 1.0; +} + +cv::viz::vtkVizInteractorStyle::~vtkVizInteractorStyle() {} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::saveScreenshot(const String &file) +{ + FindPokedRenderer(Interactor->GetEventPosition()[0], Interactor->GetEventPosition()[1]); + + vtkSmartPointer wif = vtkSmartPointer::New(); + wif->SetInput(Interactor->GetRenderWindow()); + + vtkSmartPointer snapshot_writer = vtkSmartPointer::New(); + snapshot_writer->SetInputConnection(wif->GetOutputPort()); + snapshot_writer->SetFileName(file.c_str()); + snapshot_writer->Write(); + + cout << "Screenshot successfully captured (" << file.c_str() << ")" << endl; +} + +////////////////////////////////////////////////////////////////////////////////////////////// + +void cv::viz::vtkVizInteractorStyle::exportScene(const String &file) +{ + vtkSmartPointer exporter; + if (file.size() > 5 && file.substr(file.size() - 5) == ".vrml") + { + exporter = vtkSmartPointer::New(); + vtkVRMLExporter::SafeDownCast(exporter)->SetFileName(file.c_str()); + } + else + { + exporter = 
vtkSmartPointer::New(); + vtkOBJExporter::SafeDownCast(exporter)->SetFilePrefix(file.c_str()); + } + + exporter->SetInput(Interactor->GetRenderWindow()); + exporter->Write(); + + cout << "Scene successfully exported (" << file.c_str() << ")" << endl; +} + +void cv::viz::vtkVizInteractorStyle::exportScene() +{ + // Export scene as in obj or vrml format + String format = Interactor->GetAltKey() ? "scene-%d.vrml" : "scene-%d"; + exportScene(cv::format(format.c_str(), (unsigned int)time(0))); +} + +////////////////////////////////////////////////////////////////////////////////////////////// + +void cv::viz::vtkVizInteractorStyle::changePointsSize(float delta) +{ + vtkSmartPointer ac = CurrentRenderer->GetActors(); + vtkCollectionSimpleIterator ait; + + for (ac->InitTraversal(ait); vtkActor* actor = ac->GetNextActor(ait); ) + for (actor->InitPathTraversal(); vtkAssemblyPath* path = actor->GetNextPath(); ) + { + vtkActor* apart = vtkActor::SafeDownCast(path->GetLastNode()->GetViewProp()); + float psize = apart->GetProperty()->GetPointSize() + delta; + psize = std::max(1.f, std::min(63.f, psize)); + apart->GetProperty()->SetPointSize(psize); + } +} + +void cv::viz::vtkVizInteractorStyle::setRepresentationToPoints() +{ + vtkSmartPointer ac = CurrentRenderer->GetActors(); + vtkCollectionSimpleIterator ait; + for (ac->InitTraversal(ait); vtkActor* actor = ac->GetNextActor(ait); ) + for (actor->InitPathTraversal(); vtkAssemblyPath* path = actor->GetNextPath(); ) + { + vtkActor* apart = vtkActor::SafeDownCast(path->GetLastNode()->GetViewProp()); + apart->GetProperty()->SetRepresentationToPoints(); + } +} + +////////////////////////////////////////////////////////////////////////////////////////////// + +void cv::viz::vtkVizInteractorStyle::printCameraParams() +{ + vtkSmartPointer cam = Interactor->GetRenderWindow()->GetRenderers()->GetFirstRenderer()->GetActiveCamera(); + + Vec2d clip(cam->GetClippingRange()); + Vec3d focal(cam->GetFocalPoint()), pos(cam->GetPosition()), 
view(cam->GetViewUp()); + Vec2i win_pos(Interactor->GetRenderWindow()->GetPosition()); + Vec2i win_size(Interactor->GetRenderWindow()->GetSize()); + double angle = cam->GetViewAngle () / 180.0 * CV_PI; + + String data = cv::format("clip(%f,%f) focal(%f,%f,%f) pos(%f,%f,%f) view(%f,%f,%f) angle(%f) winsz(%d,%d) winpos(%d,%d)", + clip[0], clip[1], focal[0], focal[1], focal[2], pos[0], pos[1], pos[2], view[0], view[1], view[2], + angle, win_size[0], win_size[1], win_pos[0], win_pos[1]); + + std::cout << data.c_str() << std::endl; +} + +////////////////////////////////////////////////////////////////////////////////////////////// + +void cv::viz::vtkVizInteractorStyle::toggleFullScreen() +{ + Vec2i screen_size(Interactor->GetRenderWindow()->GetScreenSize()); + Vec2i win_size(Interactor->GetRenderWindow()->GetSize()); + + // Is window size = max? + if (win_size == max_win_size_) + { + Interactor->GetRenderWindow()->SetSize(win_size_.val); + Interactor->GetRenderWindow()->SetPosition(win_pos_.val); + Interactor->Render(); + } + // Set to max + else + { + win_pos_ = Vec2i(Interactor->GetRenderWindow()->GetPosition()); + win_size_ = win_size; + + Interactor->GetRenderWindow()->SetSize(screen_size.val); + Interactor->Render(); + max_win_size_ = Vec2i(Interactor->GetRenderWindow()->GetSize()); + } +} + +////////////////////////////////////////////////////////////////////////////////////////////// + +void cv::viz::vtkVizInteractorStyle::resetViewerPose() +{ + WidgetActorMap::iterator it = widget_actor_map_->begin(); + // it might be that some actors don't have a valid transformation set -> we skip them to avoid a seg fault. + for (; it != widget_actor_map_->end(); ++it) + { + vtkProp3D * actor = vtkProp3D::SafeDownCast(it->second); + if (actor && actor->GetUserMatrix()) + break; + } + + vtkSmartPointer cam = CurrentRenderer->GetActiveCamera(); + + // if a valid transformation was found, use it otherwise fall back to default view point. 
+ if (it != widget_actor_map_->end()) + { + vtkMatrix4x4* m = vtkProp3D::SafeDownCast(it->second)->GetUserMatrix(); + + cam->SetFocalPoint(m->GetElement(0, 3) - m->GetElement(0, 2), + m->GetElement(1, 3) - m->GetElement(1, 2), + m->GetElement(2, 3) - m->GetElement(2, 2)); + + cam->SetViewUp (m->GetElement(0, 1), m->GetElement(1, 1), m->GetElement(2, 1)); + cam->SetPosition(m->GetElement(0, 3), m->GetElement(1, 3), m->GetElement(2, 3)); + } + else + { + cam->SetPosition(0, 0, 0); + cam->SetFocalPoint(0, 0, 1); + cam->SetViewUp(0, -1, 0); + } + + // go to the next actor for the next key-press event. + if (it != widget_actor_map_->end()) + ++it; + else + it = widget_actor_map_->begin(); + + CurrentRenderer->SetActiveCamera(cam); + CurrentRenderer->ResetCameraClippingRange(); + Interactor->Render(); +} + +////////////////////////////////////////////////////////////////////////////////////////////// + +void cv::viz::vtkVizInteractorStyle::toggleStereo() +{ + vtkSmartPointer window = Interactor->GetRenderWindow(); + if (!window->GetStereoRender()) + { + static Vec2i red_blue(4, 3), magenta_green(2, 5); + window->SetAnaglyphColorMask (stereo_anaglyph_redblue_ ? 
red_blue.val : magenta_green.val); + stereo_anaglyph_redblue_ = !stereo_anaglyph_redblue_; + } + window->SetStereoRender(!window->GetStereoRender()); + Interactor->Render(); + +} + +////////////////////////////////////////////////////////////////////////////////////////////// + +void cv::viz::vtkVizInteractorStyle::printHelp() +{ + std::cout << "| Help:\n" + "-------\n" + " p, P : switch to a point-based representation\n" + " w, W : switch to a wireframe-based representation (where available)\n" + " s, S : switch to a surface-based representation (where available)\n" + "\n" + " j, J : take a .PNG snapshot of the current window view\n" + " k, K : export scene to Wavefront .obj format\n" + " ALT + k, K : export scene to VRML format\n" + " c, C : display current camera/window parameters\n" + " F5 : enable/disable fly mode (changes control style)\n" + "\n" + " e, E : exit the interactor\n" + " q, Q : stop and call VTK's TerminateApp\n" + "\n" + " +/- : increment/decrement overall point size\n" + " +/- [+ ALT] : zoom in/out \n" + "\n" + " r, R [+ ALT] : reset camera [to viewpoint = {0, 0, 0} -> center_{x, y, z}]\n" + "\n" + " ALT + s, S : turn stereo mode on/off\n" + " ALT + f, F : switch between maximized window mode and original size\n" + "\n" + << std::endl; +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::zoomIn() +{ + FindPokedRenderer(Interactor->GetEventPosition()[0], Interactor->GetEventPosition()[1]); + // Zoom in + StartDolly(); + double factor = 10.0 * 0.2 * .5; + Dolly(std::pow(1.1, factor)); + EndDolly(); +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::zoomOut() +{ + FindPokedRenderer(Interactor->GetEventPosition()[0], Interactor->GetEventPosition()[1]); + // Zoom out + StartDolly(); + double factor = 10.0 * -0.2 * .5; + Dolly(std::pow(1.1, factor)); + EndDolly(); +} + 
+////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnChar() +{ + FindPokedRenderer(Interactor->GetEventPosition()[0], Interactor->GetEventPosition()[1]); + + String key(Interactor->GetKeySym()); + if (key.find("XF86ZoomIn") != String::npos) + zoomIn(); + else if (key.find("XF86ZoomOut") != String::npos) + zoomOut(); + + switch (Interactor->GetKeyCode()) + { +// // All of the options below simply exit +// case 'l': case 'L': case 'j': case 'J': case 'c': case 'C': case 'q': case 'Q': +// case 'f': case 'F': case 'g': case 'G': case 'o': case 'O': case 'u': case 'U': + case 'p': case 'P': + break; + + case '+': + if (FlyMode) + MotionUserScale = std::min(16.0, MotionUserScale*2.0); + break; + case '-': + if (FlyMode) + MotionUserScale = std::max(MotionUserScale * 0.5, 0.0625); + break; + + case 'r': case 'R': case 's': case 'S': + if (!Interactor->GetAltKey()) + Superclass::OnChar(); + break; + default: + Superclass::OnChar(); + break; + } +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::registerMouseCallback(void (*callback)(const MouseEvent&, void*), void* cookie) +{ + mouseCallback_ = callback; + mouse_callback_cookie_ = cookie; +} + +void cv::viz::vtkVizInteractorStyle::registerKeyboardCallback(void (*callback)(const KeyboardEvent&, void*), void *cookie) +{ + keyboardCallback_ = callback; + keyboard_callback_cookie_ = cookie; +} + +////////////////////////////////////////////////////////////////////////////////////////////// +int cv::viz::vtkVizInteractorStyle::getModifiers() +{ + int modifiers = KeyboardEvent::NONE; + + if (Interactor->GetAltKey()) + modifiers |= KeyboardEvent::ALT; + + if (Interactor->GetControlKey()) + modifiers |= KeyboardEvent::CTRL; + + if (Interactor->GetShiftKey()) + modifiers |= KeyboardEvent::SHIFT; + return modifiers; +} + 
+////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnKeyDown() +{ + FindPokedRenderer(Interactor->GetEventPosition()[0], Interactor->GetEventPosition()[1]); + + String key(Interactor->GetKeySym()); + if (key.find("XF86ZoomIn") != String::npos) + zoomIn(); + else if (key.find("XF86ZoomOut") != String::npos) + zoomOut(); + else if (key.find("F5") != String::npos) + { + FlyMode = !FlyMode; + std::cout << (FlyMode ? "Fly mode: on" : "Fly mode: off") << std::endl; + } + + // Save the initial windows width/height + if (win_size_[0] == -1 || win_size_[1] == -1) + win_size_ = Vec2i(Interactor->GetRenderWindow()->GetSize()); + + switch (Interactor->GetKeyCode()) + { + case 'a': case 'A' : KeysDown |=16; break; + case 'z': case 'Z' : KeysDown |=32; break; + case 'h': case 'H' : printHelp(); break; + case 'p': case 'P' : setRepresentationToPoints(); break; + case 'k': case 'K' : exportScene(); break; + case 'j': case 'J' : saveScreenshot(cv::format("screenshot-%d.png", (unsigned int)time(0))); break; + case 'c': case 'C' : printCameraParams(); break; + case '=': zoomIn(); break; + case 43: // KEY_PLUS + { + if (FlyMode) + break; + if (Interactor->GetAltKey()) + zoomIn(); + else + changePointsSize(+1.f); + break; + } + case 45: // KEY_MINUS + { + if (FlyMode) + break; + if (Interactor->GetAltKey()) + zoomOut(); + else + changePointsSize(-1.f); + break; + } + // Switch between maximize and original window size + case 'f': case 'F': + { + if (Interactor->GetAltKey()) + toggleFullScreen(); + break; + } + // 's'/'S' w/out ALT + case 's': case 'S': + { + if (Interactor->GetAltKey()) + toggleStereo(); + break; + } + + case 'o': case 'O': + { + vtkSmartPointer cam = CurrentRenderer->GetActiveCamera(); + cam->SetParallelProjection(!cam->GetParallelProjection()); + Interactor->Render(); + break; + } + + // Overwrite the camera reset + case 'r': case 'R': + { + if (Interactor->GetAltKey()) + 
resetViewerPose(); + break; + } + case 'q': case 'Q': + Interactor->ExitCallback(); return; + default: + Superclass::OnKeyDown(); break; + } + + KeyboardEvent event(KeyboardEvent::KEY_DOWN, Interactor->GetKeySym(), Interactor->GetKeyCode(), getModifiers()); + if (keyboardCallback_) + keyboardCallback_(event, keyboard_callback_cookie_); + + if (FlyMode && (KeysDown & (32+16)) == (32+16)) + { + if (State == VTKIS_FORWARDFLY || State == VTKIS_REVERSEFLY) + StopState(); + } + else if (FlyMode && (KeysDown & 32) == 32) + { + if (State == VTKIS_FORWARDFLY) + StopState(); + + if (State == VTKIS_NONE) + StartState(VTKIS_REVERSEFLY); + } + else if (FlyMode && (KeysDown & 16) == 16) + { + if (State == VTKIS_REVERSEFLY) + StopState(); + + if (State == VTKIS_NONE) + StartState(VTKIS_FORWARDFLY); + } + + Interactor->Render(); +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnKeyUp() +{ + KeyboardEvent event(KeyboardEvent::KEY_UP, Interactor->GetKeySym(), Interactor->GetKeyCode(), getModifiers()); + if (keyboardCallback_) + keyboardCallback_(event, keyboard_callback_cookie_); + + switch (Interactor->GetKeyCode()) + { + case 'a': case 'A' : KeysDown &= ~16; break; + case 'z': case 'Z' : KeysDown &= ~32; break; + } + + if (State == VTKIS_FORWARDFLY && (KeysDown & 16) == 0) + StopState(); + + if (State == VTKIS_REVERSEFLY && (KeysDown & 32) == 0) + StopState(); + + Superclass::OnKeyUp(); +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnMouseMove() +{ + Vec2i p(Interactor->GetEventPosition()); + MouseEvent event(MouseEvent::MouseMove, MouseEvent::NoButton, p, getModifiers()); + if (mouseCallback_) + mouseCallback_(event, mouse_callback_cookie_); + + FindPokedRenderer(p[0], p[1]); + + if (State == VTKIS_ROTATE || State == VTKIS_PAN || State == VTKIS_DOLLY || State == VTKIS_SPIN) + { + switch (State) + { 
+ case VTKIS_ROTATE: Rotate(); break; + case VTKIS_PAN: Pan(); break; + case VTKIS_DOLLY: Dolly(); break; + case VTKIS_SPIN: Spin(); break; + } + + InvokeEvent(vtkCommand::InteractionEvent, NULL); + } + + if (State == VTKIS_FORWARDFLY || State == VTKIS_REVERSEFLY) + { + vtkCamera *cam = CurrentRenderer->GetActiveCamera(); + Vec2i thispos(Interactor->GetEventPosition()); + Vec2i lastpos(Interactor->GetLastEventPosition()); + + // we want to steer by an amount proportional to window viewangle and size + // compute dx and dy increments relative to last mouse click + Vec2i size(Interactor->GetSize()); + double scalefactor = 5*cam->GetViewAngle()/size[0]; + + double dx = - (thispos[0] - lastpos[0])*scalefactor*AngleStepSize; + double dy = (thispos[1] - lastpos[1])*scalefactor*AngleStepSize; + + // Temporary until I get smooth flight working + DeltaPitch = dy; + DeltaYaw = dx; + + InvokeEvent(vtkCommand::InteractionEvent, NULL); + } +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnLeftButtonDown() +{ + Vec2i p(Interactor->GetEventPosition()); + MouseEvent::Type type = (Interactor->GetRepeatCount() == 0) ? 
MouseEvent::MouseButtonPress : MouseEvent::MouseDblClick; + MouseEvent event(type, MouseEvent::LeftButton, p, getModifiers()); + if (mouseCallback_) + mouseCallback_(event, mouse_callback_cookie_); + + FindPokedRenderer(p[0], p[1]); + if (!CurrentRenderer) + return; + + GrabFocus(EventCallbackCommand); + + if (FlyMode) + { + if(State == VTKIS_REVERSEFLY) + State = VTKIS_FORWARDFLY; + else + { + SetupMotionVars(); + if (State == VTKIS_NONE) + StartState(VTKIS_FORWARDFLY); + } + } + else + { + if (Interactor->GetShiftKey()) + { + if (Interactor->GetControlKey()) + StartDolly(); + else + StartPan(); + } + else + { + if (Interactor->GetControlKey()) + StartSpin(); + else + StartRotate(); + } + } +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnLeftButtonUp() +{ + Vec2i p(Interactor->GetEventPosition()); + MouseEvent event(MouseEvent::MouseButtonRelease, MouseEvent::LeftButton, p, getModifiers()); + if (mouseCallback_) + mouseCallback_(event, mouse_callback_cookie_); + + switch (State) + { + case VTKIS_DOLLY: EndDolly(); break; + case VTKIS_PAN: EndPan(); break; + case VTKIS_SPIN: EndSpin(); break; + case VTKIS_ROTATE: EndRotate(); break; + case VTKIS_FORWARDFLY: StopState(); break; + } + + if (Interactor ) + ReleaseFocus(); +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnMiddleButtonDown() +{ + Vec2i p(Interactor->GetEventPosition()); + MouseEvent::Type type = (Interactor->GetRepeatCount() == 0) ? 
MouseEvent::MouseButtonPress : MouseEvent::MouseDblClick; + MouseEvent event(type, MouseEvent::MiddleButton, p, getModifiers()); + if (mouseCallback_) + mouseCallback_(event, mouse_callback_cookie_); + + FindPokedRenderer(p[0], p[1]); + if (!CurrentRenderer) + return; + + GrabFocus(EventCallbackCommand); + StartPan(); +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnMiddleButtonUp() +{ + Vec2i p(Interactor->GetEventPosition()); + MouseEvent event(MouseEvent::MouseButtonRelease, MouseEvent::MiddleButton, p, getModifiers()); + if (mouseCallback_) + mouseCallback_(event, mouse_callback_cookie_); + + if (State == VTKIS_PAN) + { + EndPan(); + if (Interactor) + ReleaseFocus(); + } +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnRightButtonDown() +{ + Vec2i p(Interactor->GetEventPosition()); + MouseEvent::Type type = (Interactor->GetRepeatCount() == 0) ? 
MouseEvent::MouseButtonPress : MouseEvent::MouseDblClick; + MouseEvent event(type, MouseEvent::RightButton, p, getModifiers()); + if (mouseCallback_) + mouseCallback_(event, mouse_callback_cookie_); + + FindPokedRenderer(p[0], p[1]); + if (!CurrentRenderer) + return; + + GrabFocus(EventCallbackCommand); + + if (FlyMode) + { + if (State == VTKIS_FORWARDFLY) + State = VTKIS_REVERSEFLY; + else + { + SetupMotionVars(); + if (State == VTKIS_NONE) + StartState(VTKIS_REVERSEFLY); + } + + } + else + StartDolly(); +} + + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnRightButtonUp() +{ + Vec2i p(Interactor->GetEventPosition()); + MouseEvent event(MouseEvent::MouseButtonRelease, MouseEvent::RightButton, p, getModifiers()); + if (mouseCallback_) + mouseCallback_(event, mouse_callback_cookie_); + + if(State == VTKIS_DOLLY) + { + EndDolly(); + if (Interactor) + ReleaseFocus(); + } + + if (State == VTKIS_REVERSEFLY) + { + StopState(); + if (Interactor) + ReleaseFocus(); + } +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnMouseWheelForward() +{ + Vec2i p(Interactor->GetEventPosition()); + MouseEvent event(MouseEvent::MouseScrollUp, MouseEvent::VScroll, p, getModifiers()); + if (mouseCallback_) + mouseCallback_(event, mouse_callback_cookie_); + if (Interactor->GetRepeatCount() && mouseCallback_) + mouseCallback_(event, mouse_callback_cookie_); + + if (Interactor->GetAltKey()) + { + // zoom + vtkSmartPointer cam = CurrentRenderer->GetActiveCamera(); + double opening_angle = cam->GetViewAngle(); + if (opening_angle > 15.0) + opening_angle -= 1.0; + + cam->SetViewAngle(opening_angle); + cam->Modified(); + CurrentRenderer->ResetCameraClippingRange(); + CurrentRenderer->Modified(); + Interactor->Render(); + } + else + { + FindPokedRenderer(p[0], p[1]); + if (!CurrentRenderer) + return; + + 
GrabFocus(EventCallbackCommand); + StartDolly(); + Dolly(pow(1.1, MotionFactor * 0.2 * MouseWheelMotionFactor)); + EndDolly(); + ReleaseFocus(); + } +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnMouseWheelBackward() +{ + Vec2i p(Interactor->GetEventPosition()); + MouseEvent event(MouseEvent::MouseScrollDown, MouseEvent::VScroll, p, getModifiers()); + if (mouseCallback_) + mouseCallback_(event, mouse_callback_cookie_); + + if (Interactor->GetRepeatCount() && mouseCallback_) + mouseCallback_(event, mouse_callback_cookie_); + + if (Interactor->GetAltKey()) + { + // zoom + vtkSmartPointer cam = CurrentRenderer->GetActiveCamera(); + double opening_angle = cam->GetViewAngle(); + if (opening_angle < 170.0) + opening_angle += 1.0; + + cam->SetViewAngle(opening_angle); + cam->Modified(); + CurrentRenderer->ResetCameraClippingRange(); + CurrentRenderer->Modified(); + Interactor->Render(); + } + else + { + FindPokedRenderer(p[0], p[1]); + if (!CurrentRenderer) + return; + + GrabFocus(EventCallbackCommand); + StartDolly(); + Dolly(pow(1.1, MotionFactor * -0.2 * MouseWheelMotionFactor)); + EndDolly(); + ReleaseFocus(); + } +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::OnTimer() +{ + if (State == VTKIS_FORWARDFLY || State == VTKIS_REVERSEFLY) + Fly(); + + Interactor->Render(); +} + +////////////////////////////////////////////////////////////////////////////////////////////// + +void cv::viz::vtkVizInteractorStyle::Rotate() +{ + if (!CurrentRenderer) + return; + + Vec2i dxy = Vec2i(Interactor->GetEventPosition()) - Vec2i(Interactor->GetLastEventPosition()); + Vec2i size(CurrentRenderer->GetRenderWindow()->GetSize()); + + double delta_elevation = -20.0 / size[1]; + double delta_azimuth = -20.0 / size[0]; + + double rxf = dxy[0] * delta_azimuth * MotionFactor; + double ryf = dxy[1] * 
delta_elevation * MotionFactor; + + vtkCamera *camera = CurrentRenderer->GetActiveCamera(); + camera->Azimuth(rxf); + camera->Elevation(ryf); + camera->OrthogonalizeViewUp(); + + if (AutoAdjustCameraClippingRange) + CurrentRenderer->ResetCameraClippingRange(); + + if (Interactor->GetLightFollowCamera()) + CurrentRenderer->UpdateLightsGeometryToFollowCamera(); + + Interactor->Render(); +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::Spin() +{ + if (!CurrentRenderer) + return; + + vtkRenderWindowInteractor *rwi = Interactor; + + double *center = CurrentRenderer->GetCenter(); + + double newAngle = vtkMath::DegreesFromRadians( atan2( rwi->GetEventPosition()[1] - center[1], rwi->GetEventPosition()[0] - center[0] ) ); + double oldAngle = vtkMath::DegreesFromRadians( atan2( rwi->GetLastEventPosition()[1] - center[1], rwi->GetLastEventPosition()[0] - center[0] ) ); + + vtkCamera *camera = CurrentRenderer->GetActiveCamera(); + camera->Roll( newAngle - oldAngle ); + camera->OrthogonalizeViewUp(); + + rwi->Render(); +} + +////////////////////////////////////////////////////////////////////////////////////////////// +void cv::viz::vtkVizInteractorStyle::Pan() +{ + if (!CurrentRenderer) + return; + + vtkRenderWindowInteractor *rwi = Interactor; + + double viewFocus[4], focalDepth, viewPoint[3]; + double newPickPoint[4], oldPickPoint[4], motionVector[3]; + + // Calculate the focal depth since we'll be using it a lot + + vtkCamera *camera = CurrentRenderer->GetActiveCamera(); + camera->GetFocalPoint(viewFocus); + ComputeWorldToDisplay(viewFocus[0], viewFocus[1], viewFocus[2], viewFocus); + focalDepth = viewFocus[2]; + + ComputeDisplayToWorld(rwi->GetEventPosition()[0], rwi->GetEventPosition()[1], focalDepth, newPickPoint); + + // Has to recalc old mouse point since the viewport has moved, so can't move it outside the loop + ComputeDisplayToWorld(rwi->GetLastEventPosition()[0], 
rwi->GetLastEventPosition()[1], focalDepth, oldPickPoint); + + // Camera motion is reversed + motionVector[0] = oldPickPoint[0] - newPickPoint[0]; + motionVector[1] = oldPickPoint[1] - newPickPoint[1]; + motionVector[2] = oldPickPoint[2] - newPickPoint[2]; + + camera->GetFocalPoint(viewFocus); + camera->GetPosition(viewPoint); + camera->SetFocalPoint(motionVector[0] + viewFocus[0], motionVector[1] + viewFocus[1], motionVector[2] + viewFocus[2]); + camera->SetPosition( motionVector[0] + viewPoint[0], motionVector[1] + viewPoint[1], motionVector[2] + viewPoint[2]); + + if (Interactor->GetLightFollowCamera()) + CurrentRenderer->UpdateLightsGeometryToFollowCamera(); + + Interactor->Render(); +} + +////////////////////////////////////////////////////////////////////////////////////////////// + +void cv::viz::vtkVizInteractorStyle::Dolly() +{ + if (!CurrentRenderer) + return; + + int dy = Interactor->GetEventPosition()[1] - Interactor->GetLastEventPosition()[1]; + Dolly(pow(1.1, MotionFactor * dy / CurrentRenderer->GetCenter()[1])); +} + +void cv::viz::vtkVizInteractorStyle::Dolly(double factor) +{ + if (!CurrentRenderer) + return; + + vtkCamera *camera = CurrentRenderer->GetActiveCamera(); + if (camera->GetParallelProjection()) + camera->SetParallelScale(camera->GetParallelScale() / factor); + else + { + camera->Dolly(factor); + if (AutoAdjustCameraClippingRange) + CurrentRenderer->ResetCameraClippingRange(); + } + + if (Interactor->GetLightFollowCamera()) + CurrentRenderer->UpdateLightsGeometryToFollowCamera(); + + Interactor->Render(); +} +////////////////////////////////////////////////////////////////////////////////////////////// + +void cv::viz::vtkVizInteractorStyle::Fly() +{ + if (CurrentRenderer == NULL) + return; + + if (KeysDown) + FlyByKey(); + else + FlyByMouse(); + + CurrentRenderer->GetActiveCamera()->OrthogonalizeViewUp(); + + if (AutoAdjustCameraClippingRange) + CurrentRenderer->ResetCameraClippingRange(); + + if (Interactor->GetLightFollowCamera()) + 
CurrentRenderer->UpdateLightsGeometryToFollowCamera(); +} + +void cv::viz::vtkVizInteractorStyle::SetupMotionVars() +{ + Vec6d bounds; + CurrentRenderer->ComputeVisiblePropBounds(bounds.val); + + if ( !vtkMath::AreBoundsInitialized(bounds.val) ) + DiagonalLength = 1.0; + else + DiagonalLength = norm(Vec3d(bounds[0], bounds[2], bounds[4]) - Vec3d(bounds[1], bounds[3], bounds[5])); +} + +void cv::viz::vtkVizInteractorStyle::MotionAlongVector(const Vec3d& vector, double amount, vtkCamera* cam) +{ + // move camera and focus along DirectionOfProjection + Vec3d campos = Vec3d(cam->GetPosition()) - amount * vector; + Vec3d camfoc = Vec3d(cam->GetFocalPoint()) - amount * vector; + + cam->SetPosition(campos.val); + cam->SetFocalPoint(camfoc.val); +} + +void cv::viz::vtkVizInteractorStyle::FlyByMouse() +{ + vtkCamera* cam = CurrentRenderer->GetActiveCamera(); + double speed = DiagonalLength * MotionStepSize * MotionUserScale; + speed = speed * ( Interactor->GetShiftKey() ? MotionAccelerationFactor : 1.0); + + // Sidestep + if (Interactor->GetAltKey()) + { + if (DeltaYaw!=0.0) + { + vtkMatrix4x4 *vtm = cam->GetViewTransformMatrix(); + Vec3d a_vector(vtm->GetElement(0,0), vtm->GetElement(0,1), vtm->GetElement(0,2)); + + MotionAlongVector(a_vector, -DeltaYaw*speed, cam); + } + if (DeltaPitch!=0.0) + { + Vec3d a_vector(cam->GetViewUp()); + MotionAlongVector(a_vector, DeltaPitch*speed, cam); + } + } + else + { + cam->Yaw(DeltaYaw); + cam->Pitch(DeltaPitch); + DeltaYaw = 0; + DeltaPitch = 0; + } + // + if (!Interactor->GetControlKey()) + { + Vec3d a_vector(cam->GetDirectionOfProjection()); // reversed (use -speed) + switch (State) + { + case VTKIS_FORWARDFLY: MotionAlongVector(a_vector, -speed, cam); break; + case VTKIS_REVERSEFLY: MotionAlongVector(a_vector, speed, cam); break; + } + } +} + +void cv::viz::vtkVizInteractorStyle::FlyByKey() +{ + vtkCamera* cam = CurrentRenderer->GetActiveCamera(); + + double speed = DiagonalLength * MotionStepSize * MotionUserScale; + speed = speed 
* ( Interactor->GetShiftKey() ? MotionAccelerationFactor : 1.0); + + // Left and right + if (Interactor->GetAltKey()) + { // Sidestep + vtkMatrix4x4 *vtm = cam->GetViewTransformMatrix(); + Vec3d a_vector(vtm->GetElement(0,0), vtm->GetElement(0,1), vtm->GetElement(0,2)); + + if (KeysDown & 1) + MotionAlongVector(a_vector, -speed, cam); + + if (KeysDown & 2) + MotionAlongVector(a_vector, speed, cam); + } + else + { + if (KeysDown & 1) + cam->Yaw( AngleStepSize); + + if (KeysDown & 2) + cam->Yaw(-AngleStepSize); + } + + // Up and Down + if (Interactor->GetControlKey()) + { // Sidestep + Vec3d a_vector = Vec3d(cam->GetViewUp()); + if (KeysDown & 4) + MotionAlongVector(a_vector,-speed, cam); + + if (KeysDown & 8) + MotionAlongVector(a_vector, speed, cam); + } + else + { + if (KeysDown & 4) + cam->Pitch(-AngleStepSize); + + if (KeysDown & 8) + cam->Pitch( AngleStepSize); + } + + // forward and backward + Vec3d a_vector(cam->GetDirectionOfProjection()); + if (KeysDown & 16) + MotionAlongVector(a_vector, speed, cam); + + if (KeysDown & 32) + MotionAlongVector(a_vector,-speed, cam); +} + +////////////////////////////////////////////////////////////////////////////////////////////// + +void cv::viz::vtkVizInteractorStyle::PrintSelf(ostream& os, vtkIndent indent) +{ + Superclass::PrintSelf(os, indent); + os << indent << "MotionFactor: " << MotionFactor << "\n"; + os << indent << "MotionStepSize: " << MotionStepSize << "\n"; + os << indent << "MotionAccelerationFactor: "<< MotionAccelerationFactor << "\n"; + os << indent << "AngleStepSize: " << AngleStepSize << "\n"; + os << indent << "MotionUserScale: "<< MotionUserScale << "\n"; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkVizInteractorStyle.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkVizInteractorStyle.hpp new file mode 100644 index 00000000..c9458ae0 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkVizInteractorStyle.hpp @@ -0,0 +1,169 @@ 
+/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Ozan Tonkal, ozantonkal@gmail.com +// * Anatoly Baksheev, Itseez Inc. myname.mysurname <> mycompany.com +// +//M*/ + +#ifndef __OPENCV_VIZ_INTERACTOR_STYLE_H__ +#define __OPENCV_VIZ_INTERACTOR_STYLE_H__ + +#include + +namespace cv +{ + namespace viz + { + class vtkVizInteractorStyle : public vtkInteractorStyle + { + public: + static vtkVizInteractorStyle *New(); + vtkTypeMacro(vtkVizInteractorStyle, vtkInteractorStyle) + void PrintSelf(ostream& os, vtkIndent indent); + + virtual void OnChar(); + virtual void OnKeyDown(); + virtual void OnKeyUp(); + + virtual void OnMouseMove(); + virtual void OnLeftButtonDown(); + virtual void OnLeftButtonUp(); + virtual void OnMiddleButtonDown(); + virtual void OnMiddleButtonUp(); + virtual void OnRightButtonDown(); + virtual void OnRightButtonUp(); + virtual void OnMouseWheelForward(); + virtual void OnMouseWheelBackward(); + virtual void OnTimer(); + + virtual void Rotate(); + virtual void Spin(); + virtual void Pan(); + virtual void Dolly(); + + vtkSetMacro(FlyMode,bool) + vtkGetMacro(FlyMode,bool) + + + vtkSetMacro(MotionFactor, double) + vtkGetMacro(MotionFactor, double) + + void registerMouseCallback(void (*callback)(const MouseEvent&, void*), void* cookie = 0); + void registerKeyboardCallback(void (*callback)(const KeyboardEvent&, void*), void * cookie = 0); + + void setWidgetActorMap(const Ptr& actors) { widget_actor_map_ = actors; } + void saveScreenshot(const 
String &file); + void exportScene(const String &file); + void exportScene(); + void changePointsSize(float delta); + void setRepresentationToPoints(); + void printCameraParams(); + void toggleFullScreen(); + void resetViewerPose(); + void toggleStereo(); + void printHelp(); + + // Set the basic unit step size : by default 1/250 of bounding diagonal + vtkSetMacro(MotionStepSize,double) + vtkGetMacro(MotionStepSize,double) + + // Set acceleration factor when shift key is applied : default 10 + vtkSetMacro(MotionAccelerationFactor,double) + vtkGetMacro(MotionAccelerationFactor,double) + + // Set the basic angular unit for turning : default 1 degree + vtkSetMacro(AngleStepSize,double) + vtkGetMacro(AngleStepSize,double) + + private: + Ptr widget_actor_map_; + + Vec2i win_size_; + Vec2i win_pos_; + Vec2i max_win_size_; + + void zoomIn(); + void zoomOut(); + + protected: + vtkVizInteractorStyle(); + ~vtkVizInteractorStyle(); + + virtual void Dolly(double factor); + + void Fly(); + void FlyByMouse(); + void FlyByKey(); + void SetupMotionVars(); + void MotionAlongVector(const Vec3d& vector, double amount, vtkCamera* cam); + + private: + vtkVizInteractorStyle(const vtkVizInteractorStyle&); + vtkVizInteractorStyle& operator=(const vtkVizInteractorStyle&); + + //! True for red-blue colors, false for magenta-green. 
+ bool stereo_anaglyph_redblue_; + + void (*keyboardCallback_)(const KeyboardEvent&, void*); + void *keyboard_callback_cookie_; + + void (*mouseCallback_)(const MouseEvent&, void*); + void *mouse_callback_cookie_; + + bool FlyMode; + double MotionFactor; + + int getModifiers(); + + // from fly + unsigned char KeysDown; + double DiagonalLength; + double MotionStepSize; + double MotionUserScale; + double MotionAccelerationFactor; + double AngleStepSize; + double DeltaYaw; + double DeltaPitch; + }; + } // end namespace viz +} // end namespace cv + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkXYZReader.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkXYZReader.cpp new file mode 100644 index 00000000..3b9265fe --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkXYZReader.cpp @@ -0,0 +1,107 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. myname.mysurname <> mycompany.com +// +//M*/ + +#include "../precomp.hpp" + +namespace cv { namespace viz +{ + vtkStandardNewMacro(vtkXYZReader); +}} + + +cv::viz::vtkXYZReader::vtkXYZReader() +{ + this->FileName = 0; + this->SetNumberOfInputPorts(0); +} + +cv::viz::vtkXYZReader::~vtkXYZReader() +{ + this->SetFileName(0); +} + +void cv::viz::vtkXYZReader::PrintSelf(ostream& os, vtkIndent indent) +{ + this->Superclass::PrintSelf(os,indent); + os << indent << "FileName: " << (this->FileName ? this->FileName : "(none)") << "\n"; +} + +int cv::viz::vtkXYZReader::RequestData(vtkInformation*, vtkInformationVector**, vtkInformationVector* outputVector) +{ + // Make sure we have a file to read. + if(!this->FileName) + { + vtkErrorMacro("A FileName must be specified."); + return 0; + } + + // Open the input file. 
+ std::ifstream fin(this->FileName); + if(!fin) + { + vtkErrorMacro("Error opening file " << this->FileName); + return 0; + } + + // Allocate objects to hold points and vertex cells. + vtkSmartPointer points = vtkSmartPointer::New(); + vtkSmartPointer verts = vtkSmartPointer::New(); + + // Read points from the file. + vtkDebugMacro("Reading points from file " << this->FileName); + double x[3]; + while(fin >> x[0] >> x[1] >> x[2]) + { + vtkIdType id = points->InsertNextPoint(x); + verts->InsertNextCell(1, &id); + } + vtkDebugMacro("Read " << points->GetNumberOfPoints() << " points."); + + // Store the points and cells in the output data object. + vtkPolyData* output = vtkPolyData::GetData(outputVector); + output->SetPoints(points); + output->SetVerts(verts); + + return 1; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkXYZReader.h b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkXYZReader.h new file mode 100644 index 00000000..13ae048e --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkXYZReader.h @@ -0,0 +1,80 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. myname.mysurname <> mycompany.com +// +//M*/ + +#ifndef __vtkXYZReader_h +#define __vtkXYZReader_h + +#include "vtkPolyDataAlgorithm.h" + +namespace cv +{ + namespace viz + { + class vtkXYZReader : public vtkPolyDataAlgorithm + { + public: + static vtkXYZReader* New(); + vtkTypeMacro(vtkXYZReader,vtkPolyDataAlgorithm) + void PrintSelf(ostream& os, vtkIndent indent); + + // Description: + // Set/Get the name of the file from which to read points. + vtkSetStringMacro(FileName) + vtkGetStringMacro(FileName) + + protected: + vtkXYZReader(); + ~vtkXYZReader(); + + char* FileName; + + int RequestData(vtkInformation*, vtkInformationVector**, vtkInformationVector*); + private: + vtkXYZReader(const vtkXYZReader&); // Not implemented. 
+ void operator=(const vtkXYZReader&); // Not implemented. + }; + } +} + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkXYZWriter.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkXYZWriter.cpp new file mode 100644 index 00000000..56a26b38 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkXYZWriter.cpp @@ -0,0 +1,122 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. myname.mysurname <> mycompany.com +// +//M*/ + +#include "../precomp.hpp" + +namespace cv { namespace viz +{ + vtkStandardNewMacro(vtkXYZWriter); +}} + +cv::viz::vtkXYZWriter::vtkXYZWriter() +{ + std::ofstream fout; // only used to extract the default precision + this->DecimalPrecision = fout.precision(); +} + +void cv::viz::vtkXYZWriter::WriteData() +{ + vtkPolyData *input = this->GetInput(); + if (!input) + return; + + if (!this->FileName ) + { + vtkErrorMacro(<< "No FileName specified! 
Can't write!"); + this->SetErrorCode(vtkErrorCode::NoFileNameError); + return; + } + + vtkDebugMacro(<<"Opening vtk file for writing..."); + std::ostream *outfilep = new std::ofstream(this->FileName, ios::out); + if (outfilep->fail()) + { + vtkErrorMacro(<< "Unable to open file: "<< this->FileName); + this->SetErrorCode(vtkErrorCode::CannotOpenFileError); + delete outfilep; + return; + } + + ostream &outfile = *outfilep; + + for(vtkIdType i = 0; i < input->GetNumberOfPoints(); ++i) + { + Vec3d p; + input->GetPoint(i, p.val); + outfile << std::setprecision(this->DecimalPrecision) << p[0] << " " << p[1] << " " << p[2] << std::endl; + } + + // Close the file + vtkDebugMacro(<<"Closing vtk file\n"); + delete outfilep; + + // Delete the file if an error occurred + if (this->ErrorCode == vtkErrorCode::OutOfDiskSpaceError) + { + vtkErrorMacro("Ran out of disk space; deleting file: " << this->FileName); + unlink(this->FileName); + } +} + +int cv::viz::vtkXYZWriter::FillInputPortInformation(int, vtkInformation *info) +{ + info->Set(vtkAlgorithm::INPUT_REQUIRED_DATA_TYPE(), "vtkPolyData"); + return 1; +} + +void cv::viz::vtkXYZWriter::PrintSelf(ostream& os, vtkIndent indent) +{ + this->Superclass::PrintSelf(os,indent); + os << indent << "DecimalPrecision: " << this->DecimalPrecision << "\n"; +} + +vtkPolyData* cv::viz::vtkXYZWriter::GetInput() +{ + return vtkPolyData::SafeDownCast(this->Superclass::GetInput()); +} + +vtkPolyData* cv::viz::vtkXYZWriter::GetInput(int port) +{ + return vtkPolyData::SafeDownCast(this->Superclass::GetInput(port)); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkXYZWriter.h b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkXYZWriter.h new file mode 100644 index 00000000..91d0c8f6 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/vtk/vtkXYZWriter.h @@ -0,0 +1,90 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, 
COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#ifndef __vtkXYZWriter_h +#define __vtkXYZWriter_h + +#include "vtkWriter.h" + +namespace cv +{ + namespace viz + { + class vtkXYZWriter : public vtkWriter + { + public: + static vtkXYZWriter *New(); + vtkTypeMacro(vtkXYZWriter,vtkWriter) + void PrintSelf(ostream& os, vtkIndent indent); + + vtkGetMacro(DecimalPrecision, int) + vtkSetMacro(DecimalPrecision, int) + + // Description: + // Specify file name of data file to write. + vtkSetStringMacro(FileName) + vtkGetStringMacro(FileName) + + // Description: + // Get the input to this writer. + vtkPolyData* GetInput(); + vtkPolyData* GetInput(int port); + + protected: + vtkXYZWriter(); + ~vtkXYZWriter(){} + + void WriteData(); + int FillInputPortInformation(int port, vtkInformation *info); + + int DecimalPrecision; + char *FileName; + + private: + vtkXYZWriter(const vtkXYZWriter&); // Not implemented. + void operator=(const vtkXYZWriter&); // Not implemented. + }; + } +} +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/widget.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/widget.cpp new file mode 100644 index 00000000..b46d3d1c --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/src/widget.cpp @@ -0,0 +1,352 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Ozan Tonkal, ozantonkal@gmail.com +// * Anatoly Baksheev, Itseez Inc. 
myname.mysurname <> mycompany.com +// +//M*/ + +#include "precomp.hpp" + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// widget implementation + +class cv::viz::Widget::Impl +{ +public: + vtkSmartPointer prop; + Impl() : prop(0) {} +}; + +cv::viz::Widget::Widget() : impl_( new Impl() ) { } + +cv::viz::Widget::Widget(const Widget& other) : impl_( new Impl() ) +{ + if (other.impl_ && other.impl_->prop) + impl_->prop = other.impl_->prop; +} + +cv::viz::Widget& cv::viz::Widget::operator=(const Widget& other) +{ + if (!impl_) + impl_ = new Impl(); + + if (other.impl_) + impl_->prop = other.impl_->prop; + return *this; +} + +cv::viz::Widget::~Widget() +{ + if (impl_) + { + delete impl_; + impl_ = 0; + } +} + +cv::viz::Widget cv::viz::Widget::fromPlyFile(const String &file_name) +{ + CV_Assert(vtkPLYReader::CanReadFile(file_name.c_str())); + + vtkSmartPointer reader = vtkSmartPointer::New(); + reader->SetFileName(file_name.c_str()); + + vtkSmartPointer mapper = vtkSmartPointer::New(); + mapper->SetInputConnection( reader->GetOutputPort() ); +#if VTK_MAJOR_VERSION < 8 + mapper->ImmediateModeRenderingOff(); +#endif + + vtkSmartPointer actor = vtkSmartPointer::New(); + actor->GetProperty()->SetInterpolationToFlat(); + actor->GetProperty()->BackfaceCullingOn(); + actor->SetMapper(mapper); + + Widget widget; + WidgetAccessor::setProp(widget, actor); + return widget; +} + +void cv::viz::Widget::setRenderingProperty(int property, double value) +{ + vtkActor *actor = vtkActor::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("Widget type is not supported." 
&& actor); + + switch (property) + { + case POINT_SIZE: actor->GetProperty()->SetPointSize(float(value)); break; + case OPACITY: actor->GetProperty()->SetOpacity(value); break; + case LINE_WIDTH: actor->GetProperty()->SetLineWidth(float(value)); break; +#if VTK_MAJOR_VERSION < 8 + case IMMEDIATE_RENDERING: actor->GetMapper()->SetImmediateModeRendering(int(value)); break; +#else + case IMMEDIATE_RENDERING: std::cerr << "this property has no effect" << std::endl; break; +#endif + case AMBIENT: actor->GetProperty()->SetAmbient(float(value)); break; + case LIGHTING: + { + if (value == 0) + actor->GetProperty()->LightingOff(); + else + actor->GetProperty()->LightingOn(); + break; + } + case FONT_SIZE: + { + vtkTextActor* text_actor = vtkTextActor::SafeDownCast(actor); + CV_Assert("Widget does not have text content." && text_actor); + text_actor->GetTextProperty()->SetFontSize(int(value)); + break; + } + case REPRESENTATION: + { + switch (int(value)) + { + case REPRESENTATION_POINTS: actor->GetProperty()->SetRepresentationToPoints(); break; + case REPRESENTATION_WIREFRAME: actor->GetProperty()->SetRepresentationToWireframe(); break; + case REPRESENTATION_SURFACE: actor->GetProperty()->SetRepresentationToSurface(); break; + } + break; + } + case SHADING: + { + switch (int(value)) + { + case SHADING_FLAT: actor->GetProperty()->SetInterpolationToFlat(); break; + case SHADING_GOURAUD: + { + if (!actor->GetMapper()->GetInput()->GetPointData()->GetNormals()) + { + vtkSmartPointer mapper = vtkPolyDataMapper::SafeDownCast(actor->GetMapper()); + CV_Assert("Can't set shading property for such type of widget" && mapper); + + vtkSmartPointer with_normals = VtkUtils::ComputeNormals(mapper->GetInput()); + VtkUtils::SetInputData(mapper, with_normals); + } + actor->GetProperty()->SetInterpolationToGouraud(); + break; + } + case SHADING_PHONG: + { + if (!actor->GetMapper()->GetInput()->GetPointData()->GetNormals()) + { + vtkSmartPointer mapper = 
vtkPolyDataMapper::SafeDownCast(actor->GetMapper()); + CV_Assert("Can't set shading property for such type of widget" && mapper); + + vtkSmartPointer with_normals = VtkUtils::ComputeNormals(mapper->GetInput()); + VtkUtils::SetInputData(mapper, with_normals); + } + actor->GetProperty()->SetInterpolationToPhong(); + break; + } + } + break; + } + default: + CV_Assert("setRenderingProperty: Unknown property"); + } + actor->Modified(); +} + +double cv::viz::Widget::getRenderingProperty(int property) const +{ + vtkActor *actor = vtkActor::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("Widget type is not supported." && actor); + + double value = 0.0; + switch (property) + { + case POINT_SIZE: value = actor->GetProperty()->GetPointSize(); break; + case OPACITY: value = actor->GetProperty()->GetOpacity(); break; + case LINE_WIDTH: value = actor->GetProperty()->GetLineWidth(); break; +#if VTK_MAJOR_VERSION < 8 + case IMMEDIATE_RENDERING: value = actor->GetMapper()->GetImmediateModeRendering(); break; +#else + case IMMEDIATE_RENDERING: std::cerr << "this property has no effect" << std::endl; break; +#endif + case AMBIENT: value = actor->GetProperty()->GetAmbient(); break; + case LIGHTING: value = actor->GetProperty()->GetLighting(); break; + case FONT_SIZE: + { + vtkTextActor* text_actor = vtkTextActor::SafeDownCast(actor); + CV_Assert("Widget does not have text content." 
&& text_actor); + value = text_actor->GetTextProperty()->GetFontSize();; + break; + } + case REPRESENTATION: + { + switch (actor->GetProperty()->GetRepresentation()) + { + case VTK_POINTS: value = REPRESENTATION_POINTS; break; + case VTK_WIREFRAME: value = REPRESENTATION_WIREFRAME; break; + case VTK_SURFACE: value = REPRESENTATION_SURFACE; break; + } + break; + } + case SHADING: + { + switch (actor->GetProperty()->GetInterpolation()) + { + case VTK_FLAT: value = SHADING_FLAT; break; + case VTK_GOURAUD: value = SHADING_GOURAUD; break; + case VTK_PHONG: value = SHADING_PHONG; break; + } + break; + } + default: + CV_Assert("getRenderingProperty: Unknown property"); + } + return value; +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// widget accessor implementation + +vtkSmartPointer cv::viz::WidgetAccessor::getProp(const Widget& widget) +{ + return widget.impl_->prop; +} + +void cv::viz::WidgetAccessor::setProp(Widget& widget, vtkSmartPointer prop) +{ + widget.impl_->prop = prop; +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// widget3D implementation + +void cv::viz::Widget3D::setPose(const Affine3d &pose) +{ + vtkProp3D *actor = vtkProp3D::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("Widget is not 3D." && actor); + + vtkSmartPointer matrix = vtkmatrix(pose.matrix); + actor->SetUserMatrix(matrix); + actor->Modified(); +} + +void cv::viz::Widget3D::updatePose(const Affine3d &pose) +{ + vtkProp3D *actor = vtkProp3D::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("Widget is not 3D." 
&& actor); + + vtkSmartPointer matrix = actor->GetUserMatrix(); + if (!matrix) + { + setPose(pose); + return; + } + + Affine3d updated_pose = pose * Affine3d(*matrix->Element); + matrix = vtkmatrix(updated_pose.matrix); + + actor->SetUserMatrix(matrix); + actor->Modified(); +} + +cv::Affine3d cv::viz::Widget3D::getPose() const +{ + vtkProp3D *actor = vtkProp3D::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("Widget is not 3D." && actor); + if (!actor->GetUserMatrix()) + { + return Affine3d(); // empty user matrix, return an identity transform. + } + return Affine3d(*actor->GetUserMatrix()->Element); +} + +void cv::viz::Widget3D::applyTransform(const Affine3d &transform) +{ + vtkActor *actor = vtkActor::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("Widget is not 3D actor." && actor); + + vtkSmartPointer mapper = vtkPolyDataMapper::SafeDownCast(actor->GetMapper()); + CV_Assert("Widget doesn't have a polydata mapper" && mapper); + + mapper->Update(); // #10945 + VtkUtils::SetInputData(mapper, VtkUtils::TransformPolydata(mapper->GetInput(), transform)); + mapper->Update(); +} + +void cv::viz::Widget3D::setColor(const Color &color) +{ + // Cast to actor instead of prop3d since prop3d doesn't provide getproperty + vtkActor *actor = vtkActor::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("Widget type is not supported." && actor); + + Color c = vtkcolor(color); + actor->GetMapper()->ScalarVisibilityOff(); + actor->GetProperty()->SetColor(c.val); + actor->GetProperty()->SetEdgeColor(c.val); + actor->Modified(); +} + +template<> cv::viz::Widget3D cv::viz::Widget::cast() const +{ + vtkProp3D *actor = vtkProp3D::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("Widget cannot be cast." 
&& actor); + + Widget3D widget; + WidgetAccessor::setProp(widget, actor); + return widget; +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +/// widget2D implementation + +void cv::viz::Widget2D::setColor(const Color &color) +{ + vtkActor2D *actor = vtkActor2D::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("Widget type is not supported." && actor); + Color c = vtkcolor(color); + actor->GetProperty()->SetColor(c.val); + actor->Modified(); +} + +template<> cv::viz::Widget2D cv::viz::Widget::cast() const +{ + vtkActor2D *actor = vtkActor2D::SafeDownCast(WidgetAccessor::getProp(*this)); + CV_Assert("Widget cannot be cast." && actor); + + Widget2D widget; + WidgetAccessor::setProp(widget, actor); + return widget; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_common.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_common.cpp new file mode 100644 index 00000000..ac0cea4b --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_common.cpp @@ -0,0 +1,27 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +#include "test_precomp.hpp" + +cv::String cv::Path::combine(const String& item1, const String& item2) +{ + if (item1.empty()) + return item2; + + if (item2.empty()) + return item1; + + char last = item1[item1.size()-1]; + + bool need_append = last != '/' && last != '\\'; + return item1 + (need_append ? "/" : "") + item2; +} + +cv::String cv::Path::combine(const String& item1, const String& item2, const String& item3) +{ return combine(combine(item1, item2), item3); } + +cv::String cv::Path::change_extension(const String& file, const String& ext) +{ + String::size_type pos = file.find_last_of('.'); + return pos == String::npos ? 
file : file.substr(0, pos+1) + ext; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_common.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_common.hpp new file mode 100644 index 00000000..bfd6d13c --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_common.hpp @@ -0,0 +1,87 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +// Authors: +// * Ozan Tonkal, ozantonkal@gmail.com +// * Anatoly Baksheev, Itseez Inc. myname.mysurname <> mycompany.com +// +//M*/ + +#ifndef OPENCV_VIZ_TEST_COMMON_HPP +#define OPENCV_VIZ_TEST_COMMON_HPP + +#include + +namespace cv +{ + struct Path + { + static String combine(const String& item1, const String& item2); + static String combine(const String& item1, const String& item2, const String& item3); + static String change_extension(const String& file, const String& ext); + }; + + inline cv::String get_dragon_ply_file_path() + { + return Path::combine(cvtest::TS::ptr()->get_data_path(), "dragon.ply"); + } + + template + inline std::vector< Affine3<_Tp> > generate_test_trajectory() + { + std::vector< Affine3<_Tp> > result; + + for (int i = 0, j = 0; i <= 270; i += 3, j += 10) + { + double x = 2 * cos(i * 3 * CV_PI/180.0) * (1.0 + 0.5 * cos(1.2 + i * 1.2 * CV_PI/180.0)); + double y = 0.25 + i/270.0 + sin(j * CV_PI/180.0) * 0.2 * sin(0.6 + j * 1.5 * CV_PI/180.0); + double z = 2 * sin(i * 3 * CV_PI/180.0) * (1.0 + 0.5 * cos(1.2 + i * CV_PI/180.0)); + result.push_back(viz::makeCameraPose(Vec3d(x, y, z), Vec3d::all(0.0), Vec3d(0.0, 1.0, 0.0))); + } + return result; + } + + inline Mat make_gray(const Mat& image) + { + Mat chs[3]; split(image, chs); + return 0.114 * chs[0] + 0.58 * chs[1] + 0.3 * chs[2]; + } +} + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_main.cpp 
b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_main.cpp new file mode 100644 index 00000000..656b1d4f --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_main.cpp @@ -0,0 +1,6 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +#include "test_precomp.hpp" + +CV_TEST_MAIN("viz") diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_precomp.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_precomp.hpp new file mode 100644 index 00000000..954f0335 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_precomp.hpp @@ -0,0 +1,10 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +#include "opencv2/ts.hpp" +#include "test_common.hpp" + +namespace opencv_test +{ +using namespace cv::viz; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_tutorial2.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_tutorial2.cpp new file mode 100644 index 00000000..a4b5b995 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_tutorial2.cpp @@ -0,0 +1,58 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +static void tutorial2() +{ + /// Create a window + viz::Viz3d myWindow("Coordinate Frame"); + + /// Add coordinate axes + myWindow.showWidget("Coordinate Widget", viz::WCoordinateSystem()); + + /// Add line to represent (1,1,1) axis + viz::WLine axis(Point3f(-1.0, -1.0, -1.0), Point3d(1.0, 1.0, 1.0)); + axis.setRenderingProperty(viz::LINE_WIDTH, 4.0); + myWindow.showWidget("Line Widget", axis); + + /// Construct a cube widget + viz::WCube cube_widget(Point3d(0.5, 0.5, 0.0), Point3d(0.0, 0.0, -0.5), true, viz::Color::blue()); + cube_widget.setRenderingProperty(viz::LINE_WIDTH, 4.0); + + /// Display widget (update if already displayed) + myWindow.showWidget("Cube Widget", cube_widget); + + /// Rodrigues vector + Vec3d rot_vec = Vec3d::all(0); + double translation_phase = 0.0, translation = 0.0; + for(unsigned num = 0; num < 50; ++num) + { + /* Rotation using rodrigues */ + /// Rotate around (1,1,1) + rot_vec[0] += CV_PI * 0.01; + rot_vec[1] += CV_PI * 0.01; + rot_vec[2] += CV_PI * 0.01; + + /// Shift on (1,1,1) + translation_phase += CV_PI * 0.01; + translation = sin(translation_phase); + + /// Construct pose + Affine3d pose(rot_vec, Vec3d(translation, translation, translation)); + + myWindow.setWidgetPose("Cube Widget", pose); + + myWindow.spinOnce(100, true); + } +} + + +TEST(Viz, DISABLED_tutorial2_pose_of_widget) +{ + tutorial2(); +} + +}} // namespace diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_tutorial3.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_tutorial3.cpp new file mode 100644 index 00000000..32e33b19 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_tutorial3.cpp @@ -0,0 +1,64 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +/** + * @function main + */ +static void tutorial3(bool camera_pov) +{ + /// Create a window + viz::Viz3d myWindow("Coordinate Frame"); + + /// Add coordinate axes + myWindow.showWidget("Coordinate Widget", viz::WCoordinateSystem()); + + /// Let's assume camera has the following properties + Point3d cam_origin(3.0, 3.0, 3.0), cam_focal_point(3.0, 3.0, 2.0), cam_y_dir(-1.0, 0.0, 0.0); + + /// We can get the pose of the cam using makeCameraPose + Affine3d camera_pose = viz::makeCameraPose(cam_origin, cam_focal_point, cam_y_dir); + + /// We can get the transformation matrix from camera coordinate system to global using + /// - makeTransformToGlobal. We need the axes of the camera + Affine3d transform = viz::makeTransformToGlobal(Vec3d(0.0, -1.0, 0.0), Vec3d(-1.0, 0.0, 0.0), Vec3d(0.0, 0.0, -1.0), cam_origin); + + /// Create a cloud widget. + Mat dragon_cloud = viz::readCloud(get_dragon_ply_file_path()); + viz::WCloud cloud_widget(dragon_cloud, viz::Color::green()); + + /// Pose of the widget in camera frame + Affine3d cloud_pose = Affine3d().rotate(Vec3d(0.0, CV_PI/2, 0.0)).rotate(Vec3d(0.0, 0.0, CV_PI)).translate(Vec3d(0.0, 0.0, 3.0)); + /// Pose of the widget in global frame + Affine3d cloud_pose_global = transform * cloud_pose; + + /// Visualize camera frame + myWindow.showWidget("CPW_FRUSTUM", viz::WCameraPosition(Vec2f(0.889484f, 0.523599f)), camera_pose); + if (!camera_pov) + myWindow.showWidget("CPW", viz::WCameraPosition(0.5), camera_pose); + + /// Visualize widget + myWindow.showWidget("bunny", cloud_widget, cloud_pose_global); + + /// Set the viewer pose to that of camera + if (camera_pov) + myWindow.setViewerPose(camera_pose); + + /// Start event loop. 
+ myWindow.spinOnce(500, true); +} + +TEST(Viz, tutorial3_global_view) +{ + tutorial3(false); +} + +TEST(Viz, tutorial3_camera_view) +{ + tutorial3(true); +} + +}} // namespace diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_viz3d.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_viz3d.cpp new file mode 100644 index 00000000..4ab05c3e --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/test_viz3d.cpp @@ -0,0 +1,65 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// + // + // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. + // + // By downloading, copying, installing or using the software you agree to this license. + // If you do not agree to this license, do not download, install, + // copy or use the software. + // + // + // License Agreement + // For Open Source Computer Vision Library + // + // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. + // Copyright (C) 2008-2013, Willow Garage Inc., all rights reserved. + // Third party copyrights are property of their respective owners. + // + // Redistribution and use in source and binary forms, with or without modification, + // are permitted provided that the following conditions are met: + // + // * Redistribution's of source code must retain the above copyright notice, + // this list of conditions and the following disclaimer. + // + // * Redistribution's in binary form must reproduce the above copyright notice, + // this list of conditions and the following disclaimer in the documentation + // and / or other materials provided with the distribution. + // + // * The name of the copyright holders may not be used to endorse or promote products + // derived from this software without specific prior written permission. 
+ // + // This software is provided by the copyright holders and contributors "as is" and + // any express or implied warranties, including, but not limited to, the implied + // warranties of merchantability and fitness for a particular purpose are disclaimed. + // In no event shall the Intel Corporation or contributors be liable for any direct, + // indirect, incidental, special, exemplary, or consequential damages + // (including, but not limited to, procurement of substitute goods or services; + // loss of use, data, or profits; or business interruption) however caused + // and on any theory of liability, whether in contract, strict liability, + // or tort (including negligence or otherwise) arising in any way out of + // the use of this software, even if advised of the possibility of such damage. + // + //M*/ +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +TEST(Viz_viz3d, DISABLED_develop) +{ + cv::Mat cloud = cv::viz::readCloud(get_dragon_ply_file_path()); + + cv::viz::Viz3d viz("abc"); + viz.setBackgroundMeshLab(); + viz.showWidget("coo", cv::viz::WCoordinateSystem(1)); + viz.showWidget("cloud", cv::viz::WPaintedCloud(cloud)); + + //---->>>>> + //std::vector gt, es; + //cv::viz::readTrajectory(gt, "d:/Datasets/trajs/gt%05d.xml"); + //cv::viz::readTrajectory(es, "d:/Datasets/trajs/es%05d.xml"); + //cv::Mat cloud = cv::viz::readCloud(get_dragon_ply_file_path()); + //---->>>>> + + viz.spinOnce(500, true); +} + +}} // namespace diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/tests_simple.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/tests_simple.cpp new file mode 100644 index 00000000..5584483f --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/test/tests_simple.cpp @@ -0,0 +1,454 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// + // + // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+ // + // By downloading, copying, installing or using the software you agree to this license. + // If you do not agree to this license, do not download, install, + // copy or use the software. + // + // + // License Agreement + // For Open Source Computer Vision Library + // + // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. + // Copyright (C) 2008-2013, Willow Garage Inc., all rights reserved. + // Third party copyrights are property of their respective owners. + // + // Redistribution and use in source and binary forms, with or without modification, + // are permitted provided that the following conditions are met: + // + // * Redistribution's of source code must retain the above copyright notice, + // this list of conditions and the following disclaimer. + // + // * Redistribution's in binary form must reproduce the above copyright notice, + // this list of conditions and the following disclaimer in the documentation + // and / or other materials provided with the distribution. + // + // * The name of the copyright holders may not be used to endorse or promote products + // derived from this software without specific prior written permission. + // + // This software is provided by the copyright holders and contributors "as is" and + // any express or implied warranties, including, but not limited to, the implied + // warranties of merchantability and fitness for a particular purpose are disclaimed. + // In no event shall the Intel Corporation or contributors be liable for any direct, + // indirect, incidental, special, exemplary, or consequential damages + // (including, but not limited to, procurement of substitute goods or services; + // loss of use, data, or profits; or business interruption) however caused + // and on any theory of liability, whether in contract, strict liability, + // or tort (including negligence or otherwise) arising in any way out of + // the use of this software, even if advised of the possibility of such damage. 
+ // + //M*/ + +#include "test_precomp.hpp" + +namespace opencv_test { namespace { + +TEST(Viz, show_cloud_bluberry) +{ + Mat dragon_cloud = readCloud(get_dragon_ply_file_path()); + + Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0)); + + Viz3d viz("show_cloud_bluberry"); + viz.setBackgroundColor(Color::black()); + viz.showWidget("coosys", WCoordinateSystem()); + viz.showWidget("dragon", WCloud(dragon_cloud, Color::bluberry()), pose); + + viz.showWidget("text2d", WText("Bluberry cloud", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_cloud_random_color) +{ + Mat dragon_cloud = readCloud(get_dragon_ply_file_path()); + + Mat colors(dragon_cloud.size(), CV_8UC3); + theRNG().fill(colors, RNG::UNIFORM, 0, 255); + + Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0)); + + Viz3d viz("show_cloud_random_color"); + viz.setBackgroundMeshLab(); + viz.showWidget("coosys", WCoordinateSystem()); + viz.showWidget("dragon", WCloud(dragon_cloud, colors), pose); + viz.showWidget("text2d", WText("Random color cloud", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_cloud_masked) +{ + Mat dragon_cloud = readCloud(get_dragon_ply_file_path()); + + Vec3f qnan = Vec3f::all(std::numeric_limits::quiet_NaN()); + for(int i = 0; i < (int)dragon_cloud.total(); ++i) + if (i % 15 != 0) + dragon_cloud.at(i) = qnan; + + Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0)); + + Viz3d viz("show_cloud_masked"); + viz.showWidget("coosys", WCoordinateSystem()); + viz.showWidget("dragon", WCloud(dragon_cloud), pose); + viz.showWidget("text2d", WText("Nan masked cloud", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_cloud_collection) +{ + Mat cloud = readCloud(get_dragon_ply_file_path()); + + WCloudCollection ccol; + ccol.addCloud(cloud, Color::white(), Affine3d().translate(Vec3d(0, 0, 0)).rotate(Vec3d(CV_PI/2, 0, 0))); + ccol.addCloud(cloud, Color::blue(), Affine3d().translate(Vec3d(1, 0, 0))); 
+ ccol.addCloud(cloud, Color::red(), Affine3d().translate(Vec3d(2, 0, 0))); + ccol.finalize(); + + Viz3d viz("show_cloud_collection"); + viz.setBackgroundColor(Color::mlab()); + viz.showWidget("coosys", WCoordinateSystem()); + viz.showWidget("ccol", ccol); + viz.showWidget("text2d", WText("Cloud collection", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_painted_clouds) +{ + Mat cloud = readCloud(get_dragon_ply_file_path()); + + Viz3d viz("show_painted_clouds"); + viz.setBackgroundMeshLab(); + viz.showWidget("coosys", WCoordinateSystem()); + viz.showWidget("cloud1", WPaintedCloud(cloud), Affine3d(Vec3d(0.0, -CV_PI/2, 0.0), Vec3d(-1.5, 0.0, 0.0))); + viz.showWidget("cloud2", WPaintedCloud(cloud, Vec3d(0.0, -0.75, -1.0), Vec3d(0.0, 0.75, 0.0)), Affine3d(Vec3d(0.0, CV_PI/2, 0.0), Vec3d(1.5, 0.0, 0.0))); + viz.showWidget("cloud3", WPaintedCloud(cloud, Vec3d(0.0, 0.0, -1.0), Vec3d(0.0, 0.0, 1.0), Color::blue(), Color::red())); + viz.showWidget("arrow", WArrow(Vec3d(0.0, 1.0, -1.0), Vec3d(0.0, 1.0, 1.0), 0.009, Color::raspberry())); + viz.showWidget("text2d", WText("Painted clouds", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_mesh) +{ + Mesh mesh = Mesh::load(get_dragon_ply_file_path()); + + Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0)); + + Viz3d viz("show_mesh"); + viz.showWidget("coosys", WCoordinateSystem()); + viz.showWidget("mesh", WMesh(mesh), pose); + viz.showWidget("text2d", WText("Just mesh", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_mesh_random_colors) +{ + Mesh mesh = Mesh::load(get_dragon_ply_file_path()); + theRNG().fill(mesh.colors, RNG::UNIFORM, 0, 255); + + Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0)); + + Viz3d viz("show_mesh_random_color"); + viz.showWidget("coosys", WCoordinateSystem()); + viz.showWidget("mesh", WMesh(mesh), pose); + viz.setRenderingProperty("mesh", SHADING, SHADING_PHONG); + viz.showWidget("text2d", 
WText("Random color mesh", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_widget_merger) +{ + WWidgetMerger merger; + merger.addWidget(WCube(Vec3d::all(0.0), Vec3d::all(1.0), true, Color::gold())); + + RNG& rng = theRNG(); + for(int i = 0; i < 77; ++i) + { + Vec3b c; + rng.fill(c, RNG::NORMAL, Scalar::all(128), Scalar::all(48), true); + merger.addWidget(WSphere(Vec3d(c)*(1.0/255.0), 7.0/255.0, 10, Color(c[2], c[1], c[0]))); + } + merger.finalize(); + + Viz3d viz("show_mesh_random_color"); + viz.showWidget("coo", WCoordinateSystem()); + viz.showWidget("merger", merger); + viz.showWidget("text2d", WText("Widget merger", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_textured_mesh) +{ + Mat lena = imread(Path::combine(cvtest::TS::ptr()->get_data_path(), "lena.png")); + + std::vector points; + std::vector tcoords; + std::vector polygons; + for(size_t i = 0; i < 64; ++i) + { + double angle = CV_PI/2 * i/64.0; + points.push_back(Vec3d(0.00, cos(angle), sin(angle))*0.75); + points.push_back(Vec3d(1.57, cos(angle), sin(angle))*0.75); + tcoords.push_back(Vec2d(0.0, i/64.0)); + tcoords.push_back(Vec2d(1.0, i/64.0)); + } + + for(int i = 0; i < (int)points.size()/2-1; ++i) + { + int polys[] = {3, 2*i, 2*i+1, 2*i+2, 3, 2*i+1, 2*i+2, 2*i+3}; + polygons.insert(polygons.end(), polys, polys + sizeof(polys)/sizeof(polys[0])); + } + + cv::viz::Mesh mesh; + mesh.cloud = Mat(points, true).reshape(3, 1); + mesh.tcoords = Mat(tcoords, true).reshape(2, 1); + mesh.polygons = Mat(polygons, true).reshape(1, 1); + mesh.texture = lena; + + Viz3d viz("show_textured_mesh"); + viz.setBackgroundMeshLab(); + viz.showWidget("coosys", WCoordinateSystem()); + viz.showWidget("mesh", WMesh(mesh)); + viz.setRenderingProperty("mesh", SHADING, SHADING_PHONG); + viz.showWidget("text2d", WText("Textured mesh", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_polyline) +{ + const Color palette[] = { 
Color::red(), Color::green(), Color::blue(), Color::gold(), Color::raspberry(), Color::bluberry(), Color::lime() }; + size_t palette_size = sizeof(palette)/sizeof(palette[0]); + + Mat polyline(1, 32, CV_64FC3), colors(1, 32, CV_8UC3); + for(int i = 0; i < (int)polyline.total(); ++i) + { + polyline.at(i) = Vec3d(i/16.0, cos(i * CV_PI/6), sin(i * CV_PI/6)); + colors.at(i) = palette[i & palette_size]; + } + + Viz3d viz("show_polyline"); + viz.showWidget("polyline", WPolyLine(polyline, colors)); + viz.showWidget("coosys", WCoordinateSystem()); + viz.showWidget("text2d", WText("Polyline", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_sampled_normals) +{ + Mesh mesh = Mesh::load(get_dragon_ply_file_path()); + computeNormals(mesh, mesh.normals); + + Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0)); + + Viz3d viz("show_sampled_normals"); + viz.showWidget("mesh", WMesh(mesh), pose); + viz.showWidget("normals", WCloudNormals(mesh.cloud, mesh.normals, 30, 0.1f, Color::green()), pose); + viz.setRenderingProperty("normals", LINE_WIDTH, 2.0); + viz.showWidget("text2d", WText("Cloud or mesh normals", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_cloud_shaded_by_normals) +{ + Mesh mesh = Mesh::load(get_dragon_ply_file_path()); + computeNormals(mesh, mesh.normals); + + Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0)); + + WCloud cloud(mesh.cloud, Color::white(), mesh.normals); + cloud.setRenderingProperty(SHADING, SHADING_GOURAUD); + + Viz3d viz("show_cloud_shaded_by_normals"); + viz.showWidget("cloud", cloud, pose); + viz.showWidget("text2d", WText("Cloud shaded by normals", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_trajectories) +{ + std::vector path = generate_test_trajectory(), sub0, sub1, sub2, sub3, sub4, sub5; + int size =(int)path.size(); + + Mat(path).rowRange(0, size/10+1).copyTo(sub0); + Mat(path).rowRange(size/10, size/5+1).copyTo(sub1); + 
Mat(path).rowRange(size/5, 11*size/12).copyTo(sub2); + Mat(path).rowRange(11*size/12, size).copyTo(sub3); + Mat(path).rowRange(3*size/4, 33*size/40).copyTo(sub4); + Mat(path).rowRange(33*size/40, 9*size/10).copyTo(sub5); + Matx33d K(1024.0, 0.0, 320.0, 0.0, 1024.0, 240.0, 0.0, 0.0, 1.0); + + Viz3d viz("show_trajectories"); + viz.showWidget("coos", WCoordinateSystem()); + viz.showWidget("sub0", WTrajectorySpheres(sub0, 0.25, 0.07)); + viz.showWidget("sub1", WTrajectory(sub1, WTrajectory::PATH, 0.2, Color::brown())); + viz.showWidget("sub2", WTrajectory(sub2, WTrajectory::FRAMES, 0.2)); + viz.showWidget("sub3", WTrajectory(sub3, WTrajectory::BOTH, 0.2, Color::green())); + viz.showWidget("sub4", WTrajectoryFrustums(sub4, K, 0.3, Color::yellow())); + viz.showWidget("sub5", WTrajectoryFrustums(sub5, Vec2d(0.78, 0.78), 0.15)); + viz.showWidget("text2d", WText("Different kinds of supported trajectories", Point(20, 20), 20, Color::green())); + + int i = 0; + for(unsigned num = 0; num < 50; ++num) + { + double a = --i % 360; + Vec3d pose(sin(a * CV_PI/180), 0.7, cos(a * CV_PI/180)); + viz.setViewerPose(makeCameraPose(pose * 7.5, Vec3d(0.0, 0.5, 0.0), Vec3d(0.0, 0.1, 0.0))); + viz.spinOnce(100, true); + } + viz.resetCamera(); + viz.spinOnce(500, true); +} + +TEST(Viz, show_trajectory_reposition) +{ + std::vector path = generate_test_trajectory(); + + Viz3d viz("show_trajectory_reposition_to_origin"); + viz.showWidget("coos", WCoordinateSystem()); + viz.showWidget("sub3", WTrajectory(Mat(path).rowRange(0, (int)path.size()/3), WTrajectory::BOTH, 0.2, Color::brown()), path.front().inv()); + viz.showWidget("text2d", WText("Trajectory resposition to origin", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_camera_positions) +{ + Matx33d K(1024.0, 0.0, 320.0, 0.0, 1024.0, 240.0, 0.0, 0.0, 1.0); + Mat lena = imread(Path::combine(cvtest::TS::ptr()->get_data_path(), "lena.png")); + Mat gray = make_gray(lena); + + Affine3d poses[2]; + for(int i = 0; 
i < 2; ++i) + { + Vec3d pose = 5 * Vec3d(sin(3.14 + 2.7 + i*60 * CV_PI/180), 0.4 - i*0.3, cos(3.14 + 2.7 + i*60 * CV_PI/180)); + poses[i] = makeCameraPose(pose, Vec3d(0.0, 0.0, 0.0), Vec3d(0.0, -0.1, 0.0)); + } + + Viz3d viz("show_camera_positions"); + viz.showWidget("sphe", WSphere(Point3d(0,0,0), 1.0, 10, Color::orange_red())); + viz.showWidget("coos", WCoordinateSystem(1.5)); + viz.showWidget("pos1", WCameraPosition(0.75), poses[0]); + viz.showWidget("pos2", WCameraPosition(Vec2d(0.78, 0.78), lena, 2.2, Color::green()), poses[0]); + viz.showWidget("pos3", WCameraPosition(0.75), poses[1]); + viz.showWidget("pos4", WCameraPosition(K, gray, 3, Color::indigo()), poses[1]); + viz.showWidget("text2d", WText("Camera positions with images", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_overlay_image) +{ + Mat lena = imread(Path::combine(cvtest::TS::ptr()->get_data_path(), "lena.png")); + Mat gray = make_gray(lena); + + Size2d half_lsize = Size2d(lena.size()) * 0.5; + + Viz3d viz("show_overlay_image"); + viz.setBackgroundMeshLab(); + Size vsz = viz.getWindowSize(); + + viz.showWidget("coos", WCoordinateSystem()); + viz.showWidget("cube", WCube()); + viz.showWidget("img1", WImageOverlay(lena, Rect(Point(10, 10), half_lsize))); + viz.showWidget("img2", WImageOverlay(gray, Rect(Point(vsz.width-10-lena.cols/2, 10), half_lsize))); + viz.showWidget("img3", WImageOverlay(gray, Rect(Point(10, vsz.height-10-lena.rows/2), half_lsize))); + viz.showWidget("img5", WImageOverlay(lena, Rect(Point(vsz.width-10-lena.cols/2, vsz.height-10-lena.rows/2), half_lsize))); + viz.showWidget("text2d", WText("Overlay images", Point(20, 20), 20, Color::green())); + + int i = 0; + for(unsigned num = 0; num < 50; ++num) + { + double a = ++i % 360; + Vec3d pose(sin(a * CV_PI/180), 0.7, cos(a * CV_PI/180)); + viz.setViewerPose(makeCameraPose(pose * 3, Vec3d(0.0, 0.5, 0.0), Vec3d(0.0, 0.1, 0.0))); + viz.getWidget("img1").cast().setImage(lena * 
pow(sin(i*10*CV_PI/180) * 0.5 + 0.5, 1.0)); + viz.spinOnce(100, true); + } + viz.showWidget("text2d", WText("Overlay images (stopped)", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + + +TEST(Viz, show_image_method) +{ + Mat lena = imread(Path::combine(cvtest::TS::ptr()->get_data_path(), "lena.png")); + + Viz3d viz("show_image_method"); + viz.showImage(lena); + viz.spinOnce(1500, true); + viz.showImage(lena, lena.size()); + viz.spinOnce(1500, true); + + cv::viz::imshow("show_image_method", make_gray(lena)).spinOnce(500, true); +} + +TEST(Viz, show_image_3d) +{ + Mat lena = imread(Path::combine(cvtest::TS::ptr()->get_data_path(), "lena.png")); + Mat gray = make_gray(lena); + + Viz3d viz("show_image_3d"); + viz.setBackgroundMeshLab(); + viz.showWidget("coos", WCoordinateSystem()); + viz.showWidget("cube", WCube()); + viz.showWidget("arr0", WArrow(Vec3d(0.5, 0.0, 0.0), Vec3d(1.5, 0.0, 0.0), 0.009, Color::raspberry())); + viz.showWidget("img0", WImage3D(lena, Size2d(1.0, 1.0)), Affine3d(Vec3d(0.0, CV_PI/2, 0.0), Vec3d(.5, 0.0, 0.0))); + viz.showWidget("arr1", WArrow(Vec3d(-0.5, -0.5, 0.0), Vec3d(0.2, 0.2, 0.0), 0.009, Color::raspberry())); + viz.showWidget("img1", WImage3D(gray, Size2d(1.0, 1.0), Vec3d(-0.5, -0.5, 0.0), Vec3d(1.0, 1.0, 0.0), Vec3d(0.0, 1.0, 0.0))); + + viz.showWidget("arr3", WArrow(Vec3d::all(-0.5), Vec3d::all(0.5), 0.009, Color::raspberry())); + + viz.showWidget("text2d", WText("Images in 3D", Point(20, 20), 20, Color::green())); + + int i = 0; + for(unsigned num = 0; num < 50; ++num) + { + viz.getWidget("img0").cast().setImage(lena * pow(sin(i++*7.5*CV_PI/180) * 0.5 + 0.5, 1.0)); + viz.spinOnce(100, true); + } + viz.showWidget("text2d", WText("Images in 3D (stopped)", Point(20, 20), 20, Color::green())); + viz.spinOnce(500, true); +} + +TEST(Viz, show_simple_widgets) +{ + Viz3d viz("show_simple_widgets"); + viz.setBackgroundMeshLab(); + + viz.showWidget("coos", WCoordinateSystem()); + viz.showWidget("cube", WCube()); + 
viz.showWidget("cub0", WCube(Vec3d::all(-1.0), Vec3d::all(-0.5), false, Color::indigo())); + viz.showWidget("arro", WArrow(Vec3d::all(-0.5), Vec3d::all(0.5), 0.009, Color::raspberry())); + viz.showWidget("cir1", WCircle(0.5, 0.01, Color::bluberry())); + viz.showWidget("cir2", WCircle(0.5, Point3d(0.5, 0.0, 0.0), Vec3d(1.0, 0.0, 0.0), 0.01, Color::apricot())); + + viz.showWidget("cyl0", WCylinder(Vec3d(-0.5, 0.5, -0.5), Vec3d(0.5, 0.5, -0.5), 0.125, 30, Color::brown())); + viz.showWidget("con0", WCone(0.25, 0.125, 6, Color::azure())); + viz.showWidget("con1", WCone(0.125, Point3d(0.5, -0.5, 0.5), Point3d(0.5, -1.0, 0.5), 6, Color::turquoise())); + + viz.showWidget("text2d", WText("Different simple widgets", Point(20, 20), 20, Color::green())); + viz.showWidget("text3d", WText3D("Simple 3D text", Point3d( 0.5, 0.5, 0.5), 0.125, false, Color::green())); + + viz.showWidget("plane1", WPlane(Size2d(0.25, 0.75))); + viz.showWidget("plane2", WPlane(Vec3d(0.5, -0.5, -0.5), Vec3d(0.0, 1.0, 1.0), Vec3d(1.0, 1.0, 0.0), Size2d(1.0, 0.5), Color::gold())); + + viz.showWidget("grid1", WGrid(Vec2i(7,7), Vec2d::all(0.75), Color::gray()), Affine3d().translate(Vec3d(0.0, 0.0, -1.0))); + + viz.spinOnce(500, true); + viz.getWidget("text2d").cast().setText("Different simple widgets (updated)"); + viz.getWidget("text3d").cast().setText("Updated text 3D"); + viz.spinOnce(500, true); +} + +TEST(Viz, show_follower) +{ + Viz3d viz("show_follower"); + + viz.showWidget("coos", WCoordinateSystem()); + viz.showWidget("cube", WCube()); + viz.showWidget("t3d_2", WText3D("Simple 3D follower", Point3d(-0.5, -0.5, 0.5), 0.125, true, Color::green())); + viz.showWidget("text2d", WText("Follower: text always facing camera", Point(20, 20), 20, Color::green())); + viz.setBackgroundMeshLab(); + viz.spinOnce(500, true); + viz.getWidget("t3d_2").cast().setText("Updated follower 3D"); + viz.spinOnce(500, true); +} + +}} // namespace diff --git 
a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/creating_widgets/creating_widgets.markdown b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/creating_widgets/creating_widgets.markdown new file mode 100644 index 00000000..3e97a06f --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/creating_widgets/creating_widgets.markdown @@ -0,0 +1,58 @@ +Creating Widgets {#tutorial_creating_widgets} +================ + +@prev_tutorial{tutorial_transformations} +@next_tutorial{tutorial_histo3D} + +Goal +---- + +In this tutorial you will learn how to + +- Create your own widgets using WidgetAccessor and VTK. +- Show your widget in the visualization window. + +Code +---- + +You can download the code from [here ](https://github.com/opencv/opencv_contrib/tree/master/modules/viz/samples/creating_widgets.cpp). +@include viz/samples/creating_widgets.cpp + +Explanation +----------- + +Here is the general structure of the program: + +- Extend Widget3D class to create a new 3D widget. + @code{.cpp} + class WTriangle : public viz::Widget3D + { + public: + WTriangle(const Point3f &pt1, const Point3f &pt2, const Point3f &pt3, const viz::Color & color = viz::Color::white()); + }; + @endcode +- Assign a VTK actor to the widget. + @code{.cpp} + // Store this actor in the widget in order that visualizer can access it + viz::WidgetAccessor::setProp(*this, actor); + @endcode +- Set color of the widget. + @code{.cpp} + // Set the color of the widget. This has to be called after WidgetAccessor. + setColor(color); + @endcode +- Construct a triangle widget and display it in the window. + @code{.cpp} + /// Create a triangle widget + WTriangle tw(Point3f(0.0,0.0,0.0), Point3f(1.0,1.0,1.0), Point3f(0.0,1.0,0.0), viz::Color::red()); + + /// Show widget in the visualizer window + myWindow.showWidget("TRIANGLE", tw); + @endcode + +Results +------- + +Here is the result of the program. 
+ +![](images/red_triangle.png) diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/creating_widgets/images/red_triangle.png b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/creating_widgets/images/red_triangle.png new file mode 100644 index 00000000..7da6ad06 Binary files /dev/null and b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/creating_widgets/images/red_triangle.png differ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/histo3D/histo3D.markdown b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/histo3D/histo3D.markdown new file mode 100644 index 00000000..ff77c9cf --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/histo3D/histo3D.markdown @@ -0,0 +1,53 @@ +Creating a 3D histogram {#tutorial_histo3D} +================ + +@prev_tutorial{tutorial_creating_widgets} + +Goal +---- + +In this tutorial you will learn how to + +- Create your own callback keyboard function for viz window. +- Show your 3D histogram in a viz window. + +Code +---- + +You can download the code from [here ](https://github.com/opencv/opencv_contrib/tree/master/modules/viz/samples/histo3D.cpp). +@include viz/samples/histo3D.cpp + +Explanation +----------- + +Here is the general structure of the program: + +- You can give full path to an image in command line + @snippet histo3D.cpp command_line_parser + + or without path, a synthetic image is generated with pixel values are a gaussian distribution @ref cv::RNG::fill center(60+/-10,40+/-5,50+/-20) in first quadrant, + (160+/-20,10+/-5,50+/-10) in second quadrant, (90+/-10,100+/-20,50+/-20) in third quadrant, (100+/-10,10+/-5,150+/-40) in last quadrant. + @snippet histo3D.cpp synthetic_image + Image tridimensional histogram is calculated using opencv @ref cv::calcHist and @ref cv::normalize between 0 and 100. + @snippet histo3D.cpp calchist_for_histo3d + channel are 2, 1 and 0 to synchronise color with Viz axis color in objetc cv::viz::WCoordinateSystem. 
+ + A slider bar is inserted in the image window. Its initial value is 90, which means that only histogram cells greater than 9/100000.0 (23 pixels for a 512X512 image) will be displayed. + @snippet histo3D.cpp slide_bar_for_thresh + We are ready to open a viz window with a callback function to capture keyboard events in the viz window. Using @ref cv::viz::Viz3d::spinOnce enables keyboard events to be captured in the @ref cv::imshow window too. + @snippet histo3D.cpp manage_viz_imshow_window + The function DrawHistogram3D processes the histogram Mat to display it in a Viz window. The number of planes, rows and columns in a [three dimensional Mat](@ref CVMat_Details ) can be found using this code : + @snippet histo3D.cpp get_cube_size + To get the histogram value at a specific location we use the @ref cv::Mat::at(int i0,int i1, int i2) method with three arguments k, i and j where k is the plane number, i the row number and j the column number. + @snippet histo3D.cpp get_cube_values + +- Callback function + The principle is the same as for a mouse callback function. The key code pressed is in the field code of the class @ref cv::viz::KeyboardEvent. + @snippet histo3D.cpp viz_keyboard_callback + +Results +------- + +Here is the result of the program with no argument and threshold equal to 50.
+ +![](images/histo50.png) diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/histo3D/images/histo50.png b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/histo3D/images/histo50.png new file mode 100644 index 00000000..b2b4416c Binary files /dev/null and b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/histo3D/images/histo50.png differ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/images/facedetect.jpg b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/images/facedetect.jpg new file mode 100644 index 00000000..788b7d82 Binary files /dev/null and b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/images/facedetect.jpg differ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/images/image_effects.png b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/images/image_effects.png new file mode 100644 index 00000000..25edb668 Binary files /dev/null and b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/images/image_effects.png differ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/images/intro.png b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/images/intro.png new file mode 100644 index 00000000..5f2dc1aa Binary files /dev/null and b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/images/intro.png differ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/launching_viz/images/window_demo.png b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/launching_viz/images/window_demo.png new file mode 100644 index 00000000..b853fe29 Binary files /dev/null and b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/launching_viz/images/window_demo.png differ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/launching_viz/launching_viz.markdown b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/launching_viz/launching_viz.markdown new file mode 100644 index 00000000..661821ef --- /dev/null +++ 
b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/launching_viz/launching_viz.markdown @@ -0,0 +1,66 @@ +Launching Viz {#tutorial_launching_viz} +============= + +@next_tutorial{tutorial_widget_pose} + +Goal +---- + +In this tutorial you will learn how to + +- Open a visualization window. +- Access a window by its name. +- Start event loop. +- Start event loop for a given amount of time. + +Code +---- + +You can download the code from [here ](https://github.com/opencv/opencv_contrib/tree/master/modules/viz/samples/launching_viz.cpp). +@include viz/samples/launching_viz.cpp + +Explanation +----------- + +Here is the general structure of the program: + +- Create a window. + @code{.cpp} + /// Create a window + viz::Viz3d myWindow("Viz Demo"); + @endcode +- Start event loop. This event loop will run until user terminates it by pressing **e**, **E**, + **q**, **Q**. + @code{.cpp} + /// Start event loop + myWindow.spin(); + @endcode +- Access same window via its name. Since windows are implicitly shared, **sameWindow** is exactly + the same with **myWindow**. If the name does not exist, a new window is created. + @code{.cpp} + /// Access window via its name + viz::Viz3d sameWindow = viz::getWindowByName("Viz Demo"); + @endcode +- Start a controlled event loop. Once it starts, **wasStopped** is set to false. Inside the while + loop, in each iteration, **spinOnce** is called to prevent event loop from completely stopping. + Inside the while loop, user can execute other statements including those which interact with the + window. + @code{.cpp} + /// Event loop is over when pressed q, Q, e, E + /// Start event loop once for 1 millisecond + sameWindow.spinOnce(1, true); + while(!sameWindow.wasStopped()) + { + /// Interact with window + + /// Event loop for 1 millisecond + sameWindow.spinOnce(1, true); + } + @endcode + +Results +------- + +Here is the result of the program. 
+ +![](images/window_demo.png) diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/table_of_content_viz.markdown b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/table_of_content_viz.markdown new file mode 100644 index 00000000..bcbebd15 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/table_of_content_viz.markdown @@ -0,0 +1,52 @@ +OpenCV Viz {#tutorial_table_of_content_viz} +========== + +- @subpage tutorial_launching_viz + + *Languages:* C++ + + *Compatibility:* \> OpenCV 3.0.0 + + *Author:* Ozan Tonkal + + You will learn how to launch a viz window. + +- @subpage tutorial_widget_pose + + *Languages:* C++ + + *Compatibility:* \> OpenCV 3.0.0 + + *Author:* Ozan Tonkal + + You will learn how to change pose of a widget. + +- @subpage tutorial_transformations + + *Languages:* C++ + + *Compatibility:* \> OpenCV 3.0.0 + + *Author:* Ozan Tonkal + + You will learn how to transform between global and camera frames. + +- @subpage tutorial_creating_widgets + + *Languages:* C++ + + *Compatibility:* \> OpenCV 3.0.0 + + *Author:* Ozan Tonkal + + You will learn how to create your own widgets. + +- @subpage tutorial_histo3D + + *Languages:* C++ + + *Compatibility:* \> OpenCV 3.0.0 + + *Author:* Laurent Berger + + You will learn how to plot a 3D histogram. 
diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/transformations/images/camera_view_point.png b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/transformations/images/camera_view_point.png new file mode 100644 index 00000000..e2ac5b0f Binary files /dev/null and b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/transformations/images/camera_view_point.png differ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/transformations/images/global_view_point.png b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/transformations/images/global_view_point.png new file mode 100644 index 00000000..fc6de2c1 Binary files /dev/null and b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/transformations/images/global_view_point.png differ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/transformations/transformations.markdown b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/transformations/transformations.markdown new file mode 100644 index 00000000..b5c73db8 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/transformations/transformations.markdown @@ -0,0 +1,91 @@ +Transformations {#tutorial_transformations} +=============== + +@prev_tutorial{tutorial_widget_pose} +@next_tutorial{tutorial_creating_widgets} + +Goal +---- + +In this tutorial you will learn how to + +- How to use makeTransformToGlobal to compute pose +- How to use makeCameraPose and Viz3d::setViewerPose +- How to visualize camera position by axes and by viewing frustum + +Code +---- + +You can download the code from [here ](https://github.com/opencv/opencv_contrib/tree/master/modules/viz/samples/transformations.cpp). +@include viz/samples/transformations.cpp + +Explanation +----------- + +Here is the general structure of the program: + +- Create a visualization window. 
+ @code{.cpp} + /// Create a window + viz::Viz3d myWindow("Transformations"); + @endcode +- Get camera pose from camera position, camera focal point and y direction. + @code{.cpp} + /// Let's assume camera has the following properties + Point3f cam_pos(3.0f,3.0f,3.0f), cam_focal_point(3.0f,3.0f,2.0f), cam_y_dir(-1.0f,0.0f,0.0f); + + /// We can get the pose of the cam using makeCameraPose + Affine3f cam_pose = viz::makeCameraPose(cam_pos, cam_focal_point, cam_y_dir); + @endcode +- Obtain transform matrix knowing the axes of camera coordinate system. + @code{.cpp} + /// We can get the transformation matrix from camera coordinate system to global using + /// - makeTransformToGlobal. We need the axes of the camera + Affine3f transform = viz::makeTransformToGlobal(Vec3f(0.0f,-1.0f,0.0f), Vec3f(-1.0f,0.0f,0.0f), Vec3f(0.0f,0.0f,-1.0f), cam_pos); + @endcode +- Create a cloud widget from bunny.ply file + @code{.cpp} + /// Create a cloud widget. + Mat bunny_cloud = cvcloud_load(); + viz::WCloud cloud_widget(bunny_cloud, viz::Color::green()); + @endcode +- Given the pose in camera coordinate system, estimate the global pose. + @code{.cpp} + /// Pose of the widget in camera frame + Affine3f cloud_pose = Affine3f().translate(Vec3f(0.0f,0.0f,3.0f)); + /// Pose of the widget in global frame + Affine3f cloud_pose_global = transform * cloud_pose; + @endcode +- If the view point is set to be global, visualize camera coordinate frame and viewing frustum. 
+ @code{.cpp} + /// Visualize camera frame + if (!camera_pov) + { + viz::WCameraPosition cpw(0.5); // Coordinate axes + viz::WCameraPosition cpw_frustum(Vec2f(0.889484, 0.523599)); // Camera frustum + myWindow.showWidget("CPW", cpw, cam_pose); + myWindow.showWidget("CPW_FRUSTUM", cpw_frustum, cam_pose); + } + @endcode +- Visualize the cloud widget with the estimated global pose + @code{.cpp} + /// Visualize widget + myWindow.showWidget("bunny", cloud_widget, cloud_pose_global); + @endcode +- If the view point is set to be camera's, set viewer pose to **cam_pose**. + @code{.cpp} + /// Set the viewer pose to that of camera + if (camera_pov) + myWindow.setViewerPose(cam_pose); + @endcode + +Results +------- + +-# Here is the result from the camera point of view. + +![](images/camera_view_point.png) + +-# Here is the result from global point of view. + +![](images/global_view_point.png) \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/widget_pose/images/widgetpose.png b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/widget_pose/images/widgetpose.png new file mode 100644 index 00000000..ef8a5937 Binary files /dev/null and b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/widget_pose/images/widgetpose.png differ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/widget_pose/widget_pose.markdown b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/widget_pose/widget_pose.markdown new file mode 100644 index 00000000..d9b5ff59 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/viz/tutorials/widget_pose/widget_pose.markdown @@ -0,0 +1,88 @@ +Pose of a widget {#tutorial_widget_pose} +================ + +@prev_tutorial{tutorial_launching_viz} +@next_tutorial{tutorial_transformations} + +Goal +---- + +In this tutorial you will learn how to + +- Add widgets to the visualization window +- Use Affine3 to set pose of a widget +- Rotating and translating a widget along an axis + +Code +---- + +You 
can download the code from [here ](https://github.com/opencv/opencv_contrib/tree/master/modules/viz/samples/widget_pose.cpp). +@include viz/samples/widget_pose.cpp + +Explanation +----------- + +Here is the general structure of the program: + +- Create a visualization window. + @code{.cpp} + /// Create a window + viz::Viz3d myWindow("Coordinate Frame"); + @endcode +- Show coordinate axes in the window using CoordinateSystemWidget. + @code{.cpp} + /// Add coordinate axes + myWindow.showWidget("Coordinate Widget", viz::WCoordinateSystem()); + @endcode +- Display a line representing the axis (1,1,1). + @code{.cpp} + /// Add line to represent (1,1,1) axis + viz::WLine axis(Point3f(-1.0f,-1.0f,-1.0f), Point3f(1.0f,1.0f,1.0f)); + axis.setRenderingProperty(viz::LINE_WIDTH, 4.0); + myWindow.showWidget("Line Widget", axis); + @endcode +- Construct a cube. + @code{.cpp} + /// Construct a cube widget + viz::WCube cube_widget(Point3f(0.5,0.5,0.0), Point3f(0.0,0.0,-0.5), true, viz::Color::blue()); + cube_widget.setRenderingProperty(viz::LINE_WIDTH, 4.0); + myWindow.showWidget("Cube Widget", cube_widget); + @endcode +- Create rotation matrix from rodrigues vector + @code{.cpp} + /// Rotate around (1,1,1) + rot_vec.at(0,0) += CV_PI * 0.01f; + rot_vec.at(0,1) += CV_PI * 0.01f; + rot_vec.at(0,2) += CV_PI * 0.01f; + + ... + + Mat rot_mat; + Rodrigues(rot_vec, rot_mat); + @endcode +- Use Affine3f to set pose of the cube. + @code{.cpp} + /// Construct pose + Affine3f pose(rot_mat, Vec3f(translation, translation, translation)); + myWindow.setWidgetPose("Cube Widget", pose); + @endcode +- Animate the rotation using wasStopped and spinOnce + @code{.cpp} + while(!myWindow.wasStopped()) + { + ... + + myWindow.spinOnce(1, true); + } + @endcode + +Results +------- + +Here is the result of the program. + +\htmlonly +
+ +
+\endhtmlonly diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/CMakeLists.txt b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/CMakeLists.txt new file mode 100644 index 00000000..a15c4337 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/CMakeLists.txt @@ -0,0 +1,40 @@ +set(the_description "WeChat QR code Detector") +ocv_define_module(wechat_qrcode opencv_core opencv_imgproc opencv_objdetect opencv_dnn WRAP java objc python js) + +# iconv support isn't automatic on some systems +if(CMAKE_VERSION VERSION_GREATER "3.11") + find_package(Iconv QUIET) + if(Iconv_FOUND) + ocv_target_link_libraries(${the_module} Iconv::Iconv) + else() + ocv_target_compile_definitions(${the_module} PRIVATE "NO_ICONV=1") + endif() +endif() + +# need to change +set(wechat_qrcode_commit_hash "a8b69ccc738421293254aec5ddb38bd523503252") +set(hash_detect_caffemodel "238e2b2d6f3c18d6c3a30de0c31e23cf") +set(hash_detect_prototxt "6fb4976b32695f9f5c6305c19f12537d") +set(hash_sr_caffemodel "cbfcd60361a73beb8c583eea7e8e6664") +set(hash_sr_prototxt "69db99927a70df953b471daaba03fbef") + +set(model_types caffemodel prototxt) +set(model_names detect sr) + +foreach(model_name ${model_names}) + foreach(model_type ${model_types}) + ocv_download(FILENAME ${model_name}.${model_type} + HASH ${hash_${model_name}_${model_type}} + URL + "${OPENCV_WECHAT_QRCODE_URL}" + "$ENV{OPENCV_WECHAT_QRCODE_URL}" + "https://raw.githubusercontent.com/WeChatCV/opencv_3rdparty/${wechat_qrcode_commit_hash}/" + DESTINATION_DIR "${CMAKE_BINARY_DIR}/downloads/wechat_qrcode" + ID "wechat_qrcode" + RELATIVE_URL + STATUS res) + if(NOT res) + message(WARNING "WeChatQRCode: Can't get ${model_name} ${model_type} file for wechat qrcode.") + endif() + endforeach() +endforeach() diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/LICENSE b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/LICENSE new file mode 100644 index 00000000..303fddb1 --- /dev/null +++ 
b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/LICENSE @@ -0,0 +1,253 @@ +Tencent is pleased to support the open source community by making WeChat QRCode available. + +Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. +The below software in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). +All Tencent Modifications are Copyright (C) THL A29 Limited. + +WeChat QRCode is licensed under the Apache License Version 2.0, except for the third-party components listed below. + +Terms of the Apache License Version 2.0 +-------------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + + + +Other dependencies and licenses: + +Open Source Software Licensed under the Apache License Version 2.0: +-------------------------------------------------------------------- +1. zxing +Copyright (c) zxing authors and contributors +Please note this software may have been modified by Tencent. + +Terms of the Apache License Version 2.0: +-------------------------------------------------------------------- +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; and +You must cause any modified files to carry prominent notices stating that You changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative 
Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/README.md b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/README.md new file mode 100644 index 00000000..38a0935e --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/README.md @@ -0,0 +1,12 @@ +WeChat QR code detector for detecting and parsing QR code. +================================================ + +WeChat QR code detector is a high-performance and lightweight QR code detect and decode library, which is contributed by WeChat Computer Vision Team (WeChatCV). It has been widely used in various Tencent applications, including WeChat, WeCom, QQ, QQ Browser, and so on. There are four primary features of WeChat QR code detector: + +1. CNN-based QR code detector. Different from the traditional detector, we introduce a tiny CNN model for multiple code detection. The detector is based on SSD architecture with a MobileNetV2-like backbone, which is run on caffe inference framework. + +2. CNN-based QR code enhancement. To improve the performance of tiny QR code, we design a lighten super-resolution CNN model for QR code, called QRSR. Depth-wise convolution, DenseNet concat and deconvolution are the core techniques in the QRSR model. + +3. More robust finder pattern detection. Besides traditional horizontal line searching, we propose an area size based finder pattern detection method. we calculate the area size of black and white block to locate the finder pattern by the pre-computed connected cells. + +4. Massive engineering optimization. Based on [zxing-cpp](https://github.com/glassechidna/zxing-cpp), we conduct massive engineering optimization to boost the decoding success rate, such as trying more binarization methods, supporting N:1:3:1:1 finder pattern detection, finding more alignment pattern, clustering similar size finder pattern, and etc. 
diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/include/opencv2/wechat_qrcode.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/include/opencv2/wechat_qrcode.hpp new file mode 100644 index 00000000..676104cd --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/include/opencv2/wechat_qrcode.hpp @@ -0,0 +1,77 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. + +#ifndef __OPENCV_WECHAT_QRCODE_HPP__ +#define __OPENCV_WECHAT_QRCODE_HPP__ +#include "opencv2/core.hpp" +/** @defgroup wechat_qrcode WeChat QR code detector for detecting and parsing QR code. + */ +namespace cv { +namespace wechat_qrcode { +//! @addtogroup wechat_qrcode +//! @{ +/** + * @brief WeChat QRCode includes two CNN-based models: + * A object detection model and a super resolution model. + * Object detection model is applied to detect QRCode with the bounding box. + * super resolution model is applied to zoom in QRCode when it is small. + * + */ +class CV_EXPORTS_W WeChatQRCode { +public: + /** + * @brief Initialize the WeChatQRCode. + * It includes two models, which are packaged with caffe format. + * Therefore, there are prototxt and caffe models (In total, four paramenters). 
+ * + * @param detector_prototxt_path prototxt file path for the detector + * @param detector_caffe_model_path caffe model file path for the detector + * @param super_resolution_prototxt_path prototxt file path for the super resolution model + * @param super_resolution_caffe_model_path caffe file path for the super resolution model + */ + CV_WRAP WeChatQRCode(const std::string& detector_prototxt_path = "", + const std::string& detector_caffe_model_path = "", + const std::string& super_resolution_prototxt_path = "", + const std::string& super_resolution_caffe_model_path = ""); + ~WeChatQRCode(){}; + + /** + * @brief Both detects and decodes QR code. + * To simplify the usage, there is a only API: detectAndDecode + * + * @param img supports grayscale or color (BGR) image. + * @param points optional output array of vertices of the found QR code quadrangle. Will be + * empty if not found. + * @return list of decoded string. + */ + CV_WRAP std::vector detectAndDecode(InputArray img, OutputArrayOfArrays points = noArray()); + + /** + * @brief set scale factor + * QR code detector use neural network to detect QR. + * Before running the neural network, the input image is pre-processed by scaling. + * By default, the input image is scaled to an image with an area of 160000 pixels. + * The scale factor allows to use custom scale the input image: + * width = scaleFactor*width + * height = scaleFactor*width + * + * scaleFactor valuse must be > 0 and <= 1, otherwise the scaleFactor value is set to -1 + * and use default scaled to an image with an area of 160000 pixels. + */ + CV_WRAP void setScaleFactor(float _scalingFactor); + + CV_WRAP float getScaleFactor(); + +protected: + class Impl; + Ptr p; +}; + +//! 
@} +} // namespace wechat_qrcode +} // namespace cv +#endif // __OPENCV_WECHAT_QRCODE_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/samples/qrcode.py b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/samples/qrcode.py new file mode 100644 index 00000000..7713734f --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/samples/qrcode.py @@ -0,0 +1,53 @@ +import cv2 +import sys + +print(sys.argv[0]) +print('A demo program of WeChat QRCode Detector:') +camIdx = -1 +if len(sys.argv) > 1: + if sys.argv[1] == "-camera": + camIdx = int(sys.argv[2]) if len(sys.argv)>2 else 0 + img = cv2.imread(sys.argv[1]) +else: + print(" Usage: " + sys.argv[0] + " ") + exit(0) + +# For python API generator, it follows the template: {module_name}_{class_name}, +# so it is a little weird. +# The model is downloaded to ${CMAKE_BINARY_DIR}/downloads/wechat_qrcode if cmake runs without warnings, +# otherwise you can download them from https://github.com/WeChatCV/opencv_3rdparty/tree/wechat_qrcode. 
+try: + detector = cv2.wechat_qrcode_WeChatQRCode( + "detect.prototxt", "detect.caffemodel", "sr.prototxt", "sr.caffemodel") +except: + print("---------------------------------------------------------------") + print("Failed to initialize WeChatQRCode.") + print("Please, download 'detector.*' and 'sr.*' from") + print("https://github.com/WeChatCV/opencv_3rdparty/tree/wechat_qrcode") + print("and put them into the current directory.") + print("---------------------------------------------------------------") + exit(0) + +prevstr = "" + +if camIdx < 0: + res, points = detector.detectAndDecode(img) + print(res,points) +else: + cap = cv2.VideoCapture(camIdx) + while True: + res, img = cap.read() + if img is None: + break + res, points = detector.detectAndDecode(img) + for t in res: + if t != prevstr: + print(t) + if res: + prevstr = res[-1] + cv2.imshow("image", img) + if cv2.waitKey(30) >= 0: + break + # When everything done, release the capture + cap.release() + cv2.destroyAllWindows() diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/samples/qrcode_example.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/samples/qrcode_example.cpp new file mode 100644 index 00000000..046525b9 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/samples/qrcode_example.cpp @@ -0,0 +1,70 @@ +#include +#include +#include +#include + +using namespace std; +using namespace cv; + +#include +int main(int argc, char* argv[]) { + cout << endl << argv[0] << endl << endl; + cout << "A demo program of WeChat QRCode Detector: " << endl; + + Mat img; + int camIdx = -1; + if (argc > 1) { + bool live = strcmp(argv[1], "-camera") == 0; + if (live) { + camIdx = argc > 2 ? 
atoi(argv[2]) : 0; + } else { + img = imread(argv[1]); + } + } else { + cout << " Usage: " << argv[0] << " " << endl; + return 0; + } + // The model is downloaded to ${CMAKE_BINARY_DIR}/downloads/wechat_qrcode if cmake runs without warnings, + // otherwise you can download them from https://github.com/WeChatCV/opencv_3rdparty/tree/wechat_qrcode. + Ptr detector; + + try { + detector = makePtr("detect.prototxt", "detect.caffemodel", + "sr.prototxt", "sr.caffemodel"); + } catch (const std::exception& e) { + cout << + "\n---------------------------------------------------------------\n" + "Failed to initialize WeChatQRCode.\n" + "Please, download 'detector.*' and 'sr.*' from\n" + "https://github.com/WeChatCV/opencv_3rdparty/tree/wechat_qrcode\n" + "and put them into the current directory.\n" + "---------------------------------------------------------------\n"; + cout << e.what() << endl; + return 0; + } + string prevstr = ""; + vector points; + + if (camIdx < 0) { + auto res = detector->detectAndDecode(img, points); + for (const auto& t : res) cout << t << endl; + } else { + VideoCapture cap(camIdx); + for(;;) { + cap >> img; + if (img.empty()) + break; + auto res = detector->detectAndDecode(img, points); + for (const auto& t : res) { + if (t != prevstr) + cout << t << endl; + } + if (!res.empty()) + prevstr = res.back(); + imshow("image", img); + if (waitKey(30) >= 0) + break; + } + } + return 0; +} \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/samples/qrcode_example_without_nn.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/samples/qrcode_example_without_nn.cpp new file mode 100644 index 00000000..7428d0dd --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/samples/qrcode_example_without_nn.cpp @@ -0,0 +1,67 @@ +#include +#include +#include +#include + +using namespace std; +using namespace cv; + +#include + +int main(int argc, char* argv[]) { + cout << endl << argv[0] << endl << 
endl; + cout << "A demo program of WeChat QRCode Detector: " << endl; + + Mat img; + int camIdx = -1; + if (argc > 1) { + bool live = strcmp(argv[1], "-camera") == 0; + if (live) { + camIdx = argc > 2 ? atoi(argv[2]) : 0; + } else { + img = imread(argv[1]); + } + } else { + cout << " Usage: " << argv[0] << " " << endl; + return 0; + } + // The model is downloaded to ${CMAKE_BINARY_DIR}/downloads/wechat_qrcode if cmake runs without warnings, + // otherwise you can download them from https://github.com/WeChatCV/opencv_3rdparty/tree/wechat_qrcode. + Ptr detector; + + try { + detector = makePtr("", "", "", ""); + } catch (const std::exception& e) { + cout << + "\n---------------------------------------------------------------\n" + "Failed to initialize WeChatQRCode.\n" + "---------------------------------------------------------------\n"; + cout << e.what() << endl; + return 0; + } + string prevstr = ""; + vector points; + + if (camIdx < 0) { + auto res = detector->detectAndDecode(img, points); + for (const auto& t : res) cout << t << endl; + } else { + VideoCapture cap(camIdx); + for(;;) { + cap >> img; + if (img.empty()) + break; + auto res = detector->detectAndDecode(img, points); + for (const auto& t : res) { + if (t != prevstr) + cout << t << endl; + } + if (!res.empty()) + prevstr = res.back(); + imshow("image", img); + if (waitKey(30) >= 0) + break; + } + } + return 0; +} \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/samples/qrcode_without_nn.py b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/samples/qrcode_without_nn.py new file mode 100644 index 00000000..9b8feb52 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/samples/qrcode_without_nn.py @@ -0,0 +1,50 @@ +import cv2 +import sys + +print(sys.argv[0]) +print('A demo program of WeChat QRCode Detector:') +camIdx = -1 +if len(sys.argv) > 1: + if sys.argv[1] == "-camera": + camIdx = int(sys.argv[2]) if len(sys.argv)>2 else 0 + img = 
cv2.imread(sys.argv[1]) +else: + print(" Usage: " + sys.argv[0] + " ") + exit(0) + +# For python API generator, it follows the template: {module_name}_{class_name}, +# so it is a little weird. +# The model is downloaded to ${CMAKE_BINARY_DIR}/downloads/wechat_qrcode if cmake runs without warnings, +# otherwise you can download them from https://github.com/WeChatCV/opencv_3rdparty/tree/wechat_qrcode. +try: + detector = cv2.wechat_qrcode_WeChatQRCode( + "", "", "", "") +except: + print("---------------------------------------------------------------") + print("Failed to initialize WeChatQRCode.") + print("---------------------------------------------------------------") + exit(0) + +prevstr = "" + +if camIdx < 0: + res, points = detector.detectAndDecode(img) + print(res,points) +else: + cap = cv2.VideoCapture(camIdx) + while True: + res, img = cap.read() + if img is None: + break + res, points = detector.detectAndDecode(img) + for t in res: + if t != prevstr: + print(t) + if res: + prevstr = res[-1] + cv2.imshow("image", img) + if cv2.waitKey(30) >= 0: + break + # When everything done, release the capture + cap.release() + cv2.destroyAllWindows() diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/binarizermgr.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/binarizermgr.cpp new file mode 100644 index 00000000..db019cd5 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/binarizermgr.cpp @@ -0,0 +1,70 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+#include "precomp.hpp" +#include "binarizermgr.hpp" +#include "imgsource.hpp" + + +using zxing::Binarizer; +using zxing::LuminanceSource; +namespace cv { +namespace wechat_qrcode { +BinarizerMgr::BinarizerMgr() : m_iNowRotateIndex(0), m_iNextOnceBinarizer(-1) { + m_vecRotateBinarizer.push_back(Hybrid); + m_vecRotateBinarizer.push_back(FastWindow); + m_vecRotateBinarizer.push_back(SimpleAdaptive); + m_vecRotateBinarizer.push_back(AdaptiveThreshold); +} + +BinarizerMgr::~BinarizerMgr() {} + +zxing::Ref BinarizerMgr::Binarize(zxing::Ref source) { + BINARIZER binarizerIdx = m_vecRotateBinarizer[m_iNowRotateIndex]; + if (m_iNextOnceBinarizer >= 0) { + binarizerIdx = (BINARIZER)m_iNextOnceBinarizer; + } + + zxing::Ref binarizer; + switch (binarizerIdx) { + case Hybrid: + binarizer = new zxing::HybridBinarizer(source); + break; + case FastWindow: + binarizer = new zxing::FastWindowBinarizer(source); + break; + case SimpleAdaptive: + binarizer = new zxing::SimpleAdaptiveBinarizer(source); + break; + case AdaptiveThreshold: + binarizer = new zxing::AdaptiveThresholdMeanBinarizer(source); + break; + default: + binarizer = new zxing::HybridBinarizer(source); + break; + } + + return binarizer; +} + +void BinarizerMgr::SwitchBinarizer() { + m_iNowRotateIndex = (m_iNowRotateIndex + 1) % m_vecRotateBinarizer.size(); +} + +int BinarizerMgr::GetCurBinarizer() { + if (m_iNextOnceBinarizer != -1) return m_iNextOnceBinarizer; + return m_vecRotateBinarizer[m_iNowRotateIndex]; +} + +void BinarizerMgr::SetNextOnceBinarizer(int iBinarizerIndex) { + m_iNextOnceBinarizer = iBinarizerIndex; +} + +void BinarizerMgr::SetBinarizer(vector vecRotateBinarizer) { + m_vecRotateBinarizer = vecRotateBinarizer; +} +} // namespace wechat_qrcode +} // namespace cv \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/binarizermgr.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/binarizermgr.hpp new file mode 100644 index 00000000..a8a1e4ba --- 
/dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/binarizermgr.hpp @@ -0,0 +1,51 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. + +#ifndef __OPENCV_WECHAT_QRCODE_BINARIZERMGR_HPP__ +#define __OPENCV_WECHAT_QRCODE_BINARIZERMGR_HPP__ + +#include "zxing/binarizer.hpp" +#include "zxing/common/binarizer/adaptive_threshold_mean_binarizer.hpp" +#include "zxing/common/counted.hpp" +#include "zxing/common/binarizer/fast_window_binarizer.hpp" +#include "zxing/common/binarizer/hybrid_binarizer.hpp" +#include "zxing/common/binarizer/simple_adaptive_binarizer.hpp" +#include "zxing/zxing.hpp" + +namespace cv { +namespace wechat_qrcode { +class BinarizerMgr { +public: + enum BINARIZER { + Hybrid = 0, + FastWindow = 1, + SimpleAdaptive = 2, + AdaptiveThreshold = 3 + }; + +public: + BinarizerMgr(); + ~BinarizerMgr(); + + zxing::Ref Binarize(zxing::Ref source); + + void SwitchBinarizer(); + + int GetCurBinarizer(); + + void SetNextOnceBinarizer(int iBinarizerIndex); + + void SetBinarizer(vector vecRotateBinarizer); + +private: + int m_iNowRotateIndex; + int m_iNextOnceBinarizer; + vector m_vecRotateBinarizer; +}; +} // namespace wechat_qrcode +} // namespace cv +#endif // __OPENCV_WECHAT_QRCODE_BINARIZERMGR_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/decodermgr.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/decodermgr.cpp new file mode 100644 index 00000000..1e93aa15 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/decodermgr.cpp @@ -0,0 +1,92 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +#include "precomp.hpp" +#include "decodermgr.hpp" + + +using zxing::ArrayRef; +using zxing::BinaryBitmap; +using zxing::DecodeHints; +using zxing::ErrorHandler; +using zxing::LuminanceSource; +using zxing::Ref; +using zxing::Result; +using zxing::UnicomBlock; +namespace cv { +namespace wechat_qrcode { +int DecoderMgr::decodeImage(cv::Mat src, bool use_nn_detector, vector& results, vector>& zxing_points) { + int width = src.cols; + int height = src.rows; + if (width <= 20 || height <= 20) + return -1; // image data is not enough for providing reliable results + + std::vector scaled_img_data(src.data, src.data + width * height); + zxing::ArrayRef scaled_img_zx = + zxing::ArrayRef(new zxing::Array(scaled_img_data)); + + vector> zx_results; + + decode_hints_.setUseNNDetector(use_nn_detector); + + Ref source; + qbarUicomBlock_ = new UnicomBlock(width, height); + + // Four Binarizers + int tryBinarizeTime = 4; + for (int tb = 0; tb < tryBinarizeTime; tb++) { + if (source == NULL || height * width > source->getMaxSize()) { + source = ImgSource::create(scaled_img_zx.data(), width, height); + } else { + source->reset(scaled_img_zx.data(), width, height); + } + int ret = TryDecode(source, zx_results); + if (!ret) { + for(size_t k = 0; k < zx_results.size(); k++) { + results.emplace_back(zx_results[k]->getText()->getText()); + vector tmp_qr_points; + auto tmp_zx_points = zx_results[k]->getResultPoints(); + for (int i = 0; i < tmp_zx_points->size() / 4; i++) { + const int ind = i * 4; + for (int j = 1; j < 4; j++){ + tmp_qr_points.emplace_back(tmp_zx_points[ind + j]->getX(), tmp_zx_points[ind + j]->getY()); + } + 
tmp_qr_points.emplace_back(tmp_zx_points[ind]->getX(), tmp_zx_points[ind]->getY()); + } + zxing_points.push_back(tmp_qr_points); + } + return ret; + } + // try different binarizers + binarizer_mgr_.SwitchBinarizer(); + } + return -1; +} + +int DecoderMgr::TryDecode(Ref source, vector>& results) { + int res = -1; + string cell_result; + + // get binarizer + zxing::Ref binarizer = binarizer_mgr_.Binarize(source); + zxing::Ref binary_bitmap(new BinaryBitmap(binarizer)); + binary_bitmap->m_poUnicomBlock = qbarUicomBlock_; + + results = Decode(binary_bitmap, decode_hints_); + res = (results.size() == 0) ? 1 : 0; + + if (res == 0) { + results[0]->setBinaryMethod(int(binarizer_mgr_.GetCurBinarizer())); + } + + return res; +} + +vector> DecoderMgr::Decode(Ref image, DecodeHints hints) { + return reader_->decode(image, hints); +} +} // namespace wechat_qrcode +} // namespace cv \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/decodermgr.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/decodermgr.hpp new file mode 100644 index 00000000..1e04203b --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/decodermgr.hpp @@ -0,0 +1,46 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+ +#ifndef __OPENCV_WECHAT_QRCODE_DECODERMGR_HPP__ +#define __OPENCV_WECHAT_QRCODE_DECODERMGR_HPP__ + +// zxing +#include "zxing/binarizer.hpp" +#include "zxing/binarybitmap.hpp" +#include "zxing/decodehints.hpp" +#include "zxing/qrcode/qrcode_reader.hpp" +#include "zxing/result.hpp" + +// qbar +#include "binarizermgr.hpp" +#include "imgsource.hpp" +namespace cv { +namespace wechat_qrcode { + +class DecoderMgr { +public: + DecoderMgr() { reader_ = new zxing::qrcode::QRCodeReader(); }; + ~DecoderMgr(){}; + + int decodeImage(cv::Mat src, bool use_nn_detector, vector& result, vector>& zxing_points); + +private: + zxing::Ref qbarUicomBlock_; + zxing::DecodeHints decode_hints_; + + zxing::Ref reader_; + BinarizerMgr binarizer_mgr_; + + vector> Decode(zxing::Ref image, + zxing::DecodeHints hints); + + int TryDecode(zxing::Ref source, vector>& result); +}; + +} // namespace wechat_qrcode +} // namespace cv +#endif // __OPENCV_WECHAT_QRCODE_DECODERMGR_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/detector/align.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/detector/align.cpp new file mode 100644 index 00000000..05ab5335 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/detector/align.cpp @@ -0,0 +1,66 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+#include "../precomp.hpp" +#include "align.hpp" + +using std::max; +using std::min; +namespace cv { +namespace wechat_qrcode { +Align::Align() { rotate90_ = false; } + +Mat Align::calcWarpMatrix(const Mat src, const Mat dst) { + M_ = getPerspectiveTransform(src, dst); + M_inv_ = M_.inv(); + return M_; +} + +vector Align::warpBack(const vector &dst_pts) { + vector src_pts; + for (size_t j = 0; j < dst_pts.size(); j++) { + auto src_x = (rotate90_ ? dst_pts[j].y : dst_pts[j].x) + crop_x_; + auto src_y = (rotate90_ ? dst_pts[j].x : dst_pts[j].y) + crop_y_; + src_pts.push_back(Point2f(src_x, src_y)); + } + return src_pts; +} + +Mat Align::crop(const Mat &inputImg, const int width, const int height) { + Mat warp_dst = Mat::zeros(height, width, inputImg.type()); + + warpPerspective(inputImg, warp_dst, M_, warp_dst.size(), INTER_LINEAR, BORDER_CONSTANT, 255); + + return warp_dst; +} + +Mat Align::crop(const Mat &inputImg, const Mat &srcPts, const float paddingW, const float paddingH, + const int minPadding) { + int x0 = srcPts.at(0, 0); + int y0 = srcPts.at(0, 1); + int x2 = srcPts.at(2, 0); + int y2 = srcPts.at(2, 1); + + int width = x2 - x0 + 1; + int height = y2 - y0 + 1; + + int padx = max(paddingW * width, static_cast(minPadding)); + int pady = max(paddingH * height, static_cast(minPadding)); + + crop_x_ = max(x0 - padx, 0); + crop_y_ = max(y0 - pady, 0); + int end_x = min(x2 + padx, inputImg.cols - 1); + int end_y = min(y2 + pady, inputImg.rows - 1); + + Rect crop_roi(crop_x_, crop_y_, end_x - crop_x_ + 1, end_y - crop_y_ + 1); + + Mat dst = inputImg(crop_roi).clone(); + if (rotate90_) dst = dst.t(); // transpose + return dst; +} + +} // namespace wechat_qrcode +} // namespace cv diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/detector/align.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/detector/align.hpp new file mode 100644 index 00000000..2ad88a5b --- /dev/null +++ 
b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/detector/align.hpp @@ -0,0 +1,41 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. + +#ifndef __DETECTOR_ALIGN_HPP_ +#define __DETECTOR_ALIGN_HPP_ + +#include +#include +#include "opencv2/core.hpp" +#include "opencv2/imgproc.hpp" + +namespace cv { +namespace wechat_qrcode { + +class Align { +public: + Align(); + Mat calcWarpMatrix(const Mat src, const Mat dst); + std::vector warpBack(const std::vector &dst_pts); + Mat crop(const Mat &inputImg, const Mat &srcPts, const float paddingW, const float paddingH, + const int minPadding); + + void setRotate90(bool v) { rotate90_ = v; } + +private: + Mat crop(const Mat &inputImg, const int width, const int height); + Mat M_; + Mat M_inv_; + + int crop_x_; + int crop_y_; + bool rotate90_; +}; + +} // namespace wechat_qrcode +} // namespace cv +#endif // __DETECTOR_ALIGN_HPP_ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/detector/ssd_detector.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/detector/ssd_detector.cpp new file mode 100644 index 00000000..dca1851a --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/detector/ssd_detector.cpp @@ -0,0 +1,57 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+#include "../precomp.hpp" +#include "ssd_detector.hpp" +#define CLIP(x, x1, x2) max(x1, min(x, x2)) +namespace cv { +namespace wechat_qrcode { +int SSDDetector::init(const string& proto_path, const string& model_path) { + net_ = dnn::readNetFromCaffe(proto_path, model_path); + return 0; +} + +vector SSDDetector::forward(Mat img, const int target_width, const int target_height) { + int img_w = img.cols; + int img_h = img.rows; + Mat input; + resize(img, input, Size(target_width, target_height), 0, 0, INTER_CUBIC); + + dnn::blobFromImage(input, input, 1.0 / 255, Size(input.cols, input.rows), {0.0f, 0.0f, 0.0f}, + false, false); + net_.setInput(input, "data"); + + auto prob = net_.forward("detection_output"); + vector point_list; + // the shape is (1,1,100,7)=>(batch,channel,count,dim) + for (int row = 0; row < prob.size[2]; row++) { + const float* prob_score = prob.ptr(0, 0, row); + // prob_score[0] is not used. + // prob_score[1]==1 stands for qrcode + if (prob_score[1] == 1 && prob_score[2] > 1E-5) { + // add a safe score threshold due to https://github.com/opencv/opencv_contrib/issues/2877 + // prob_score[2] is the probability of the qrcode, which is not used. 
+ auto point = Mat(4, 2, CV_32FC1); + float x0 = CLIP(prob_score[3] * img_w, 0.0f, img_w - 1.0f); + float y0 = CLIP(prob_score[4] * img_h, 0.0f, img_h - 1.0f); + float x1 = CLIP(prob_score[5] * img_w, 0.0f, img_w - 1.0f); + float y1 = CLIP(prob_score[6] * img_h, 0.0f, img_h - 1.0f); + + point.at(0, 0) = x0; + point.at(0, 1) = y0; + point.at(1, 0) = x1; + point.at(1, 1) = y0; + point.at(2, 0) = x1; + point.at(2, 1) = y1; + point.at(3, 0) = x0; + point.at(3, 1) = y1; + point_list.push_back(point); + } + } + return point_list; +} +} // namespace wechat_qrcode +} // namespace cv \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/detector/ssd_detector.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/detector/ssd_detector.hpp new file mode 100644 index 00000000..e510cb32 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/detector/ssd_detector.hpp @@ -0,0 +1,31 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+ +#ifndef __DETECTOR_SSD_DETECTOR_HPP_ +#define __DETECTOR_SSD_DETECTOR_HPP_ + +#include + +#include "opencv2/dnn.hpp" +#include "opencv2/imgproc.hpp" +namespace cv { +namespace wechat_qrcode { + +class SSDDetector { +public: + SSDDetector(){}; + ~SSDDetector(){}; + int init(const std::string& proto_path, const std::string& model_path); + std::vector forward(Mat img, const int target_width, const int target_height); + +private: + dnn::Net net_; +}; + +} // namespace wechat_qrcode +} // namespace cv +#endif // __DETECTOR_SSD_DETECTOR_HPP_ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/imgsource.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/imgsource.cpp new file mode 100644 index 00000000..bd49de4e --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/imgsource.cpp @@ -0,0 +1,188 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "precomp.hpp" +#include "imgsource.hpp" + +using zxing::ArrayRef; +using zxing::ByteMatrix; +using zxing::ErrorHandler; +using zxing::LuminanceSource; +using zxing::Ref; +namespace cv { +namespace wechat_qrcode { + +// Initialize the ImgSource +ImgSource::ImgSource(unsigned char* pixels, int width, int height) + : Super(width, height) { + luminances = new unsigned char[width * height]; + memset(luminances, 0, width * height); + + rgbs = pixels; + + dataWidth = width; + dataHeight = height; + left = 0; + top = 0; + + // Make gray luminances first + makeGray(); +} + +// Added for crop function +ImgSource::ImgSource(unsigned char* pixels, int width, int height, int left_, int top_, + int cropWidth, int cropHeight, + ErrorHandler& err_handler) + : Super(cropWidth, cropHeight) { + rgbs = pixels; + + dataWidth = width; + dataHeight = height; + left = left_; + top = top_; + + // super(width, height); + if ((left_ + cropWidth) > dataWidth || (top_ + cropHeight) > dataHeight || top_ < 0 || + left_ < 0) { + err_handler = + zxing::IllegalArgumentErrorHandler("Crop rectangle does not fit within image data."); + return; + } + + luminances = new unsigned char[width * height]; + + // Make gray luminances first + makeGray(); +} + +ImgSource::~ImgSource() { + if (luminances != NULL) { + delete[] luminances; + } +} + +Ref ImgSource::create(unsigned char* pixels, int width, int height) { + return Ref(new ImgSource(pixels, width, height)); +} + +Ref ImgSource::create(unsigned char* pixels, int width, int height, int left, int top, + int cropWidth, int cropHeight, + zxing::ErrorHandler& err_handler) { + return Ref(new ImgSource(pixels, width, height, left, top, cropWidth, cropHeight, err_handler)); +} + +void ImgSource::reset(unsigned char* pixels, int width, int height) { + rgbs = pixels; + left = 0; + top = 0; + + setWidth(width); + setHeight(height); + dataWidth = width; + dataHeight = height; + makeGrayReset(); +} + +ArrayRef ImgSource::getRow(int y, zxing::ArrayRef row, 
+ zxing::ErrorHandler& err_handler) const { + if (y < 0 || y >= getHeight()) { + err_handler = zxing::IllegalArgumentErrorHandler("Requested row is outside the image"); + return ArrayRef(); + } + + int width = getWidth(); + if (row->data() == NULL || row->empty() || row->size() < width) { + row = zxing::ArrayRef(width); + } + int offset = (y + top) * dataWidth + left; + + char* rowPtr = &row[0]; + arrayCopy(luminances, offset, rowPtr, 0, width); + + return row; +} + +/** This is a more efficient implementation. */ +ArrayRef ImgSource::getMatrix() const { + int width = getWidth(); + int height = getHeight(); + + int area = width * height; + + // If the caller asks for the entire underlying image, save the copy and + // give them the original data. The docs specifically warn that + // result.length must be ignored. + if (width == dataWidth && height == dataHeight) { + return _matrix; + } + + zxing::ArrayRef newMatrix = zxing::ArrayRef(area); + + int inputOffset = top * dataWidth + left; + + // If the width matches the full width of the underlying data, perform a + // single copy. + if (width == dataWidth) { + arrayCopy(luminances, inputOffset, &newMatrix[0], 0, area); + return newMatrix; + } + + // Otherwise copy one cropped row at a time. 
+ for (int y = 0; y < height; y++) { + int outputOffset = y * width; + arrayCopy(luminances, inputOffset, &newMatrix[0], outputOffset, width); + inputOffset += dataWidth; + } + return newMatrix; +} + + +void ImgSource::makeGray() { + int area = dataWidth * dataHeight; + _matrix = zxing::ArrayRef(area); + arrayCopy(rgbs, 0, &_matrix[0], 0, area); +} + +void ImgSource::makeGrayReset() { + int area = dataWidth * dataHeight; + arrayCopy(rgbs, 0, &_matrix[0], 0, area); +} + +void ImgSource::arrayCopy(unsigned char* src, int inputOffset, char* dst, int outputOffset, + int length) const { + const unsigned char* srcPtr = src + inputOffset; + char* dstPtr = dst + outputOffset; + + memcpy(dstPtr, srcPtr, length * sizeof(unsigned char)); +} + +bool ImgSource::isCropSupported() const { return true; } + +Ref ImgSource::crop(int left_, int top_, int width, int height, + ErrorHandler& err_handler) const { + return ImgSource::create(rgbs, dataWidth, dataHeight, left + left_, top + top_, width, height, err_handler); +} + +bool ImgSource::isRotateSupported() const { return false; } + +Ref ImgSource::rotateCounterClockwise(ErrorHandler& err_handler) const { + // Intentionally flip the left, top, width, and height arguments as + // needed. dataWidth and dataHeight are always kept unrotated. + int width = getWidth(); + int height = getHeight(); + + return ImgSource::create(rgbs, dataWidth, dataHeight, top, left, height, width, err_handler); +} + + +Ref ImgSource::getByteMatrix() const { + return Ref(new ByteMatrix(getWidth(), getHeight(), getMatrix())); +} +} // namespace wechat_qrcode +} // namespace cv \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/imgsource.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/imgsource.hpp new file mode 100644 index 00000000..ec62dc6a --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/imgsource.hpp @@ -0,0 +1,63 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __OPENCV_WECHAT_QRCODE_IMGSOURCE_HPP__ +#define __OPENCV_WECHAT_QRCODE_IMGSOURCE_HPP__ +#include "zxing/common/bytematrix.hpp" +#include "zxing/errorhandler.hpp" +#include "zxing/luminance_source.hpp" +namespace cv { +namespace wechat_qrcode { +class ImgSource : public zxing::LuminanceSource { +private: + typedef LuminanceSource Super; + zxing::ArrayRef _matrix; + unsigned char* rgbs; + unsigned char* luminances; + int dataWidth; + int dataHeight; + int left; + int top; + void makeGray(); + void makeGrayReset(); + + void arrayCopy(unsigned char* src, int inputOffset, char* dst, int outputOffset, + int length) const; + + + ~ImgSource(); + +public: + ImgSource(unsigned char* pixels, int width, int height); + ImgSource(unsigned char* pixels, int width, int height, int left, int top, int cropWidth, + int cropHeight, zxing::ErrorHandler& err_handler); + + static zxing::Ref create(unsigned char* pixels, int width, int height); + static zxing::Ref create(unsigned char* pixels, int width, int height, int left, + int top, int cropWidth, int cropHeight, zxing::ErrorHandler& err_handler); + void reset(unsigned char* pixels, int width, int height); + zxing::ArrayRef getRow(int y, zxing::ArrayRef row, + zxing::ErrorHandler& err_handler) const override; + zxing::ArrayRef getMatrix() const override; + zxing::Ref getByteMatrix() const override; + + bool isCropSupported() const override; + zxing::Ref crop(int left, int top, int width, int height, + zxing::ErrorHandler& err_handler) const override; + + bool 
isRotateSupported() const override; + zxing::Ref rotateCounterClockwise( + zxing::ErrorHandler& err_handler) const override; + + int getMaxSize() { return dataHeight * dataWidth; } +}; +} // namespace wechat_qrcode +} // namespace cv +#endif // __OPENCV_WECHAT_QRCODE_IMGSOURCE_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/precomp.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/precomp.hpp new file mode 100644 index 00000000..1fc21c0f --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/precomp.hpp @@ -0,0 +1,29 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. + +#ifndef __OPENCV_WECHAT_QRCODE_PRECOMP_HPP__ +#define __OPENCV_WECHAT_QRCODE_PRECOMP_HPP__ +#ifdef _MSC_VER +#pragma warning(disable: 4244) +#pragma warning(disable: 4267) +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "imgsource.hpp" +using std::ostringstream; +using std::string; +using std::vector; +#endif // __OPENCV_WECHAT_QRCODE_PRECOMP_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/scale/super_scale.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/scale/super_scale.cpp new file mode 100644 index 00000000..8b3b1138 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/scale/super_scale.cpp @@ -0,0 +1,63 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +#include "../precomp.hpp" +#include "super_scale.hpp" + + +#define CLIP(x, x1, x2) max(x1, min(x, x2)) +namespace cv { +namespace wechat_qrcode { +int SuperScale::init(const std::string &proto_path, const std::string &model_path) { + srnet_ = dnn::readNetFromCaffe(proto_path, model_path); + net_loaded_ = true; + return 0; +} + +Mat SuperScale::processImageScale(const Mat &src, float scale, const bool &use_sr, + int sr_max_size) { + Mat dst = src; + if (scale == 1.0) { // src + return dst; + } + + int width = src.cols; + int height = src.rows; + if (scale == 2.0) { // upsample + int SR_TH = sr_max_size; + if (use_sr && (int)sqrt(width * height * 1.0) < SR_TH && net_loaded_) { + int ret = superResoutionScale(src, dst); + if (ret == 0) return dst; + } + + { resize(src, dst, Size(), scale, scale, INTER_CUBIC); } + } else if (scale < 1.0) { // downsample + resize(src, dst, Size(), scale, scale, INTER_AREA); + } + + return dst; +} + +int SuperScale::superResoutionScale(const Mat &src, Mat &dst) { + Mat blob; + dnn::blobFromImage(src, blob, 1.0 / 255, Size(src.cols, src.rows), {0.0f}, false, false); + + srnet_.setInput(blob); + auto prob = srnet_.forward(); + + dst = Mat(prob.size[2], prob.size[3], CV_8UC1); + + for (int row = 0; row < prob.size[2]; row++) { + const float *prob_score = prob.ptr(0, 0, row); + for (int col = 0; col < prob.size[3]; col++) { + float pixel = prob_score[col] * 255.0; + dst.at(row, col) = static_cast(CLIP(pixel, 0.0f, 255.0f)); + } + } + return 0; +} +} // namespace wechat_qrcode +} // namespace cv \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/scale/super_scale.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/scale/super_scale.hpp new file mode 100644 index 00000000..2717932c --- /dev/null +++ 
b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/scale/super_scale.hpp @@ -0,0 +1,32 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. + +#ifndef __SCALE_SUPER_SCALE_HPP_ +#define __SCALE_SUPER_SCALE_HPP_ + +#include +#include "opencv2/dnn.hpp" +#include "opencv2/imgproc.hpp" +namespace cv { +namespace wechat_qrcode { + +class SuperScale { +public: + SuperScale(){}; + ~SuperScale(){}; + int init(const std::string &proto_path, const std::string &model_path); + Mat processImageScale(const Mat &src, float scale, const bool &use_sr, int sr_max_size = 160); + +private: + dnn::Net srnet_; + bool net_loaded_ = false; + int superResoutionScale(const cv::Mat &src, cv::Mat &dst); +}; + +} // namespace wechat_qrcode +} // namespace cv +#endif // __SCALE_SUPER_SCALE_HPP_ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/wechat_qrcode.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/wechat_qrcode.cpp new file mode 100644 index 00000000..f4bec7c2 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/wechat_qrcode.cpp @@ -0,0 +1,247 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+#include "precomp.hpp" +#include "opencv2/wechat_qrcode.hpp" +#include "decodermgr.hpp" +#include "detector/align.hpp" +#include "detector/ssd_detector.hpp" +#include "opencv2/core.hpp" +#include "opencv2/core/utils/filesystem.hpp" +#include "scale/super_scale.hpp" +#include "zxing/result.hpp" +namespace cv { +namespace wechat_qrcode { +class WeChatQRCode::Impl { +public: + Impl() {} + ~Impl() {} + /** + * @brief detect QR codes from the given image + * + * @param img supports grayscale or color (BGR) image. + * @return vector detected QR code bounding boxes. + */ + std::vector detect(const Mat& img); + /** + * @brief decode QR codes from detected points + * + * @param img supports grayscale or color (BGR) image. + * @param candidate_points detected points. we name it "candidate points" which means no + * all the qrcode can be decoded. + * @param points succussfully decoded qrcode with bounding box points. + * @return vector + */ + std::vector decode(const Mat& img, std::vector& candidate_points, + std::vector& points); + int applyDetector(const Mat& img, std::vector& points); + Mat cropObj(const Mat& img, const Mat& point, Align& aligner); + std::vector getScaleList(const int width, const int height); + std::shared_ptr detector_; + std::shared_ptr super_resolution_model_; + bool use_nn_detector_, use_nn_sr_; + float scaleFactor = -1.f; +}; + +WeChatQRCode::WeChatQRCode(const String& detector_prototxt_path, + const String& detector_caffe_model_path, + const String& super_resolution_prototxt_path, + const String& super_resolution_caffe_model_path) { + p = makePtr(); + if (!detector_caffe_model_path.empty() && !detector_prototxt_path.empty()) { + // initialize detector model (caffe) + p->use_nn_detector_ = true; + CV_Assert(utils::fs::exists(detector_prototxt_path)); + CV_Assert(utils::fs::exists(detector_caffe_model_path)); + p->detector_ = make_shared(); + auto ret = p->detector_->init(detector_prototxt_path, detector_caffe_model_path); + CV_Assert(ret == 0); + } 
else { + p->use_nn_detector_ = false; + p->detector_ = NULL; + } + // initialize super_resolution_model + // it could also support non model weights by cubic resizing + // so, we initialize it first. + p->super_resolution_model_ = make_shared(); + if (!super_resolution_prototxt_path.empty() && !super_resolution_caffe_model_path.empty()) { + p->use_nn_sr_ = true; + // initialize dnn model (caffe format) + CV_Assert(utils::fs::exists(super_resolution_prototxt_path)); + CV_Assert(utils::fs::exists(super_resolution_caffe_model_path)); + auto ret = p->super_resolution_model_->init(super_resolution_prototxt_path, + super_resolution_caffe_model_path); + CV_Assert(ret == 0); + } else { + p->use_nn_sr_ = false; + } +} + +vector WeChatQRCode::detectAndDecode(InputArray img, OutputArrayOfArrays points) { + CV_Assert(!img.empty()); + CV_CheckDepthEQ(img.depth(), CV_8U, ""); + + if (img.cols() <= 20 || img.rows() <= 20) { + return vector(); // image data is not enough for providing reliable results + } + Mat input_img; + int incn = img.channels(); + CV_Check(incn, incn == 1 || incn == 3 || incn == 4, ""); + if (incn == 3 || incn == 4) { + cvtColor(img, input_img, COLOR_BGR2GRAY); + } else { + input_img = img.getMat(); + } + auto candidate_points = p->detect(input_img); + auto res_points = vector(); + auto ret = p->decode(input_img, candidate_points, res_points); + // opencv type convert + vector tmp_points; + if (points.needed()) { + for (size_t i = 0; i < res_points.size(); i++) { + Mat tmp_point; + tmp_points.push_back(tmp_point); + res_points[i].convertTo(((OutputArray)tmp_points[i]), CV_32FC2); + } + points.createSameSize(tmp_points, CV_32FC2); + points.assign(tmp_points); + } + return ret; +} + +void WeChatQRCode::setScaleFactor(float _scaleFactor) { + if (_scaleFactor > 0 && _scaleFactor <= 1.f) + p->scaleFactor = _scaleFactor; + else + p->scaleFactor = -1.f; +}; + +float WeChatQRCode::getScaleFactor() { + return p->scaleFactor; +}; + +vector 
WeChatQRCode::Impl::decode(const Mat& img, vector& candidate_points, + vector& points) { + if (candidate_points.size() == 0) { + return vector(); + } + vector decode_results; + for (auto& point : candidate_points) { + Mat cropped_img; + Align aligner; + if (use_nn_detector_) { + cropped_img = cropObj(img, point, aligner); + } else { + cropped_img = img; + } + // scale_list contains different scale ratios + auto scale_list = getScaleList(cropped_img.cols, cropped_img.rows); + for (auto cur_scale : scale_list) { + Mat scaled_img = + super_resolution_model_->processImageScale(cropped_img, cur_scale, use_nn_sr_); + string result; + DecoderMgr decodemgr; + vector> zxing_points, check_points; + auto ret = decodemgr.decodeImage(scaled_img, use_nn_detector_, decode_results, zxing_points); + if (ret == 0) { + for(size_t i = 0; i points_qr = zxing_points[i]; + for (auto&& pt: points_qr) { + pt /= cur_scale; + } + + if (use_nn_detector_) + points_qr = aligner.warpBack(points_qr); + for (int j = 0; j < 4; ++j) { + point.at(j, 0) = points_qr[j].x; + point.at(j, 1) = points_qr[j].y; + } + // try to find duplicate qr corners + bool isDuplicate = false; + for (const auto &tmp_points: check_points) { + const float eps = 10.f; + for (size_t j = 0; j < tmp_points.size(); j++) { + if (abs(tmp_points[j].x - points_qr[j].x) < eps && + abs(tmp_points[j].y - points_qr[j].y) < eps) { + isDuplicate = true; + } + else { + isDuplicate = false; + break; + } + } + } + if (isDuplicate == false) { + points.push_back(point); + check_points.push_back(points_qr); + } + else { + decode_results.erase(decode_results.begin() + i, decode_results.begin() + i + 1); + } + } + break; + } + } + } + + return decode_results; +} + +vector WeChatQRCode::Impl::detect(const Mat& img) { + auto points = vector(); + + if (use_nn_detector_) { + // use cnn detector + auto ret = applyDetector(img, points); + CV_Assert(ret == 0); + } else { + auto width = img.cols, height = img.rows; + // if there is no detector, use the 
full image as input + auto point = Mat(4, 2, CV_32FC1); + point.at(0, 0) = 0; + point.at(0, 1) = 0; + point.at(1, 0) = width - 1; + point.at(1, 1) = 0; + point.at(2, 0) = width - 1; + point.at(2, 1) = height - 1; + point.at(3, 0) = 0; + point.at(3, 1) = height - 1; + points.push_back(point); + } + return points; +} + +int WeChatQRCode::Impl::applyDetector(const Mat& img, vector& points) { + int img_w = img.cols; + int img_h = img.rows; + + const float targetArea = 400.f * 400.f; + // hard code input size + const float tmpScaleFactor = scaleFactor == -1.f ? min(1.f, sqrt(targetArea / (img_w * img_h))) : scaleFactor; + int detect_width = img_w * tmpScaleFactor; + int detect_height = img_h * tmpScaleFactor; + + points = detector_->forward(img, detect_width, detect_height); + + return 0; +} + +Mat WeChatQRCode::Impl::cropObj(const Mat& img, const Mat& point, Align& aligner) { + // make some padding to boost the qrcode details recall. + float padding_w = 0.1f, padding_h = 0.1f; + auto min_padding = 15; + auto cropped = aligner.crop(img, point, padding_w, padding_h, min_padding); + return cropped; +} + +// empirical rules +vector WeChatQRCode::Impl::getScaleList(const int width, const int height) { + if (width < 320 || height < 320) return {1.0, 2.0, 0.5}; + if (width < 640 && height < 640) return {1.0, 0.5}; + return {0.5, 1.0}; +} +} // namespace wechat_qrcode +} // namespace cv \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/binarizer.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/binarizer.cpp new file mode 100644 index 00000000..68508287 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/binarizer.cpp @@ -0,0 +1,88 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../precomp.hpp" +#include "binarizer.hpp" + +namespace zxing { + +Binarizer::Binarizer(Ref source) : source_(source) { + dataWidth = source->getWidth(); + dataHeight = source->getHeight(); + + width = dataWidth; + height = dataHeight; + + matrix_ = NULL; + matrix0_ = NULL; + matrixInverted_ = NULL; + + histogramBinarized = false; + usingHistogram = false; +} + +Binarizer::~Binarizer() {} + +Ref Binarizer::getLuminanceSource() const { return source_; } + +int Binarizer::getWidth() const { + return width; +} + +int Binarizer::getHeight() const { + return height; +} + +int Binarizer::rotateCounterClockwise() { return 0; } + +int Binarizer::rotateCounterClockwise45() { return 0; } + +Ref Binarizer::getInvertedMatrix(ErrorHandler& err_handler) { + if (!matrix_) { + return Ref(); + } + + if (matrixInverted_ == NULL) { + matrixInverted_ = new BitMatrix(matrix_->getWidth(), matrix_->getHeight(), err_handler); + matrixInverted_->copyOf(matrix_, err_handler); + matrixInverted_->flipAll(); + } + + return matrixInverted_; +} + +// Return different black matrix according to cacheMode +Ref Binarizer::getBlackMatrix(ErrorHandler& err_handler) { + if (err_handler.ErrCode()) return Ref(); + matrix_ = matrix0_; + return matrix_; +} + +Ref Binarizer::getBlackRow(int y, Ref row, ErrorHandler& err_handler) { + if (!matrix_) { + matrix_ = getBlackMatrix(err_handler); + if (err_handler.ErrCode()) return Ref(); + } + + matrix_->getRow(y, row); + return row; +} + +ArrayRef Binarizer::getBlockArray(int size) { + ArrayRef blocks(new Array(size)); + + for (int i = 0; i < blocks->size(); i++) { + blocks[i].sum = 0; + blocks[i].min = 0xFF; + blocks[i].max = 0; + } + + return blocks; +} +} 
// namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/binarizer.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/binarizer.hpp new file mode 100644 index 00000000..7e444c52 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/binarizer.hpp @@ -0,0 +1,88 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_BINARIZER_HPP__ +#define __ZXING_BINARIZER_HPP__ + +#include "common/bitarray.hpp" +#include "common/bitmatrix.hpp" +#include "common/counted.hpp" +#include "errorhandler.hpp" +#include "luminance_source.hpp" + +#define ONED_ENABLE_LINE_BINARIZER + +namespace zxing { + +// typedef unsigned char uint8_t; + +struct BINARIZER_BLOCK { + int sum; + int min; + int max; + int threshold; + // int average; +}; + +#ifdef ONED_ENABLE_LINE_BINARIZER +struct DecodeTipInfo { + int class_id; +}; +#endif + +class Binarizer : public Counted { +private: + Ref source_; + bool histogramBinarized; + bool usingHistogram; + +public: + explicit Binarizer(Ref source); + virtual ~Binarizer(); + + // Added for store binarized result + + int dataWidth; + int dataHeight; + int width; + int height; + + // Store dynamicalli choice of which matrix is currently used + Ref matrix_; + + // Restore 0 degree result + Ref matrix0_; + + Ref matrixInverted_; + + bool isRotateSupported() const { return false; } + + // rotate counter clockwise 45 & 90 degree from binarized cache + int rotateCounterClockwise(); + int rotateCounterClockwise45(); + + virtual 
Ref getBlackMatrix(ErrorHandler& err_handler); + virtual Ref getInvertedMatrix(ErrorHandler& err_handler); + virtual Ref getBlackRow(int y, Ref row, ErrorHandler& err_handler); + + Ref getLuminanceSource() const; + // virtual Ref createBinarizer(Ref source) = 0; + virtual Ref createBinarizer(Ref source) { + return Ref(new Binarizer(source)); + }; + + int getWidth() const; + int getHeight() const; + + ArrayRef getBlockArray(int size); +}; + +} // namespace zxing +#endif // __ZXING_BINARIZER_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/binarybitmap.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/binarybitmap.cpp new file mode 100644 index 00000000..28352ca8 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/binarybitmap.cpp @@ -0,0 +1,66 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../precomp.hpp" +#include "binarybitmap.hpp" + +using zxing::BinaryBitmap; +using zxing::BitArray; +using zxing::BitMatrix; +using zxing::ErrorHandler; +using zxing::LuminanceSource; +using zxing::Ref; + +// VC++ +using zxing::Binarizer; + +BinaryBitmap::BinaryBitmap(Ref binarizer) : binarizer_(binarizer) {} + +BinaryBitmap::~BinaryBitmap() {} + +Ref BinaryBitmap::getBlackRow(int y, Ref row, ErrorHandler& err_handler) { + Ref bitary = binarizer_->getBlackRow(y, row, err_handler); + if (err_handler.ErrCode()) return Ref(); + return bitary; +} + +Ref BinaryBitmap::getBlackMatrix(ErrorHandler& err_handler) { + Ref bitmtx = binarizer_->getBlackMatrix(err_handler); + if (err_handler.ErrCode()) return Ref(); + return bitmtx; +} + +Ref BinaryBitmap::getInvertedMatrix(ErrorHandler& err_handler) { + Ref bitmtx = binarizer_->getInvertedMatrix(err_handler); + if (err_handler.ErrCode()) return Ref(); + return bitmtx; +} + +int BinaryBitmap::getWidth() const { return binarizer_->getWidth(); } + +int BinaryBitmap::getHeight() const { return binarizer_->getHeight(); } + +Ref BinaryBitmap::getLuminanceSource() const { + return binarizer_->getLuminanceSource(); +} + +bool BinaryBitmap::isCropSupported() const { return getLuminanceSource()->isCropSupported(); } + +Ref BinaryBitmap::crop(int left, int top, int width, int height, + ErrorHandler& err_handler) { + return Ref(new BinaryBitmap(binarizer_->createBinarizer( + getLuminanceSource()->crop(left, top, width, height, err_handler)))); +} + +bool BinaryBitmap::isRotateSupported() const { return binarizer_->isRotateSupported(); } + +Ref BinaryBitmap::rotateCounterClockwise() { + binarizer_->rotateCounterClockwise(); + return Ref(new BinaryBitmap(binarizer_)); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/binarybitmap.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/binarybitmap.hpp new file mode 100644 index 00000000..d483fcd3 --- /dev/null +++ 
b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/binarybitmap.hpp @@ -0,0 +1,53 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_BINARYBITMAP_HPP__ +#define __ZXING_BINARYBITMAP_HPP__ + +#include "binarizer.hpp" +#include "common/bitarray.hpp" +#include "common/bitmatrix.hpp" +#include "common/counted.hpp" +#include "common/unicomblock.hpp" +#include "errorhandler.hpp" + +namespace zxing { + +class BinaryBitmap : public Counted { +private: + Ref binarizer_; + +public: + explicit BinaryBitmap(Ref binarizer); + virtual ~BinaryBitmap(); + + Ref getBlackRow(int y, Ref row, ErrorHandler& err_handler); + Ref getBlackMatrix(ErrorHandler& err_handler); + Ref getInvertedMatrix(ErrorHandler& err_handler); + + Ref getLuminanceSource() const; + Ref m_poUnicomBlock; + + int getWidth() const; + int getHeight() const; + + bool isRotateSupported() const; + Ref rotateCounterClockwise(); + + bool isCropSupported() const; + Ref crop(int left, int top, int width, int height, ErrorHandler& err_handler); + + bool isHistogramBinarized() const; + bool ifUseHistogramBinarize() const; +}; + +} // namespace zxing + +#endif // __ZXING_BINARYBITMAP_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/array.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/array.hpp new file mode 100644 index 00000000..9187937e --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/array.hpp @@ -0,0 +1,113 @@ +// This file is part of 
OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_COMMON_ARRAY_HPP__ +#define __ZXING_COMMON_ARRAY_HPP__ + +#include "counted.hpp" + +namespace zxing { + +template +class Array : public Counted { +protected: +public: + std::vector values_; + Array() {} + explicit Array(int n) : Counted(), values_(n, T()) {} + Array(T const *ts, int n) : Counted(), values_(ts, ts + n) {} + Array(T const *ts, T const *te) : Counted(), values_(ts, te) {} + Array(T v, int n) : Counted(), values_(n, v) {} + explicit Array(std::vector &v) : Counted(), values_(v) {} + Array(Array &other) : Counted(), values_(other.values_) {} + explicit Array(Array *other) : Counted(), values_(other->values_) {} + virtual ~Array() {} + Array &operator=(const Array &other) { + values_ = other.values_; + return *this; + } + Array &operator=(const std::vector &array) { + values_ = array; + return *this; + } + T const &operator[](int i) const { return values_[i]; } + T &operator[](int i) { return values_[i]; } + int size() const { return values_.size(); } + bool empty() const { return values_.size() == 0; } + std::vector const &values() const { return values_; } + std::vector &values() { return values_; } + + T *data() { + // return values_.data(); + return &values_[0]; + } + void append(T value) { values_.push_back(value); } +}; + +template +class ArrayRef : public Counted { +private: +public: + Array *array_; + ArrayRef() : array_(0) {} + explicit ArrayRef(int n) : array_(0) { reset(new Array(n)); } + ArrayRef(T *ts, int n) : array_(0) { reset(new Array(ts, n)); } + 
explicit ArrayRef(Array *a) : array_(0) { reset(a); } + ArrayRef(const ArrayRef &other) : Counted(), array_(0) { reset(other.array_); } + + ~ArrayRef() { + if (array_) { + array_->release(); + } + array_ = 0; + } + + T const &operator[](int i) const { return (*array_)[i]; } + + T &operator[](int i) { return (*array_)[i]; } + + void reset(Array *a) { + if (a) { + a->retain(); + } + if (array_) { + array_->release(); + } + array_ = a; + } + void reset(const ArrayRef &other) { reset(other.array_); } + ArrayRef &operator=(const ArrayRef &other) { + reset(other); + return *this; + } + ArrayRef &operator=(Array *a) { + reset(a); + return *this; + } + + Array &operator*() const { return *array_; } + + Array *operator->() const { return array_; } + + operator bool() const { return array_ != 0; } + bool operator!() const { return array_ == 0; } + + T *data() { return array_->data(); } + + void clear() { + T *ptr = array_->data(); + memset(ptr, 0, array_->size()); + } + void append(T value) { array_->append(value); } +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_ARRAY_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/adaptive_threshold_mean_binarizer.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/adaptive_threshold_mean_binarizer.cpp new file mode 100644 index 00000000..38a79b37 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/adaptive_threshold_mean_binarizer.cpp @@ -0,0 +1,99 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+#include "../../../precomp.hpp" +#include "adaptive_threshold_mean_binarizer.hpp" +using zxing::AdaptiveThresholdMeanBinarizer; + +namespace { +const int BLOCK_SIZE = 25; +const int Bias = 10; +} // namespace + +AdaptiveThresholdMeanBinarizer::AdaptiveThresholdMeanBinarizer(Ref source) + : GlobalHistogramBinarizer(source) {} + +AdaptiveThresholdMeanBinarizer::~AdaptiveThresholdMeanBinarizer() {} + +Ref AdaptiveThresholdMeanBinarizer::createBinarizer(Ref source) { + return Ref(new AdaptiveThresholdMeanBinarizer(source)); +} + +Ref AdaptiveThresholdMeanBinarizer::getBlackRow(int y, Ref row, + ErrorHandler& err_handler) { + // First call binarize image in child class to get matrix0_ and binCache + if (!matrix0_) { + binarizeImage(err_handler); + if (err_handler.ErrCode()) return Ref(); + } + + // Call parent getBlackMatrix to get current matrix + return Binarizer::getBlackRow(y, row, err_handler); +} + +Ref AdaptiveThresholdMeanBinarizer::getBlackMatrix(ErrorHandler& err_handler) { + // First call binarize image in child class to get matrix0_ and binCache + if (!matrix0_) { + binarizeImage(err_handler); + if (err_handler.ErrCode()) return Ref(); + } + return Binarizer::getBlackMatrix(err_handler); +} + +int AdaptiveThresholdMeanBinarizer::binarizeImage(ErrorHandler& err_handler) { + if (width >= BLOCK_SIZE && height >= BLOCK_SIZE) { + LuminanceSource& source = *getLuminanceSource(); + Ref matrix(new BitMatrix(width, height, err_handler)); + if (err_handler.ErrCode()) return -1; + auto src = (unsigned char*)source.getMatrix()->data(); + auto dst = matrix->getPtr(); + cv::Mat mDst; + mDst = cv::Mat::zeros(cv::Size(width, height), CV_8UC1); + TransBufferToMat(src, mDst, width, height); + cv::Mat result; + int bs = width / 10; + bs = bs + bs % 2 - 1; + if (!(bs % 2 == 1 && bs > 1)) return -1; + cv::adaptiveThreshold(mDst, result, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY, + bs, Bias); + TransMatToBuffer(result, dst, width, height); + if 
(err_handler.ErrCode()) return -1; + matrix0_ = matrix; + } else { + matrix0_ = GlobalHistogramBinarizer::getBlackMatrix(err_handler); + if (err_handler.ErrCode()) return 1; + } + return 0; +} + +int AdaptiveThresholdMeanBinarizer::TransBufferToMat(unsigned char* pBuffer, cv::Mat& mDst, + int nWidth, int nHeight) { + for (int j = 0; j < nHeight; ++j) { + unsigned char* data = mDst.ptr(j); + unsigned char* pSubBuffer = pBuffer + (nHeight - 1 - j) * nWidth; + memcpy(data, pSubBuffer, nWidth); + } + return 0; +} + +int AdaptiveThresholdMeanBinarizer::TransMatToBuffer(cv::Mat mSrc, unsigned char* ppBuffer, + int& nWidth, int& nHeight) { + nWidth = mSrc.cols; + // nWidth = ((nWidth + 3) / 4) * 4; + nHeight = mSrc.rows; + for (int j = 0; j < nHeight; ++j) { + unsigned char* pdi = ppBuffer + j * nWidth; + for (int z = 0; z < nWidth; ++z) { + int nj = nHeight - j - 1; + int value = *(uchar*)(mSrc.ptr(nj) + z); + if (value > 120) + pdi[z] = 0; + else + pdi[z] = 1; + } + } + return 0; +} \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/adaptive_threshold_mean_binarizer.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/adaptive_threshold_mean_binarizer.hpp new file mode 100644 index 00000000..bd51dde8 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/adaptive_threshold_mean_binarizer.hpp @@ -0,0 +1,38 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+ +#ifndef __ZXING_COMMON_ADAPTIVE_THRESHOLD_MEAN_BINARIZER_HPP__ +#define __ZXING_COMMON_ADAPTIVE_THRESHOLD_MEAN_BINARIZER_HPP__ +#include +#include +#include "../../binarizer.hpp" +#include "../../errorhandler.hpp" +#include "../bitarray.hpp" +#include "../bitmatrix.hpp" +#include "../bytematrix.hpp" +#include "global_histogram_binarizer.hpp" + + +namespace zxing { + +class AdaptiveThresholdMeanBinarizer : public GlobalHistogramBinarizer { +public: + explicit AdaptiveThresholdMeanBinarizer(Ref source); + virtual ~AdaptiveThresholdMeanBinarizer(); + + virtual Ref getBlackMatrix(ErrorHandler& err_handler) override; + virtual Ref getBlackRow(int y, Ref row, ErrorHandler& err_handler) override; + Ref createBinarizer(Ref source) override; + +private: + int binarizeImage(ErrorHandler& err_handler); + int TransBufferToMat(unsigned char* pBuffer, cv::Mat& mDst, int nWidth, int nHeight); + int TransMatToBuffer(cv::Mat mSrc, unsigned char* ppBuffer, int& nWidth, int& nHeight); +}; + +} // namespace zxing +#endif // __ZXING_COMMON_ADAPTIVE_THRESHOLD_MEAN_BINARIZER_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/fast_window_binarizer.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/fast_window_binarizer.cpp new file mode 100644 index 00000000..b0d9f657 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/fast_window_binarizer.cpp @@ -0,0 +1,285 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../../precomp.hpp" +#include "fast_window_binarizer.hpp" +using zxing::FastWindowBinarizer; + + +namespace { +const int BLOCK_SIZE = 6; +// const int BLOCK_SIZE = 8; // not as good as BLOCK_SIZE = 6 +const float WINDOW_FRACTION = 0.13f; + +static int min(int a, int b) { return a < b ? a : b; } + +static int max(int a, int b) { return a > b ? a : b; } + +} // namespace + +FastWindowBinarizer::FastWindowBinarizer(Ref source) + : GlobalHistogramBinarizer(source), matrix_(NULL), cached_row_(NULL) { + width = source->getWidth(); + height = source->getHeight(); + int aw = width / BLOCK_SIZE; + int ah = height / BLOCK_SIZE; + + int ah2 = ah; + int ow2 = aw + 1; + + _luminancesInt = new int[width * height]; + _blockTotals = new int[ah * aw]; + _totals = new int[(ah + 1) * (aw + 1)]; + _rowTotals = new int[ah2 * ow2]; + + _internal = new unsigned int[(height + 1) * (width + 1)]; +} + +FastWindowBinarizer::~FastWindowBinarizer() { + delete[] _totals; + delete[] _blockTotals; + delete[] _luminancesInt; + delete[] _rowTotals; + + delete[] _internal; +} + +Ref FastWindowBinarizer::createBinarizer(Ref source) { + return Ref(new FastWindowBinarizer(source)); +} + +/** + * Calculates the final BitMatrix once for all requests. This could be called + * once from the constructor instead, but there are some advantages to doing it + * lazily, such as making profiling easier, and not doing heavy lifting when + * callers don't expect it. 
+ */ +Ref FastWindowBinarizer::getBlackMatrix(ErrorHandler& err_handler) { + if (!matrix0_) { + binarizeImage1(err_handler); + if (err_handler.ErrCode()) return Ref(); + } + + return Binarizer::getBlackMatrix(err_handler); +} + +/** + * Calculate black row from BitMatrix + * If BitMatrix has been calculated then just get the row + * If BitMatrix has not been calculated then call getBlackMatrix first + */ + +Ref FastWindowBinarizer::getBlackRow(int y, Ref row, + ErrorHandler& err_handler) { + if (!matrix0_) { + binarizeImage1(err_handler); + if (err_handler.ErrCode()) return Ref(); + } + // Call parent getBlackMatrix to get current matrix + return Binarizer::getBlackRow(y, row, err_handler); +} + +void FastWindowBinarizer::calcBlockTotals(int* luminancesInt, int* output, int aw, int ah) { + for (int by = 0; by < ah; by++) { + int ey = (by + 1) * BLOCK_SIZE; + for (int bx = 0; bx < aw; bx++) { + int t = 0; + for (int y = by * BLOCK_SIZE; y < ey; y++) { + int offset = y * width + bx * BLOCK_SIZE; + int ex = offset + BLOCK_SIZE; + for (; offset < ex; offset++) { + // int v = luminancesInt[offset] & 0xff; + t += luminancesInt[offset]; + } + } + output[by * aw + bx] = t; + } + } +} + +void FastWindowBinarizer::cumulative(int* data, int* output, int _width, int _height) { + int ah = _height; + int aw = _width; + int ow = _width + 1; + // int[][] totals = new int[ah + 1][aw + 1]; + // int* rowTotals = new int[ah*ow]; + + for (int y = 0; y < ah; y++) { + int* row = _rowTotals + (y * ow); + int* rowdata = data + (y * aw); + int t = 0; + row[0] = t; + + for (int x = 0; x < aw; x++) { + t += rowdata[x]; + row[x + 1] = t; + } + } + + for (int x = 0; x <= aw; x++) { + output[x] = 0; // First row + int t = 0; + + for (int y = 0; y < ah; y++) { + t += _rowTotals[y * ow + x]; + output[(y + 1) * ow + x] = t; + } + } +} + +void FastWindowBinarizer::fastIntegral(const unsigned char* inputMatrix, + unsigned int* outputMatrix) { + // 
memset(outputMatrix,0,sizeof(int)*(height+1)*(width+1)); + // unsigned int *columnSum = new unsigned int[width]; // sum of each column + // calculate integral of the first line + outputMatrix[0] = outputMatrix[width + 1] = 0; + for (int i = 0; i < width; i++) { + // columnSum[i]=inputMatrix[i]; + outputMatrix[i + 1] = 0; + outputMatrix[width + 1 + i + 1] = outputMatrix[width + 1 + i] + inputMatrix[i]; + } + for (int i = 1; i < height; i++) { + const unsigned char* psi = inputMatrix + i * width; + unsigned int* pdi = outputMatrix + (i + 1) * (width + 1); + // first column of each line + pdi[0] = 0; + pdi[1] = psi[0]; + int row_sum = psi[0]; + // other columns + for (int j = 1; j < width; j++) { + row_sum += psi[j]; + pdi[j + 1] = pdi[j + 1 - width - 1] + row_sum; + } + } + return; +} + +int FastWindowBinarizer::binarizeImage1(ErrorHandler& err_handler) { + LuminanceSource& source = *getLuminanceSource(); + Ref matrix(new BitMatrix(width, height, err_handler)); + if (err_handler.ErrCode()) return -1; + + ArrayRef localLuminances = source.getMatrix(); + + unsigned char* src = (unsigned char*)localLuminances->data(); + unsigned char* dst = matrix->getPtr(); + fastWindow(src, dst, err_handler); + if (err_handler.ErrCode()) return -1; + + matrix0_ = matrix; + return 0; +} + +void FastWindowBinarizer::fastWindow(const unsigned char* src, unsigned char* dst, + ErrorHandler& err_handler) { + int r = (int)(min(width, height) * WINDOW_FRACTION / BLOCK_SIZE / 2 + 1); + const int NEWH_BLOCK_SIZE = BLOCK_SIZE * r; + if (height < NEWH_BLOCK_SIZE || width < NEWH_BLOCK_SIZE) { + matrix_ = GlobalHistogramBinarizer::getBlackMatrix(err_handler); + return; + } + const unsigned char* _img = src; + fastIntegral(_img, _internal); + int aw = width / BLOCK_SIZE; + int ah = height / BLOCK_SIZE; + memset(dst, 0, sizeof(char) * height * width); + for (int ai = 0; ai < ah; ai++) { + int top = max(0, ((ai - r + 1) * BLOCK_SIZE)); + int bottom = min(height, (ai + r) * BLOCK_SIZE); + unsigned int* 
pt = _internal + top * (width + 1); + unsigned int* pb = _internal + bottom * (width + 1); + for (int aj = 0; aj < aw; aj++) { + int left = max(0, (aj - r + 1) * BLOCK_SIZE); + int right = min(width, (aj + r) * BLOCK_SIZE); + unsigned int block = pb[right] + pt[left] - pt[right] - pb[left]; + int pixels = (bottom - top) * (right - left); + int avg = (int)block / pixels; + for (int bi = ai * BLOCK_SIZE; bi < height && bi < (ai + 1) * BLOCK_SIZE; bi++) { + const unsigned char* psi = src + bi * width; + unsigned char* pdi = dst + bi * width; + for (int bj = aj * BLOCK_SIZE; bj < width && bj < (aj + 1) * BLOCK_SIZE; bj++) { + if ((int)psi[bj] < avg) + pdi[bj] = 1; + else + pdi[bj] = 0; + } + } + } + } + // delete [] _internal; + return; +} + +int FastWindowBinarizer::binarizeImage0(ErrorHandler& err_handler) { + // if (matrix_) { + // return matrix_; + //} + + LuminanceSource& source = *getLuminanceSource(); + if (width >= BLOCK_SIZE && height >= BLOCK_SIZE) { + int r = (int)(min(width, height) * WINDOW_FRACTION / BLOCK_SIZE / 2 + 1); + + int aw = width / BLOCK_SIZE; + int ah = height / BLOCK_SIZE; + int ow = aw + 1; + + ArrayRef _luminances = source.getMatrix(); + + // Get luminances for int value first + for (int i = 0; i < width * height; i++) { + _luminancesInt[i] = _luminances[i] & 0xff; + } + + calcBlockTotals(_luminancesInt, _blockTotals, aw, ah); + + cumulative(_blockTotals, _totals, aw, ah); + + Ref newMatrix(new BitMatrix(width, height, err_handler)); + if (err_handler.ErrCode()) return -1; + unsigned char* newimg = newMatrix->getPtr(); + for (int by = 0; by < ah; by++) { + int top = max(0, by - r + 1); + int bottom = min(ah, by + r); + + for (int bx = 0; bx < aw; bx++) { + int left = max(0, bx - r + 1); + int right = min(aw, bx + r); + + int block = _totals[bottom * ow + right] + _totals[top * ow + left] - + _totals[top * ow + right] - _totals[bottom * ow + left]; + + int pixels = (bottom - top) * (right - left) * BLOCK_SIZE * BLOCK_SIZE; + int avg = block / 
pixels; + + for (int y = by * BLOCK_SIZE; y < (by + 1) * BLOCK_SIZE; y++) { + // int offset = y*width; + int* plumint = _luminancesInt + y * width; + unsigned char* pn = newimg + y * width; + for (int x = bx * BLOCK_SIZE; x < (bx + 1) * BLOCK_SIZE; x++) { + // int pixel = luminances[y*width + x] & 0xff; + // if(plumint[x] < avg) + // newMatrix->set(x, y); + if (plumint[x] < avg) + pn[x] = 1; + else + pn[x] = 0; + } + } + } + } + // delete[] data; + matrix_ = newMatrix; + } else { + // If the image is too small, fall back to the global histogram + // approach. + matrix_ = GlobalHistogramBinarizer::getBlackMatrix(err_handler); + } + + return 0; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/fast_window_binarizer.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/fast_window_binarizer.hpp new file mode 100644 index 00000000..a18c186e --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/fast_window_binarizer.hpp @@ -0,0 +1,56 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_COMMON_DETECTOR_RESULT_HPP__ +#define __ZXING_COMMON_DETECTOR_RESULT_HPP__ + +#include "../../binarizer.hpp" +#include "../../errorhandler.hpp" +#include "../bitarray.hpp" +#include "../bitmatrix.hpp" +#include "global_histogram_binarizer.hpp" + +#include + +namespace zxing { + +class FastWindowBinarizer : public GlobalHistogramBinarizer { +private: + Ref matrix_; + Ref cached_row_; + + int* _luminancesInt; + int* _blockTotals; + int* _totals; + int* _rowTotals; + + unsigned int* _internal; + +public: + explicit FastWindowBinarizer(Ref source); + virtual ~FastWindowBinarizer(); + + virtual Ref getBlackMatrix(ErrorHandler& err_handler) override; + virtual Ref getBlackRow(int y, Ref row, ErrorHandler& err_handler) override; + + Ref createBinarizer(Ref source) override; + +private: + void calcBlockTotals(int* luminancesInt, int* output, int aw, int ah); + void cumulative(int* data, int* output, int _width, int _height); + int binarizeImage0(ErrorHandler& err_handler); + void fastIntegral(const unsigned char* inputMatrix, unsigned int* outputMatrix); + int binarizeImage1(ErrorHandler& err_handler); + void fastWindow(const unsigned char* src, unsigned char* dst, ErrorHandler& err_handler); +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_DETECTOR_RESULT_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/global_histogram_binarizer.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/global_histogram_binarizer.cpp new file mode 100644 index 00000000..0f1aa056 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/global_histogram_binarizer.cpp @@ -0,0 +1,308 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "global_histogram_binarizer.hpp" +using zxing::GlobalHistogramBinarizer; + +namespace { +const int LUMINANCE_BITS = 5; +const int LUMINANCE_SHIFT = 8 - LUMINANCE_BITS; +const int LUMINANCE_BUCKETS = 1 << LUMINANCE_BITS; +const ArrayRef EMPTY(0); +} // namespace + +GlobalHistogramBinarizer::GlobalHistogramBinarizer(Ref source) + : Binarizer(source), luminances(EMPTY), buckets(LUMINANCE_BUCKETS) { + filtered = false; +} + +GlobalHistogramBinarizer::~GlobalHistogramBinarizer() {} + +void GlobalHistogramBinarizer::initArrays(int luminanceSize) { + if (luminances->size() < luminanceSize) { + luminances = ArrayRef(luminanceSize); + } + for (int x = 0; x < LUMINANCE_BUCKETS; x++) { + buckets[x] = 0; + } +} + +// Applies simple sharpening to the row data to improve performance of the 1D +// readers. +Ref GlobalHistogramBinarizer::getBlackRow(int y, Ref row, + ErrorHandler& err_handler) { + // First call binarize image in child class to get matrix0_ and binCache + if (!matrix0_) { + binarizeImage0(err_handler); + if (err_handler.ErrCode()) return Ref(); + } + // Call parent getBlackMatrix to get current matrix + return Binarizer::getBlackRow(y, row, err_handler); +} + +// Does not sharpen the data, as this call is intended to only be used by 2D +// readers. 
+Ref GlobalHistogramBinarizer::getBlackMatrix(ErrorHandler& err_handler) { + binarizeImage0(err_handler); + if (err_handler.ErrCode()) return Ref(); + // First call binarize image in child class to get matrix0_ and binCache + // Call parent getBlackMatrix to get current matrix + return Binarizer::getBlackMatrix(err_handler); +} + +using namespace std; + +int GlobalHistogramBinarizer::estimateBlackPoint(ArrayRef const& _buckets, + ErrorHandler& err_handler) { + // Find tallest peak in histogram + int numBuckets = _buckets->size(); + int maxBucketCount = 0; + int firstPeak = 0; + int firstPeakSize = 0; + for (int x = 0; x < numBuckets; x++) { + if (_buckets[x] > firstPeakSize) { + firstPeak = x; + firstPeakSize = _buckets[x]; + } + if (_buckets[x] > maxBucketCount) { + maxBucketCount = _buckets[x]; + } + } + + // Find second-tallest peak -- well, another peak that is tall and not + // so close to the first one + int secondPeak = 0; + int secondPeakScore = 0; + for (int x = 0; x < numBuckets; x++) { + int distanceToBiggest = x - firstPeak; + // Encourage more distant second peaks by multiplying by square of + // distance + int score = _buckets[x] * distanceToBiggest * distanceToBiggest; + if (score > secondPeakScore) { + secondPeak = x; + secondPeakScore = score; + } + } + // Make sure firstPeak corresponds to the black peak. + if (firstPeak > secondPeak) { + int temp = firstPeak; + firstPeak = secondPeak; + secondPeak = temp; + } + + // Kind of arbitrary; if the two peaks are very close, then we figure there + // is so little dynamic range in the image, that discriminating black and + // white is too error-prone. Decoding the image/line is either pointless, or + // may in some cases lead to a false positive for 1D formats, which are + // relatively lenient. We arbitrarily say "close" is "<= 1/16 of the total + // histogram buckets apart" std::cerr << "! 
" << secondPeak << " " << + // firstPeak << " " << numBuckets << std::endl; + if (secondPeak - firstPeak <= numBuckets >> 4) { + err_handler = NotFoundErrorHandler("NotFound GlobalHistogramBinarizer"); + return -1; + } + + // Find a valley between them that is low and closer to the white peak + int bestValley = secondPeak - 1; + int bestValleyScore = -1; + for (int x = secondPeak - 1; x > firstPeak; x--) { + int fromFirst = x - firstPeak; + // Favor a "valley" that is not too close to either peak -- especially + // not the black peak -- and that has a low value of course + int score = fromFirst * fromFirst * (secondPeak - x) * (maxBucketCount - buckets[x]); + if (score > bestValleyScore) { + bestValley = x; + bestValleyScore = score; + } + } + + // std::cerr << "bps " << (bestValley << LUMINANCE_SHIFT) << std::endl; + return bestValley << LUMINANCE_SHIFT; +} + +// codes from sagazhou, only works well on one dataset +int GlobalHistogramBinarizer::estimateBlackPoint2(ArrayRef const& _buckets) { + int midValue = LUMINANCE_BUCKETS / 2 + 1; + // Find tallest and lowest peaks in histogram + // const int numBuckets = buckets->size(); + int maxPointArray[LUMINANCE_BUCKETS] = {0}; + int maxCrusor = 0; + int maxValue = 0, maxIndex = 0; + int minPointArray[LUMINANCE_BUCKETS] = {0}; + int minCrusor = 0; + + for (int i = 2; i < LUMINANCE_BUCKETS - 3; i++) { + if (_buckets[i] < _buckets[i + 1] && _buckets[i] < _buckets[i + 2] && + _buckets[i] < _buckets[i - 1] && _buckets[i] < _buckets[i - 2]) { + minPointArray[minCrusor++] = i; + } else if (_buckets[i] > _buckets[i + 1] && _buckets[i] > _buckets[i + 2] && + _buckets[i] > _buckets[i - 1] && _buckets[i] > _buckets[i - 2]) { + maxPointArray[maxCrusor++] = i; + if (_buckets[i] > maxValue) { + maxValue = _buckets[i]; + maxIndex = i; + } + } + } + bool bSlantBlack = true; + // most pixels are black + for (int i = 0; i < maxCrusor; ++i) { + if (maxPointArray[i] > midValue) { + bSlantBlack = false; + break; + } + } + + bool bSlantWhite 
= true; + // most pixels are white + for (int i = 0; i < maxCrusor; ++i) { + if (maxPointArray[i] < midValue) { + bSlantWhite = false; + break; + } + } + + if (bSlantBlack) { + int start = maxIndex + 30; + int end = midValue; + + if (minCrusor == 0) // unimodal + { + return 255; + } else { + int mostLeftIndex = 0; + bool bFind = false; + + for (int i = 0; i < minCrusor; ++i) // wave motion + { + if (minPointArray[i] > start && minPointArray[i] < end) { + mostLeftIndex = minPointArray[i]; + bFind = true; + break; + } + } + + if (bFind) { + return mostLeftIndex; + } else { + return 255; + } + } + } + + if (bSlantWhite) { + int start = midValue; + int end = maxIndex - 30; + + if (minCrusor == 0) // unimodal + { + return 0; + } else { + int mostRightIndex = 0; + bool bFind = false; + + for (int i = 0; i < minCrusor; ++i) // wave motion + { + if (minPointArray[i] > start && minPointArray[i] < end) { + mostRightIndex = i; + bFind = true; + } + } + + if (bFind) { + return mostRightIndex; + } else { + return 0; + } + } + } + + // balanced distribution + if (maxIndex < midValue) { + // the minest min value + if (minCrusor == 0) { + return 255; // all black + } else { + int start = maxIndex + 30; + int end = 253; + + for (int i = 0; i < minCrusor; ++i) // wave motion + { + if (minPointArray[i] > start && minPointArray[i] < end) { + return minPointArray[i]; + } + } + } + } else { + // maxest min value + if (minCrusor == 0) { + return 0; // white + } else { + int start = 0; + int end = maxIndex - 30; + int mostRightIndex = 0; + + for (int i = 0; i < minCrusor; ++i) // wave motion + { + if (minPointArray[i] > start && minPointArray[i] < end) { + mostRightIndex = minPointArray[i]; + } + } + + return mostRightIndex; + } + } + return 0; +} + +int GlobalHistogramBinarizer::binarizeImage0(ErrorHandler& err_handler) { + LuminanceSource& source = *getLuminanceSource(); + Ref matrix(new BitMatrix(width, height, err_handler)); + if (err_handler.ErrCode()) return -1; + // Quickly 
calculates the histogram by sampling four rows from the image. + // This proved to be more robust on the blackbox tests than sampling a + // diagonal as we used to do. + initArrays(width); + ArrayRef localBuckets = buckets; + + for (int y = 1; y < 5; y++) { + int row = height * y / 5; + ArrayRef localLuminances = source.getRow(row, luminances, err_handler); + if (err_handler.ErrCode()) return -1; + int right = (width << 2) / 5; + for (int x = width / 5; x < right; x++) { + int pixel = localLuminances[x] & 0xff; + localBuckets[pixel >> LUMINANCE_SHIFT]++; + } + } + + + int blackPoint = estimateBlackPoint(localBuckets, err_handler); + if (err_handler.ErrCode()) return -1; + + ArrayRef localLuminances = source.getMatrix(); + for (int y = 0; y < height; y++) { + int offset = y * width; + for (int x = 0; x < width; x++) { + int pixel = localLuminances[offset + x] & 0xff; + if (pixel < blackPoint) { + matrix->set(x, y); + } + } + } + + matrix0_ = matrix; + + return 0; +} + +Ref GlobalHistogramBinarizer::createBinarizer(Ref source) { + return Ref(new GlobalHistogramBinarizer(source)); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/global_histogram_binarizer.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/global_histogram_binarizer.hpp new file mode 100644 index 00000000..f61b3cfe --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/global_histogram_binarizer.hpp @@ -0,0 +1,55 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. 
+// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_COMMON_GLOBAL_HISTOGRAM_BINARIZER_HPP__ +#define __ZXING_COMMON_GLOBAL_HISTOGRAM_BINARIZER_HPP__ + +#include "../../binarizer.hpp" +#include "../../errorhandler.hpp" +#include "../array.hpp" +#include "../bitarray.hpp" +#include "../bitmatrix.hpp" +#include "../bytematrix.hpp" + +using zxing::ArrayRef; +using zxing::Binarizer; +using zxing::BitArray; +using zxing::BitMatrix; +using zxing::ByteMatrix; +using zxing::ErrorHandler; +using zxing::LuminanceSource; +using zxing::Ref; + +namespace zxing { + +class GlobalHistogramBinarizer : public Binarizer { +protected: + ArrayRef luminances; + ArrayRef buckets; + +public: + explicit GlobalHistogramBinarizer(Ref source); + virtual ~GlobalHistogramBinarizer(); + + virtual Ref getBlackRow(int y, Ref row, ErrorHandler &err_handler) override; + virtual Ref getBlackMatrix(ErrorHandler &err_handler) override; + int estimateBlackPoint(ArrayRef const &buckets, ErrorHandler &err_handler); + int estimateBlackPoint2(ArrayRef const &buckets); + Ref createBinarizer(Ref source) override; + +private: + int binarizeImage0(ErrorHandler &err_handler); + void initArrays(int luminanceSize); + bool filtered; +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_GLOBAL_HISTOGRAM_BINARIZER_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/hybrid_binarizer.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/hybrid_binarizer.cpp new file mode 100644 index 00000000..fb55ed8f --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/hybrid_binarizer.cpp @@ -0,0 +1,419 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "hybrid_binarizer.hpp" + +using zxing::HybridBinarizer; +using zxing::BINARIZER_BLOCK; + +// This class uses 5*5 blocks to compute local luminance, where each block is +// 8*8 pixels So this is the smallest dimension in each axis we can accept. +namespace { +const int BLOCK_SIZE_POWER = 3; +const int BLOCK_SIZE = 1 << BLOCK_SIZE_POWER; // ...0100...00 +const int BLOCK_SIZE_MASK = BLOCK_SIZE - 1; // ...0011...11 +const int MINIMUM_DIMENSION = BLOCK_SIZE * 5; +#ifdef USE_SET_INT +const int BITS_PER_BYTE = 8; +const int BITS_PER_WORD = BitMatrix::bitsPerWord; +#endif +} // namespace + +HybridBinarizer::HybridBinarizer(Ref source) : GlobalHistogramBinarizer(source) { + int subWidth = width >> BLOCK_SIZE_POWER; + if ((width & BLOCK_SIZE_MASK) != 0) { + subWidth++; + } + int subHeight = height >> BLOCK_SIZE_POWER; + if ((height & BLOCK_SIZE_MASK) != 0) { + subHeight++; + } + + grayByte_ = source->getByteMatrix(); + + blocks_ = getBlockArray(subWidth * subHeight); + + subWidth_ = subWidth; + subHeight_ = subHeight; + + initBlocks(); + initBlockIntegral(); +} + +HybridBinarizer::~HybridBinarizer() { +} + +Ref HybridBinarizer::createBinarizer(Ref source) { + return Ref(new GlobalHistogramBinarizer(source)); +} + +int HybridBinarizer::initBlockIntegral() { + blockIntegralWidth = subWidth_ + 1; + blockIntegralHeight = subHeight_ + 1; + blockIntegral_ = new Array(blockIntegralWidth * blockIntegralHeight); + + int* integral = blockIntegral_->data(); + + // unsigned char* therow = grayByte_->getByteRow(0); + + // first row only + int rs = 0; + + for (int j = 0; j < blockIntegralWidth; j++) { + integral[j] = 0; + } + + for (int i = 0; i < 
blockIntegralHeight; i++) { + integral[i * blockIntegralWidth] = 0; + } + + // remaining cells are sum above and to the left + int offsetBlock = 0; + int offsetIntegral = 0; + + for (int i = 0; i < subHeight_; ++i) { + // therow = grayByte_->getByteRow(i); + offsetBlock = i * subWidth_; + offsetIntegral = (i + 1) * blockIntegralWidth; + rs = 0; + + for (int j = 0; j < subWidth_; ++j) { + rs += blocks_[offsetBlock + j].threshold; + integral[offsetIntegral + j + 1] = rs + integral[offsetIntegral - blockIntegralWidth + j + 1]; + } + } + + return 1; +} + +/** + * Calculates the final BitMatrix once for all requests. This could be called + * once from the constructor instead, but there are some advantages to doing it + * lazily, such as making profiling easier, and not doing heavy lifting when + * callers don't expect it. + */ +Ref HybridBinarizer::getBlackMatrix(ErrorHandler& err_handler) { + // First call binarize image in child class to get matrix0_ and binCache + if (!matrix0_) { + binarizeByBlock(err_handler); + if (err_handler.ErrCode()) return Ref(); + } + + // First call binarize image in child class to get matrix0_ and binCache + // Call parent getBlackMatrix to get current matrix + return Binarizer::getBlackMatrix(err_handler); +} + +#if 1 +/** + * Calculate black row from BitMatrix + * If BitMatrix has been calculated then just get the row + * If BitMatrix has not been calculated then call getBlackMatrix first + */ +Ref HybridBinarizer::getBlackRow(int y, Ref row, ErrorHandler& err_handler) { + // First call binarize image in child class to get matrix0_ and binCache + if (!matrix0_) { + binarizeByBlock(err_handler); + if (err_handler.ErrCode()) return Ref(); + } + + // Call parent getBlackMatrix to get current matrix + return Binarizer::getBlackRow(y, row, err_handler); +} +#endif + +namespace { +inline int cap(int value, int min, int max) { + return value < min ? min : value > max ? 
max : value; +} +} // namespace + + +// For each block in the image, calculates the average black point using a 5*5 +// grid of the blocks around it. Also handles the corner cases (fractional +// blocks are computed based on the last pixels in the row/column which are also +// used in the previous block.) + +#define THRES_BLOCKSIZE 2 + +// No use of level now +ArrayRef HybridBinarizer::getBlackPoints() { + int blackWidth, blackHeight; + + blackWidth = subWidth_; + blackHeight = subHeight_; + + ArrayRef blackPoints(blackWidth * blackHeight); + + int* blackArray = blackPoints->data(); + + int offset = 0; + for (int i = 0; i < blackHeight; i++) { + offset = i * blackWidth; + for (int j = 0; j < blackWidth; j++) { + blackArray[offset + j] = blocks_[offset + j].threshold; + } + } + + return blackPoints; +} + +// Original code 20140606 +void HybridBinarizer::calculateThresholdForBlock(Ref& _luminances, int subWidth, + int subHeight, int SIZE_POWER, + // ArrayRef &blackPoints, + Ref const& matrix, + ErrorHandler& err_handler) { + int block_size = 1 << SIZE_POWER; + + int maxYOffset = height - block_size; + int maxXOffset = width - block_size; + + int* blockIntegral = blockIntegral_->data(); + + int blockArea = ((2 * THRES_BLOCKSIZE + 1) * (2 * THRES_BLOCKSIZE + 1)); + + for (int y = 0; y < subHeight; y++) { + int yoffset = y << SIZE_POWER; + if (yoffset > maxYOffset) { + yoffset = maxYOffset; + } + for (int x = 0; x < subWidth; x++) { + int xoffset = x << SIZE_POWER; + if (xoffset > maxXOffset) { + xoffset = maxXOffset; + } + int left = cap(x, THRES_BLOCKSIZE, subWidth - THRES_BLOCKSIZE - 1); + int top = cap(y, THRES_BLOCKSIZE, subHeight - THRES_BLOCKSIZE - 1); + + int sum = 0; + // int sum2 = 0; + + int offset1 = (top - THRES_BLOCKSIZE) * blockIntegralWidth + left - THRES_BLOCKSIZE; + int offset2 = (top + THRES_BLOCKSIZE + 1) * blockIntegralWidth + left - THRES_BLOCKSIZE; + + int blocksize = THRES_BLOCKSIZE * 2 + 1; + + sum = blockIntegral[offset1] - 
blockIntegral[offset1 + blocksize] - + blockIntegral[offset2] + blockIntegral[offset2 + blocksize]; + + int average = sum / blockArea; + thresholdBlock(_luminances, xoffset, yoffset, average, matrix, err_handler); + if (err_handler.ErrCode()) return; + } + } +} + +#ifdef USE_SET_INT +void HybridBinarizer::thresholdFourBlocks(Ref& luminances, int xoffset, int yoffset, + int* thresholds, int stride, + Ref const& matrix) { + int setIntCircle = BITS_PER_WORD / BITS_PER_BYTE; + for (int y = 0; y < BLOCK_SIZE; y++) { + unsigned char* pTemp = luminances->getByteRow(yoffset + y); + pTemp = pTemp + xoffset; + unsigned int valueInt = 0; + int bitPosition = 0; + for (int k = 0; k < setIntCircle; k++) { + for (int x = 0; x < BLOCK_SIZE; x++) { + int pixel = *pTemp++; + if (pixel <= thresholds[k]) { + // bitPosition=(3-k)*8+x; + valueInt |= (unsigned int)1 << bitPosition; + } + bitPosition++; + } + } + matrix->setIntOneTime(xoffset, yoffset + y, valueInt); + } + return; +} +#endif + +// Applies a single threshold to a block of pixels +void HybridBinarizer::thresholdBlock(Ref& _luminances, int xoffset, int yoffset, + int threshold, Ref const& matrix, + ErrorHandler& err_handler) { + int rowBitsSize = matrix->getRowBitsSize(); + int rowSize = width; + + int rowBitStep = rowBitsSize - BLOCK_SIZE; + int rowStep = rowSize - BLOCK_SIZE; + + unsigned char* pTemp = _luminances->getByteRow(yoffset, err_handler); + if (err_handler.ErrCode()) return; + bool* bpTemp = matrix->getRowBoolPtr(yoffset); + + pTemp += xoffset; + bpTemp += xoffset; + + for (int y = 0; y < BLOCK_SIZE; y++) { + for (int x = 0; x < BLOCK_SIZE; x++) { + // comparison needs to be <= so that black == 0 pixels are black + // even if the threshold is 0. + *bpTemp++ = (*pTemp++ <= threshold) ? 
true : false; + } + + pTemp += rowBitStep; + bpTemp += rowStep; + } +} + +void HybridBinarizer::thresholdIrregularBlock(Ref& _luminances, int xoffset, + int yoffset, int blockWidth, int blockHeight, + int threshold, Ref const& matrix, + ErrorHandler& err_handler) { + for (int y = 0; y < blockHeight; y++) { + unsigned char* pTemp = _luminances->getByteRow(yoffset + y, err_handler); + if (err_handler.ErrCode()) return; + pTemp = pTemp + xoffset; + for (int x = 0; x < blockWidth; x++) { + // comparison needs to be <= so that black == 0 pixels are black + // even if the threshold is 0. + int pixel = *pTemp++; + if (pixel <= threshold) { + matrix->set(xoffset + x, yoffset + y); + } + } + } +} + +namespace { + +inline int getBlackPointFromNeighbors(ArrayRef block, int subWidth, int x, int y) { + return (block[(y - 1) * subWidth + x].threshold + 2 * block[y * subWidth + x - 1].threshold + + block[(y - 1) * subWidth + x - 1].threshold) >> + 2; +} + +} // namespace + + +#define MIN_DYNAMIC_RANGE 24 + +// Calculates a single black point for each block of pixels and saves it away. 
+int HybridBinarizer::initBlocks() { + Ref& _luminances = grayByte_; + int subWidth = subWidth_; + int subHeight = subHeight_; + + unsigned char* bytes = _luminances->bytes; + + const int minDynamicRange = 24; + + for (int y = 0; y < subHeight; y++) { + int yoffset = y << BLOCK_SIZE_POWER; + int maxYOffset = height - BLOCK_SIZE; + if (yoffset > maxYOffset) yoffset = maxYOffset; + for (int x = 0; x < subWidth; x++) { + int xoffset = x << BLOCK_SIZE_POWER; + int maxXOffset = width - BLOCK_SIZE; + if (xoffset > maxXOffset) xoffset = maxXOffset; + int sum = 0; + int min = 0xFF; + int max = 0; + for (int yy = 0, offset = yoffset * width + xoffset; yy < BLOCK_SIZE; + yy++, offset += width) { + for (int xx = 0; xx < BLOCK_SIZE; xx++) { + // int pixel = luminances->bytes[offset + xx] & 0xFF; + int pixel = bytes[offset + xx]; + sum += pixel; + + // still looking for good contrast + if (pixel < min) { + min = pixel; + } + if (pixel > max) { + max = pixel; + } + } + + // short-circuit min/max tests once dynamic range is met + if (max - min > minDynamicRange) { + // finish the rest of the rows quickly + for (yy++, offset += width; yy < BLOCK_SIZE; yy++, offset += width) { + for (int xx = 0; xx < BLOCK_SIZE; xx += 2) { + sum += bytes[offset + xx]; + sum += bytes[offset + xx + 1]; + } + } + } + } + + blocks_[y * subWidth + x].min = min; + blocks_[y * subWidth + x].max = max; + blocks_[y * subWidth + x].sum = sum; + blocks_[y * subWidth + x].threshold = + getBlockThreshold(x, y, subWidth, sum, min, max, minDynamicRange, BLOCK_SIZE_POWER); + } + } + + return 1; +} + +int HybridBinarizer::getBlockThreshold(int x, int y, int subWidth, int sum, int min, int max, + int minDynamicRange, int SIZE_POWER) { + // See + // http://groups.google.com/group/zxing/browse_thread/thread/d06efa2c35a7ddc0 + + // The default estimate is the average of the values in the block. 
+ int average = sum >> (SIZE_POWER * 2); + if (max - min <= minDynamicRange) { + // If variation within the block is low, assume this is a block withe + // only light or only dark pixels. In that case we do not want to use + // the average, as it would divide this low contrast area into black and + // white pixels, essentially creating data out of noise. The default + // assumption is that the block is light/background. Since no estimate + // for the level of dark pixels exists locally, use half the min for the + // block. + average = min >> 1; + if (y > 0 && x > 0) { + // Correct the "white background" assumption for blocks that have + // neighbors by comparing the pixels in this block to the previously + // calculated black points. This is based on the fact that dark + // barcode symbology is always surrounded by some amout of light + // background for which reasonable black point estimates were made. + // The bp estimated at the boundaries is used for the interior. + int bp = getBlackPointFromNeighbors(blocks_, subWidth, x, y); + // The (min= MINIMUM_DIMENSION && height >= MINIMUM_DIMENSION) { + Ref newMatrix(new BitMatrix(width, height, err_handler)); + if (err_handler.ErrCode()) return -1; + + calculateThresholdForBlock(grayByte_, subWidth_, subHeight_, BLOCK_SIZE_POWER, newMatrix, + err_handler); + if (err_handler.ErrCode()) return -1; + + matrix0_ = newMatrix; + + } else { + // If the image is too small, fall back to the global histogram + // approach. 
+ matrix0_ = GlobalHistogramBinarizer::getBlackMatrix(err_handler); + if (err_handler.ErrCode()) return 1; + } + // return matrix0_; + return 1; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/hybrid_binarizer.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/hybrid_binarizer.hpp new file mode 100644 index 00000000..c134918c --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/hybrid_binarizer.hpp @@ -0,0 +1,85 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_COMMON_HYBRID_BINARIZER_HPP__ +#define __ZXING_COMMON_HYBRID_BINARIZER_HPP__ + +#include "../../binarizer.hpp" +#include "../../errorhandler.hpp" +#include "../bitarray.hpp" +#include "../bitmatrix.hpp" +#include "../bytematrix.hpp" +#include "global_histogram_binarizer.hpp" + +#include + + +namespace zxing { + +class HybridBinarizer : public GlobalHistogramBinarizer { +private: + Ref grayByte_; + // ArrayRef integral_; + ArrayRef blockIntegral_; + ArrayRef blocks_; + + ArrayRef blackPoints_; + int level_; + + int subWidth_; + int subHeight_; + int blockIntegralWidth; + int blockIntegralHeight; + +public: + explicit HybridBinarizer(Ref source); + virtual ~HybridBinarizer(); + + virtual Ref getBlackMatrix(ErrorHandler& err_handler) override; + virtual Ref getBlackRow(int y, Ref row, ErrorHandler& err_handler) override; + + Ref createBinarizer(Ref source) override; + +private: + int initIntegral(); + int initBlockIntegral(); + int initBlocks(); + + // int calculateBlackPoints(); + ArrayRef getBlackPoints(); + int getBlockThreshold(int x, int y, int subWidth, int sum, int min, int max, + int minDynamicRange, int SIZE_POWER); + + + void calculateThresholdForBlock(Ref& luminances, int subWidth, int subHeight, + int SIZE_POWER, Ref const& matrix, + ErrorHandler& err_handler); + + + void thresholdBlock(Ref& luminances, int xoffset, int yoffset, int threshold, + Ref const& matrix, ErrorHandler& err_handler); + + void thresholdIrregularBlock(Ref& luminances, int xoffset, int yoffset, + int blockWidth, int blockHeight, int threshold, + Ref const& matrix, ErrorHandler& err_handler); + +#ifdef USE_SET_INT + void thresholdFourBlocks(Ref& luminances, int xoffset, int yoffset, int* thresholds, + int stride, Ref const& matrix); +#endif + + // Add for binarize image when call getBlackMatrix + // By Skylook + int binarizeByBlock(ErrorHandler& err_handler); +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_HYBRID_BINARIZER_HPP__ diff --git 
a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/simple_adaptive_binarizer.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/simple_adaptive_binarizer.cpp new file mode 100644 index 00000000..2364bc39 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/simple_adaptive_binarizer.cpp @@ -0,0 +1,155 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "simple_adaptive_binarizer.hpp" + +using zxing::SimpleAdaptiveBinarizer; + + +SimpleAdaptiveBinarizer::SimpleAdaptiveBinarizer(Ref source) + : GlobalHistogramBinarizer(source) { + filtered = false; +} + +SimpleAdaptiveBinarizer::~SimpleAdaptiveBinarizer() {} + +// Applies simple sharpening to the row data to improve performance of the 1D +// readers. +Ref SimpleAdaptiveBinarizer::getBlackRow(int y, Ref row, + ErrorHandler &err_handler) { + // First call binarize image in child class to get matrix0_ and binCache + if (!matrix0_) { + binarizeImage0(err_handler); + if (err_handler.ErrCode()) return Ref(); + } + // Call parent getBlackMatrix to get current matrix + return Binarizer::getBlackRow(y, row, err_handler); +} + +// Does not sharpen the data, as this call is intended to only be used by 2D +// readers. 
+Ref SimpleAdaptiveBinarizer::getBlackMatrix(ErrorHandler &err_handler) { + // First call binarize image in child class to get matrix0_ and binCache + if (!matrix0_) { + binarizeImage0(err_handler); + if (err_handler.ErrCode()) return Ref(); + } + + // First call binarize image in child class to get matrix0_ and binCache + // Call parent getBlackMatrix to get current matrix + return Binarizer::getBlackMatrix(err_handler); +} + +using namespace std; + +int SimpleAdaptiveBinarizer::binarizeImage0(ErrorHandler &err_handler) { + LuminanceSource &source = *getLuminanceSource(); + + Ref matrix(new BitMatrix(width, height, err_handler)); + if (err_handler.ErrCode()) return -1; + + ArrayRef localLuminances = source.getMatrix(); + + unsigned char *src = (unsigned char *)localLuminances->data(); + unsigned char *dst = matrix->getPtr(); + + qrBinarize(src, dst); + + matrix0_ = matrix; + + return 0; +} + +/*A simplified adaptive thresholder. + This compares the current pixel value to the mean value of a (large) window + surrounding it.*/ +int SimpleAdaptiveBinarizer::qrBinarize(const unsigned char *src, unsigned char *dst) { + unsigned char *mask = dst; + + if (width > 0 && height > 0) { + unsigned *col_sums; + int logwindw; + int logwindh; + int windw; + int windh; + int y0offs; + int y1offs; + unsigned g; + int x; + int y; + /*We keep the window size fairly large to ensure it doesn't fit + completely inside the center of a finder pattern of a version 1 QR + code at full resolution.*/ + for (logwindw = 4; logwindw < 8 && (1 << logwindw) < ((width + 7) >> 3); logwindw++) + ; + for (logwindh = 4; logwindh < 8 && (1 << logwindh) < ((height + 7) >> 3); logwindh++) + ; + windw = 1 << logwindw; + windh = 1 << logwindh; + + int logwinds = (logwindw + logwindh); + + col_sums = (unsigned *)malloc(width * sizeof(*col_sums)); + /*Initialize sums down each column.*/ + for (x = 0; x < width; x++) { + g = src[x]; + col_sums[x] = (g << (logwindh - 1)) + g; + } + for (y = 1; y < (windh >> 
1); y++) { + y1offs = min(y, height - 1) * width; + for (x = 0; x < width; x++) { + g = src[y1offs + x]; + col_sums[x] += g; + } + } + for (y = 0; y < height; y++) { + unsigned m; + int x0; + int x1; + /*Initialize the sum over the window.*/ + m = (col_sums[0] << (logwindw - 1)) + col_sums[0]; + for (x = 1; x < (windw >> 1); x++) { + x1 = min(x, width - 1); + m += col_sums[x1]; + } + + int offset = y * width; + + for (x = 0; x < width; x++) { + /*Perform the test against the threshold T = (m/n)-D, + where n=windw*windh and D=3.*/ + g = src[offset + x]; + mask[offset + x] = ((g + 3) << (logwinds) < m); + /*Update the window sum.*/ + if (x + 1 < width) { + x0 = max(0, x - (windw >> 1)); + x1 = min(x + (windw >> 1), width - 1); + m += col_sums[x1] - col_sums[x0]; + } + } + /*Update the column sums.*/ + if (y + 1 < height) { + y0offs = max(0, y - (windh >> 1)) * width; + y1offs = min(y + (windh >> 1), height - 1) * width; + for (x = 0; x < width; x++) { + col_sums[x] -= src[y0offs + x]; + col_sums[x] += src[y1offs + x]; + } + } + } + free(col_sums); + } + + return 1; +} + +Ref SimpleAdaptiveBinarizer::createBinarizer(Ref source) { + return Ref(new SimpleAdaptiveBinarizer(source)); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/simple_adaptive_binarizer.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/simple_adaptive_binarizer.hpp new file mode 100644 index 00000000..956e87ee --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/binarizer/simple_adaptive_binarizer.hpp @@ -0,0 +1,40 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. 
All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_COMMON_SIMPLEADAPTIVEBINARIZER_HPP__ +#define __ZXING_COMMON_SIMPLEADAPTIVEBINARIZER_HPP__ + +#include "../../binarizer.hpp" +#include "../array.hpp" +#include "../bitarray.hpp" +#include "../bitmatrix.hpp" +#include "global_histogram_binarizer.hpp" + + +namespace zxing { + +class SimpleAdaptiveBinarizer : public GlobalHistogramBinarizer { +public: + explicit SimpleAdaptiveBinarizer(Ref source); + virtual ~SimpleAdaptiveBinarizer(); + + virtual Ref getBlackRow(int y, Ref row, ErrorHandler &err_handler) override; + virtual Ref getBlackMatrix(ErrorHandler &err_handler) override; + Ref createBinarizer(Ref source) override; + +private: + int binarizeImage0(ErrorHandler &err_handler); + int qrBinarize(const unsigned char *src, unsigned char *dst); + bool filtered; +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_SIMPLEADAPTIVEBINARIZER_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitarray.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitarray.cpp new file mode 100644 index 00000000..e0cb9417 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitarray.cpp @@ -0,0 +1,233 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../precomp.hpp" +#include "bitarray.hpp" + +using zxing::ArrayRef; +using zxing::BitArray; +using zxing::ErrorHandler; +using zxing::Ref; + +BitArray::BitArray(int size_) : size(size_), bits(size_), nextSets(size_), nextUnSets(size_) {} + +void BitArray::setUnchar(int i, unsigned char newBits) { bits[i] = newBits; } + +bool BitArray::isRange(int start, int end, bool value, ErrorHandler &err_handler) { + if (end < start) { + err_handler = IllegalArgumentErrorHandler("isRange"); + return false; + } + if (start < 0 || end >= bits->size()) { + err_handler = IllegalArgumentErrorHandler("isRange"); + return false; + } + if (end == start) { + return true; // empty range matches + } + + bool startBool = bits[start] != (unsigned char)0; + + int end2 = start; + + if (startBool) { + end2 = getNextUnset(start); + } else { + end2 = getNextSet(start); + } + + if (startBool == value) { + if (end2 < end) { + return false; + } + } else { + return false; + } + + return true; +} + +void BitArray::reverse() { + bool *rowBits = getRowBoolPtr(); + bool tempBit; + + for (int i = 0; i < size / 2; i++) { + tempBit = rowBits[i]; + rowBits[i] = rowBits[size - i - 1]; + rowBits[size - i - 1] = tempBit; + } +} + +void BitArray::initAllNextSets() { + bool *rowBits = getRowBoolPtr(); + + int *nextSetArray = nextSets->data(); + int *nextUnsetArray = nextUnSets->data(); + + // Init the last one + if (rowBits[size - 1]) { + nextSetArray[size - 1] = size - 1; + nextUnsetArray[size - 1] = size; + } else { + nextUnsetArray[size - 1] = size - 1; + nextSetArray[size - 1] = size; + } + + // do inits + for (int i = size - 2; i >= 0; i--) { + if (rowBits[i]) { + nextSetArray[i] = i; + nextUnsetArray[i] = nextUnsetArray[i + 1]; + } else { + nextUnsetArray[i] = i; + nextSetArray[i] = nextSetArray[i + 1]; + } + } +} + +void BitArray::initAllNextSetsFromCounters(std::vector counters) { + bool *rowBits = getRowBoolPtr(); + bool isWhite = rowBits[0]; + int c = 0; + int offset = 0; + int count = 0; 
+ int prevCount = 0; + int currCount = 0; + int _size = counters.size(); + + int *nextSetArray = nextSets->data(); + int *nextUnsetArray = nextUnSets->data(); + + // int* countersArray = counters.data(); + int *countersArray = &counters[0]; + + while (c < _size) { + currCount = countersArray[c]; + + count += currCount; + + if (isWhite) { + for (int i = 0; i < currCount; i++) { + offset = prevCount + i; + nextSetArray[offset] = prevCount + i; + nextUnsetArray[offset] = count; + } + } else { + for (int i = 0; i < currCount; i++) { + offset = prevCount + i; + nextSetArray[offset] = count; + nextUnsetArray[offset] = prevCount + i; + } + } + + isWhite = !isWhite; + + prevCount += currCount; + + c++; + } +} + +int BitArray::getNextSet(int from) { + if (from >= size) { + return size; + } + return nextSets[from]; +} + +int BitArray::getNextUnset(int from) { + if (from >= size) { + return size; + } + return nextUnSets[from]; +} + +BitArray::~BitArray() {} + +int BitArray::getSize() const { return size; } + +void BitArray::clear() { + int max = bits->size(); + for (int i = 0; i < max; i++) { + bits[i] = 0; + } +} + +BitArray::Reverse::Reverse(Ref array_) : array(array_) { array->reverse(); } + +BitArray::Reverse::~Reverse() { array->reverse(); } + +void BitArray::appendBit(bool value) { + ArrayRef newBits(size + 1); + for (int i = 0; i < size; i++) { + newBits[i] = bits[i]; + } + bits = newBits; + if (value) { + set(size); + } + ++size; +} + +int BitArray::getSizeInBytes() const { return size; } + +// Appends the least-significant bits, from value, in order from +// most-significant to least-significant. For example, appending 6 bits +// from 0x000001E will append the bits 0, 1, 1, 1, 1, 0 in that order. 
+void BitArray::appendBits(int value, int numBits, ErrorHandler &err_handler) { + if (numBits < 0 || numBits > 32) { + err_handler = IllegalArgumentErrorHandler("Number of bits must be between 0 and 32"); + return; + } + ArrayRef newBits(size + numBits); + for (int i = 0; i < size; i++) newBits[i] = bits[i]; + bits = newBits; + for (int numBitsLeft = numBits; numBitsLeft > 0; numBitsLeft--) { + if (((value >> (numBitsLeft - 1)) & 0x01) == 1) { + set(size); + } + ++size; + } + return; +} + +void BitArray::appendBitArray(const BitArray &array) { + ArrayRef newBits(size + array.getSize()); + for (int i = 0; i < size; ++i) { + newBits[i] = bits[i]; + } + bits = newBits; + for (int i = 0; i < array.getSize(); ++i) { + if (array.get(i)) { + set(size); + } + ++size; + } +} + +void BitArray::toBytes(int bitOffset, ArrayRef &array, int offset, int numBytes) { + for (int i = 0; i < numBytes; i++) { + int theByte = 0; + if (get(bitOffset)) { + theByte = 1; + } + bitOffset++; + array[offset + i] = theByte; + } +} +void BitArray::bitXOR(const BitArray &other, ErrorHandler &err_handler) { + if (size != other.size) { + err_handler = IllegalArgumentErrorHandler("Sizes don't match"); + return; + } + + for (int i = 0; i < bits->size(); i++) { + bits[i] = bits[i] == other.bits[i] ? 0 : 1; + } +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitarray.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitarray.hpp new file mode 100644 index 00000000..b6f1f560 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitarray.hpp @@ -0,0 +1,88 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. 
+// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_COMMON_BITARRAY_HPP__ +#define __ZXING_COMMON_BITARRAY_HPP__ + +#include "../errorhandler.hpp" +#include "../zxing.hpp" +#include "array.hpp" +#include "counted.hpp" +#include + +namespace zxing { + +class BitArray : public Counted { +private: + int size; + ArrayRef bits; + ArrayRef nextSets; + ArrayRef nextUnSets; + // bool nextSetsInited; + +public: + explicit BitArray(int size); + ~BitArray(); + int getSize() const; + + bool get(int i) const { return bits[i] != 0; } + void set(int i) { + // bits[i] |= 0xFF; + bits[i] = true; + } + void setOneRow(unsigned char* rowBits, int length) { + unsigned char* dst = bits->data(); + memcpy(dst, rowBits, length); + } + + bool* getRowBoolPtr() { + // return (bool*)bits.data(); + return (bool*)bits->data(); + } + + // Init for next sets and unsets to speed up + void initAllNextSets(); + void initAllNextSetsFromCounters(std::vector counters); + + int getNextSet(int from); + int getNextUnset(int from); + + void setUnchar(int i, unsigned char newBist); + + void clear(); + bool isRange(int start, int end, bool value, ErrorHandler& err_handler); + + void reverse(); + + class Reverse { + private: + Ref array; + + public: + explicit Reverse(Ref array); + ~Reverse(); + }; + + void appendBit(bool value); + int getSizeInBytes() const; + void appendBits(int value, int numberOfBits, ErrorHandler& err_handler); + void appendBitArray(const BitArray& array); + void toBytes(int bitOffset, ArrayRef& array, int offset, int numBytes); + void bitXOR(const BitArray& other, ErrorHandler& err_handler); + +#ifndef USE_BYTE_FOR_BIT +private: + static int makeArraySize(int size); +#endif +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_BITARRAY_HPP__ diff --git 
a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitmatrix.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitmatrix.cpp new file mode 100644 index 00000000..53df192a --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitmatrix.cpp @@ -0,0 +1,397 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../precomp.hpp" +#include "bitmatrix.hpp" + +using zxing::ArrayRef; +using zxing::BitArray; +using zxing::BitMatrix; +using zxing::ErrorHandler; +using zxing::Ref; + +void BitMatrix::init(int _width, int _height, ErrorHandler& err_handler) { + if (_width < 1 || _height < 1) { + err_handler = IllegalArgumentErrorHandler("Both dimensions must be greater than 0"); + return; + } + width = _width; + height = _height; + this->rowBitsSize = width; + bits = ArrayRef(width * height); + rowOffsets = ArrayRef(height); + + // offsetRowSize = new int[height]; + rowOffsets[0] = 0; + for (int i = 1; i < height; i++) { + rowOffsets[i] = rowOffsets[i - 1] + width; + } + + isInitRowCounters = false; + isInitColsCounters = false; +} + +void BitMatrix::init(int _width, int _height, unsigned char* bitsPtr, ErrorHandler& err_handler) { + init(_width, _height, err_handler); + if (err_handler.ErrCode()) return; + memcpy(bits->data(), bitsPtr, width * height * sizeof(unsigned char)); +} + +void BitMatrix::initRowCounters() { + if (isInitRowCounters == true) { + return; + } + + row_counters = vector(width * height, 0); + row_counters_offset = 
vector(width * height, 0); + row_point_offset = vector(width * height, 0); + row_counter_offset_end = vector(height, 0); + + row_counters_recorded = vector(height, false); + + isInitRowCounters = true; +} +void BitMatrix::initColsCounters() { + if (isInitColsCounters == true) { + return; + } + + cols_counters = vector(width * height, 0); + cols_counters_offset = vector(width * height, 0); + cols_point_offset = vector(width * height, 0); + cols_counter_offset_end = vector(width, 0); + + cols_counters_recorded = vector(width, false); + + isInitColsCounters = true; +} + +BitMatrix::BitMatrix(int dimension, ErrorHandler& err_handler) { + init(dimension, dimension, err_handler); +} + +BitMatrix::BitMatrix(int _width, int _height, ErrorHandler& err_handler) { + init(_width, _height, err_handler); +} + +BitMatrix::BitMatrix(int _width, int _height, unsigned char* bitsPtr, ErrorHandler& err_handler) { + init(_width, _height, bitsPtr, err_handler); +} +// Copy bitMatrix +void BitMatrix::copyOf(Ref _bits, ErrorHandler& err_handler) { + int _width = _bits->getWidth(); + int _height = _bits->getHeight(); + init(_width, _height, err_handler); + + for (int y = 0; y < height; y++) { + bool* rowPtr = _bits->getRowBoolPtr(y); + setRowBool(y, rowPtr); + } +} + +void BitMatrix::xxor(Ref _bits) { + if (width != _bits->getWidth() || height != _bits->getHeight()) { + return; + } + + for (int y = 0; y < height && y < _bits->getHeight(); ++y) { + bool* rowPtrA = _bits->getRowBoolPtr(y); + bool* rowPtrB = getRowBoolPtr(y); + + for (int x = 0; x < width && x < _bits->getWidth(); ++x) { + rowPtrB[x] = rowPtrB[x] ^ rowPtrA[x]; + } + setRowBool(y, rowPtrB); + } +} + +BitMatrix::~BitMatrix() {} + +void BitMatrix::flip(int x, int y) { + bits[rowOffsets[y] + x] = (bits[rowOffsets[y] + x] == (unsigned char)0); +} + +void BitMatrix::flipAll() { + bool* matrixBits = (bool*)bits->data(); + for (int i = 0; i < bits->size(); i++) { + matrixBits[i] = !matrixBits[i]; + } +} + +void 
BitMatrix::flipRegion(int left, int top, int _width, int _height, ErrorHandler& err_handler) { + if (top < 0 || left < 0) { + err_handler = IllegalArgumentErrorHandler("Left and top must be nonnegative"); + return; + } + if (_height < 1 || _width < 1) { + err_handler = IllegalArgumentErrorHandler("Height and width must be at least 1"); + return; + } + int right = left + _width; + int bottom = top + _height; + if (bottom > this->height || right > this->width) { + err_handler = IllegalArgumentErrorHandler("The region must fit inside the matrix"); + return; + } + + for (int y = top; y < bottom; y++) { + for (int x = left; x < right; x++) { + bits[rowOffsets[y] + x] ^= 1; + } + } +} + +void BitMatrix::setRegion(int left, int top, int _width, int _height, ErrorHandler& err_handler) { + if (top < 0 || left < 0) { + err_handler = IllegalArgumentErrorHandler("Left and top must be nonnegative"); + return; + } + if (_height < 1 || _width < 1) { + err_handler = IllegalArgumentErrorHandler("Height and width must be at least 1"); + return; + } + int right = left + _width; + int bottom = top + _height; + if (bottom > this->height || right > this->width) { + err_handler = IllegalArgumentErrorHandler("The region must fit inside the matrix"); + return; + } + + for (int y = top; y < bottom; y++) { + for (int x = left; x < right; x++) { + bits[rowOffsets[y] + x] = true; + // bits[rowOffsets[y]+x] |= 0xFF; + } + } +} + +Ref BitMatrix::getRow(int y, Ref row) { + if (row.empty() || row->getSize() < width) { + row = new BitArray(width); + } + + // row-> + unsigned char* src = bits.data() + rowOffsets[y]; + row->setOneRow(src, width); + + return row; +} + +ArrayRef BitMatrix::getTopLeftOnBit() const { + int bitsOffset = 0; + while (bitsOffset < bits->size() && bits[bitsOffset] == 0) { + bitsOffset++; + } + if (bitsOffset == bits->size()) { + return ArrayRef(); + } + int y = bitsOffset / width; + int x = bitsOffset % width; + ArrayRef res(2); + res[0] = x; + res[1] = y; + return res; +} + 
+ArrayRef BitMatrix::getBottomRightOnBit() const { + int bitsOffset = bits->size() - 1; + while (bitsOffset >= 0 && bits[bitsOffset] == 0) { + bitsOffset--; + } + if (bitsOffset < 0) { + return ArrayRef(); + } + + int y = bitsOffset / width; + int x = bitsOffset % width; + ArrayRef res(2); + res[0] = x; + res[1] = y; + return res; +} + +void BitMatrix::getRowBool(int y, bool* getrow) { + int offset = rowOffsets[y]; + unsigned char* src = bits.data() + offset; + memcpy(getrow, src, rowBitsSize * sizeof(bool)); +} + +void BitMatrix::setRowBool(int y, bool* row) { + int offset = rowOffsets[y]; + unsigned char* dst = bits.data() + offset; + memcpy(dst, row, rowBitsSize * sizeof(bool)); + + return; +} + +bool* BitMatrix::getRowBoolPtr(int y) { + int offset = y * rowBitsSize; + unsigned char* src = bits.data() + offset; + return (bool*)src; +} + +void BitMatrix::clear() { + int size = bits->size(); + + unsigned char* dst = bits->data(); + memset(dst, 0, size * sizeof(unsigned char)); +} + +int BitMatrix::getWidth() const { return width; } + +int BitMatrix::getHeight() const { return height; } + +COUNTER_TYPE* BitMatrix::getRowPointInRecords(int y) { + if (!row_point_offset[y]) { + setRowRecords(y); + } + int offset = y * width; + COUNTER_TYPE* counters = &row_point_offset[0] + offset; + return (COUNTER_TYPE*)counters; +} + +COUNTER_TYPE* BitMatrix::getRowRecords(int y) { + if (!row_counters_recorded[y]) { + setRowRecords(y); + } + int offset = y * width; + COUNTER_TYPE* counters = &row_counters[0] + offset; + return (COUNTER_TYPE*)counters; +} + +COUNTER_TYPE* BitMatrix::getRowRecordsOffset(int y) { + if (!row_counters_recorded[y]) { + setRowRecords(y); + } + int offset = y * width; + COUNTER_TYPE* counters = &row_counters_offset[0] + offset; + return (COUNTER_TYPE*)counters; +} + +bool BitMatrix::getRowFirstIsWhite(int y) { + bool is_white = !get(0, y); + return is_white; +} + +bool BitMatrix::getRowLastIsWhite(int y) { + bool last_is_white = !get(width - 1, y); + 
return last_is_white; +} + +COUNTER_TYPE BitMatrix::getRowCounterOffsetEnd(int y) { + if (!row_counters_recorded[y]) { + setRowRecords(y); + } + return row_counter_offset_end[y]; +} + +void BitMatrix::setRowRecords(int y) { + COUNTER_TYPE* cur_row_counters = &row_counters[0] + y * width; + COUNTER_TYPE* cur_row_counters_offset = &row_counters_offset[0] + y * width; + COUNTER_TYPE* cur_row_point_in_counters = &row_point_offset[0] + y * width; + int end = width; + + bool* rowBit = getRowBoolPtr(y); + bool isWhite = !rowBit[0]; + int counterPosition = 0; + int i = 0; + cur_row_counters_offset[0] = 0; + while (i < end) { + if (rowBit[i] ^ isWhite) { // that is, exactly one is true + cur_row_counters[counterPosition]++; + } else { + counterPosition++; + if (counterPosition == end) { + break; + } else { + cur_row_counters[counterPosition] = 1; + isWhite = !isWhite; + cur_row_counters_offset[counterPosition] = i; + } + } + cur_row_point_in_counters[i] = counterPosition; + i++; + } + + // use the last row__onedReaderData->counter_size to record + // _onedReaderData->counter_size + row_counter_offset_end[y] = counterPosition < end ? 
(counterPosition + 1) : end; + + row_counters_recorded[y] = true; + return; +} + +COUNTER_TYPE* BitMatrix::getColsPointInRecords(int x) { + if (!cols_point_offset[x]) { + setColsRecords(x); + } + int offset = x * height; + COUNTER_TYPE* counters = &cols_point_offset[0] + offset; + return (COUNTER_TYPE*)counters; +} + +COUNTER_TYPE* BitMatrix::getColsRecords(int x) { + if (!cols_counters_recorded[x]) { + setColsRecords(x); + } + int offset = x * height; + COUNTER_TYPE* counters = &cols_counters[0] + offset; + return (COUNTER_TYPE*)counters; +} + +COUNTER_TYPE* BitMatrix::getColsRecordsOffset(int x) { + if (!cols_counters_recorded[x]) { + setColsRecords(x); + } + int offset = x * height; + COUNTER_TYPE* counters = &cols_counters_offset[0] + offset; + return (COUNTER_TYPE*)counters; +} + +COUNTER_TYPE BitMatrix::getColsCounterOffsetEnd(int x) { + if (!cols_counters_recorded[x]) { + setColsRecords(x); + } + return cols_counter_offset_end[x]; +} + +void BitMatrix::setColsRecords(int x) { + COUNTER_TYPE* cur_cols_counters = &cols_counters[0] + x * height; + COUNTER_TYPE* cur_cols_counters_offset = &cols_counters_offset[0] + x * height; + COUNTER_TYPE* cur_cols_point_in_counters = &cols_point_offset[0] + x * height; + int end = height; + + bool* rowBit = getRowBoolPtr(0); + bool isWhite = !rowBit[0]; + int counterPosition = 0; + int i = 0; + cur_cols_counters_offset[0] = 0; + while (i < end) { + if (rowBit[i] ^ isWhite) { // that is, exactly one is true + cur_cols_counters[counterPosition]++; + } else { + counterPosition++; + if (counterPosition == end) { + break; + } else { + cur_cols_counters[counterPosition] = 1; + isWhite = !isWhite; + cur_cols_counters_offset[counterPosition] = i; + } + } + cur_cols_point_in_counters[i] = counterPosition; + i++; + rowBit += width; + } + + cols_counter_offset_end[x] = counterPosition < end ? 
(counterPosition + 1) : end; + + cols_counters_recorded[x] = true; + return; +}; diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitmatrix.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitmatrix.hpp new file mode 100644 index 00000000..3cf7ec3c --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitmatrix.hpp @@ -0,0 +1,115 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_COMMON_BITMATRIX_HPP__ +#define __ZXING_COMMON_BITMATRIX_HPP__ + +#include "../errorhandler.hpp" +#include "array.hpp" +#include "bitarray.hpp" +#include "counted.hpp" +using namespace std; + +namespace zxing { + +class BitMatrix : public Counted { +public: + static const int bitsPerWord = std::numeric_limits::digits; + +private: + int width; + int height; + int rowBitsSize; + + vector row_counters; + vector row_counters_offset; + vector row_counters_recorded; + vector row_counter_offset_end; + vector row_point_offset; + + vector cols_counters; + vector cols_counters_offset; + vector cols_counters_recorded; + vector cols_counter_offset_end; + vector cols_point_offset; + + ArrayRef bits; + ArrayRef rowOffsets; + +public: + BitMatrix(int _width, int _height, unsigned char* bitsPtr, ErrorHandler& err_handler); + BitMatrix(int dimension, ErrorHandler& err_handler); + BitMatrix(int _width, int _height, ErrorHandler& err_handler); + + void copyOf(Ref _bits, ErrorHandler& err_handler); + void xxor(Ref _bits); + + ~BitMatrix(); + + 
unsigned char get(int x, int y) const { return bits[width * y + x]; } + + void set(int x, int y) { bits[rowOffsets[y] + x] = (unsigned char)1; } + + void set(int x, int y, unsigned char value) { bits[rowOffsets[y] + x] = value; } + + void swap(int srcX, int srcY, int dstX, int dstY) { + auto temp = bits[width * srcY + srcX]; + bits[width * srcY + srcX] = bits[width * dstY + dstX]; + bits[width * dstY + dstX] = temp; + } + + void getRowBool(int y, bool* row); + bool* getRowBoolPtr(int y); + void setRowBool(int y, bool* row); + int getRowBitsSize() { return rowBitsSize; } + unsigned char* getPtr() { return bits->data(); } + + void flip(int x, int y); + void flipAll(); + void clear(); + void setRegion(int left, int top, int _width, int _height, ErrorHandler& err_handler); + void flipRegion(int left, int top, int _width, int _height, ErrorHandler& err_handler); + Ref getRow(int y, Ref row); + + int getWidth() const; + int getHeight() const; + + ArrayRef getTopLeftOnBit() const; + ArrayRef getBottomRightOnBit() const; + + bool isInitRowCounters; + void initRowCounters(); + COUNTER_TYPE* getRowRecords(int y); + COUNTER_TYPE* getRowRecordsOffset(int y); + bool getRowFirstIsWhite(int y); + COUNTER_TYPE getRowCounterOffsetEnd(int y); + bool getRowLastIsWhite(int y); + COUNTER_TYPE* getRowPointInRecords(int y); + + bool isInitColsCounters; + void initColsCounters(); + COUNTER_TYPE* getColsRecords(int x); + COUNTER_TYPE* getColsRecordsOffset(int x); + COUNTER_TYPE* getColsPointInRecords(int x); + COUNTER_TYPE getColsCounterOffsetEnd(int x); + +private: + inline void init(int, int, ErrorHandler& err_handler); + inline void init(int _width, int _height, unsigned char* bitsPtr, ErrorHandler& err_handler); + + void setRowRecords(int y); + void setColsRecords(int x); + + BitMatrix(const BitMatrix&, ErrorHandler& err_handler); +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_BITMATRIX_HPP__ diff --git 
a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitsource.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitsource.cpp new file mode 100644 index 00000000..52187e1d --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitsource.cpp @@ -0,0 +1,62 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../precomp.hpp" +#include "bitsource.hpp" +#include + +namespace zxing { + +int BitSource::readBits(int numBits, ErrorHandler& err_handler) { + if (numBits < 0 || numBits > 32 || numBits > available()) { + std::ostringstream oss; + oss << numBits; + err_handler = IllegalArgumentErrorHandler(oss.str().c_str()); + return -1; + } + + int result = 0; + + // First, read remainder from current byte + if (bitOffset_ > 0) { + int bitsLeft = 8 - bitOffset_; + int toRead = numBits < bitsLeft ? 
numBits : bitsLeft; + int bitsToNotRead = bitsLeft - toRead; + int mask = (0xFF >> (8 - toRead)) << bitsToNotRead; + result = (bytes_[byteOffset_] & mask) >> bitsToNotRead; + numBits -= toRead; + bitOffset_ += toRead; + if (bitOffset_ == 8) { + bitOffset_ = 0; + byteOffset_++; + } + } + + // Next read whole bytes + if (numBits > 0) { + while (numBits >= 8) { + result = (result << 8) | (bytes_[byteOffset_] & 0xFF); + byteOffset_++; + numBits -= 8; + } + + // Finally read a partial byte + if (numBits > 0) { + int bitsToNotRead = 8 - numBits; + int mask = (0xFF >> bitsToNotRead) << bitsToNotRead; + result = (result << numBits) | ((bytes_[byteOffset_] & mask) >> bitsToNotRead); + bitOffset_ += numBits; + } + } + + return result; +} + +int BitSource::available() { return 8 * (bytes_->size() - byteOffset_) - bitOffset_; } +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitsource.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitsource.hpp new file mode 100644 index 00000000..797c2b5c --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bitsource.hpp @@ -0,0 +1,57 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_COMMON_BITSOURCE_HPP__ +#define __ZXING_COMMON_BITSOURCE_HPP__ + +#include "../errorhandler.hpp" +#include "array.hpp" + +namespace zxing { +/** + *

This provides an easy abstraction to read bits at a time from a sequence + * of bytes, where the number of bits read is not often a multiple of 8.

+ * + *

This class is not thread-safe.

+ * + * @author srowen@google.com (Sean Owen) + * @author christian.brunschen@gmail.com (Christian Brunschen) + */ +class BitSource : public Counted { + typedef char byte; + +private: + ArrayRef bytes_; + int byteOffset_; + int bitOffset_; + +public: + /** + * @param bytes bytes from which this will read bits. Bits will be read from + * the first byte first. Bits are read within a byte from most-significant + * to least-significant bit. + */ + explicit BitSource(ArrayRef &bytes) : bytes_(bytes), byteOffset_(0), bitOffset_(0) {} + + int getBitOffset() { return bitOffset_; } + + int getByteOffset() { return byteOffset_; } + + int readBits(int numBits, ErrorHandler &err_handler); + + /** + * @return number of bits that can be read successfully + */ + int available(); +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_BITSOURCE_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bytematrix.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bytematrix.cpp new file mode 100644 index 00000000..61cf2517 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bytematrix.cpp @@ -0,0 +1,50 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+#include "../../precomp.hpp" +#include "bytematrix.hpp" + +using zxing::ArrayRef; +using zxing::ByteMatrix; +using zxing::ErrorHandler; +using zxing::Ref; + +void ByteMatrix::init(int _width, int _height) { + if (_width < 1 || _height < 1) { + return; + } + this->width = _width; + this->height = _height; + bytes = new unsigned char[width * height]; + row_offsets = new int[height]; + row_offsets[0] = 0; + for (int i = 1; i < height; i++) { + row_offsets[i] = row_offsets[i - 1] + width; + } +} + +ByteMatrix::ByteMatrix(int dimension) { init(dimension, dimension); } + +ByteMatrix::ByteMatrix(int _width, int _height) { init(_width, _height); } + +ByteMatrix::ByteMatrix(int _width, int _height, ArrayRef source) { + init(_width, _height); + int size = _width * _height; + memcpy(&bytes[0], &source[0], size); +} + +ByteMatrix::~ByteMatrix() { + if (bytes) delete[] bytes; + if (row_offsets) delete[] row_offsets; +} + +unsigned char* ByteMatrix::getByteRow(int y, ErrorHandler& err_handler) { + if (y < 0 || y >= getHeight()) { + err_handler = IllegalArgumentErrorHandler("Requested row is outside the image."); + return NULL; + } + return &bytes[row_offsets[y]]; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bytematrix.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bytematrix.hpp new file mode 100644 index 00000000..c3ee35ea --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/bytematrix.hpp @@ -0,0 +1,58 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+ +#ifndef __ZXING_COMMON_BYTEMATRIX_HPP__ +#define __ZXING_COMMON_BYTEMATRIX_HPP__ + +#include "../errorhandler.hpp" +#include "array.hpp" +#include "bitarray.hpp" +#include "counted.hpp" + +namespace zxing { + +class ByteMatrix : public Counted { +public: + explicit ByteMatrix(int dimension); + ByteMatrix(int _width, int _height); + ByteMatrix(int _width, int _height, ArrayRef source); + ~ByteMatrix(); + + char get(int x, int y) const { + int offset = row_offsets[y] + x; + return bytes[offset]; + } + + void set(int x, int y, char char_value) { + int offset = row_offsets[y] + x; + bytes[offset] = char_value & 0XFF; + } + + unsigned char* getByteRow(int y, ErrorHandler& err_handler); + + int getWidth() const { return width; } + int getHeight() const { return height; } + + unsigned char* bytes; + +private: + int width; + int height; + + // ArrayRef bytes; + // ArrayRef row_offsets; + int* row_offsets; + +private: + inline void init(int, int); + ByteMatrix(const ByteMatrix&); + ByteMatrix& operator=(const ByteMatrix&); +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_BYTEMATRIX_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/characterseteci.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/characterseteci.cpp new file mode 100644 index 00000000..acef2044 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/characterseteci.cpp @@ -0,0 +1,111 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../precomp.hpp" +#include "characterseteci.hpp" +using zxing::common::CharacterSetECI; + +// Fix memory leak +// https://github.com/ukeller/zxing-cpp/commit/c632ffe47ca7342f894ae533263be249cbdfd37e +// std::map CharacterSetECI::VALUE_TO_ECI; +// std::map CharacterSetECI::NAME_TO_ECI; +std::map > CharacterSetECI::VALUE_TO_ECI; +std::map > CharacterSetECI::NAME_TO_ECI; + +const bool CharacterSetECI::inited = CharacterSetECI::init_tables(); + +#define ADD_CHARACTER_SET(VALUES, STRINGS) \ + { \ + static int values[] = {VALUES, -1}; \ + static char const* strings[] = {STRINGS, 0}; \ + addCharacterSet(values, strings); \ + } + +#define XC , + +bool CharacterSetECI::init_tables() { + ADD_CHARACTER_SET(0 XC 2, "Cp437"); + ADD_CHARACTER_SET(1 XC 3, "ISO8859_1" XC "ISO-8859-1"); + ADD_CHARACTER_SET(4, "ISO8859_2" XC "ISO-8859-2"); + ADD_CHARACTER_SET(5, "ISO8859_3" XC "ISO-8859-3"); + ADD_CHARACTER_SET(6, "ISO8859_4" XC "ISO-8859-4"); + ADD_CHARACTER_SET(7, "ISO8859_5" XC "ISO-8859-5"); + ADD_CHARACTER_SET(8, "ISO8859_6" XC "ISO-8859-6"); + ADD_CHARACTER_SET(9, "ISO8859_7" XC "ISO-8859-7"); + ADD_CHARACTER_SET(10, "ISO8859_8" XC "ISO-8859-8"); + ADD_CHARACTER_SET(11, "ISO8859_9" XC "ISO-8859-9"); + ADD_CHARACTER_SET(12, "ISO8859_10" XC "ISO-8859-10"); + ADD_CHARACTER_SET(13, "ISO8859_11" XC "ISO-8859-11"); + ADD_CHARACTER_SET(15, "ISO8859_13" XC "ISO-8859-13"); + ADD_CHARACTER_SET(16, "ISO8859_14" XC "ISO-8859-14"); + ADD_CHARACTER_SET(17, "ISO8859_15" XC "ISO-8859-15"); + ADD_CHARACTER_SET(18, "ISO8859_16" XC "ISO-8859-16"); + ADD_CHARACTER_SET(20, "SJIS" XC "Shift_JIS"); + ADD_CHARACTER_SET(21, "Cp1250" XC "windows-1250"); + ADD_CHARACTER_SET(22, "Cp1251" XC "windows-1251"); + ADD_CHARACTER_SET(23, "Cp1252" XC "windows-1252"); + ADD_CHARACTER_SET(24, "Cp1256" XC "windows-1256"); + ADD_CHARACTER_SET(25, "UnicodeBigUnmarked" XC "UTF-16BE" XC "UnicodeBig"); + ADD_CHARACTER_SET(26, "UTF8" XC "UTF-8"); + ADD_CHARACTER_SET(27 XC 170, "ASCII" XC "US-ASCII"); + 
ADD_CHARACTER_SET(28, "Big5"); + ADD_CHARACTER_SET(29, "GB18030" XC "GB2312" XC "EUC_CN" XC "GBK"); + ADD_CHARACTER_SET(30, "EUC_KR" XC "EUC-KR"); + return true; +} + +#undef XC + +CharacterSetECI::CharacterSetECI(int const* values, char const* const* names) + : values_(values), names_(names) { + zxing::Ref this_ref(this); + + for (int const* p_values = values_; *p_values != -1; p_values++) { + // VALUE_TO_ECI[*values] = this; + VALUE_TO_ECI[*p_values] = this_ref; + } + for (char const* const* p_names = names_; *p_names; p_names++) { + // NAME_TO_ECI[string(*names)] = this; + NAME_TO_ECI[string(*p_names)] = this_ref; + } +} + +char const* CharacterSetECI::name() const { return names_[0]; } + +int CharacterSetECI::getValue() const { return values_[0]; } + +void CharacterSetECI::addCharacterSet(int const* values, char const* const* names) { + new CharacterSetECI(values, names); +} + +CharacterSetECI* CharacterSetECI::getCharacterSetECIByValueFind(int value) { + if (value < 0 || value >= 900) { + return zxing::Ref(0); + } + + std::map >::iterator iter; + iter = VALUE_TO_ECI.find(value); + + if (iter != VALUE_TO_ECI.end()) { + return iter->second; + } else { + return zxing::Ref(0); + } +} + +CharacterSetECI* CharacterSetECI::getCharacterSetECIByName(string const& name) { + std::map >::iterator iter; + iter = NAME_TO_ECI.find(name); + + if (iter != NAME_TO_ECI.end()) { + return iter->second; + } else { + return zxing::Ref(0); + } +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/characterseteci.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/characterseteci.hpp new file mode 100644 index 00000000..9b44f14d --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/characterseteci.hpp @@ -0,0 +1,46 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_COMMON_CHARACTERSETECI_HPP__ +#define __ZXING_COMMON_CHARACTERSETECI_HPP__ + +#include +#include "../decodehints.hpp" +#include "counted.hpp" + +namespace zxing { +namespace common { + +class CharacterSetECI : public Counted { +private: + static std::map > VALUE_TO_ECI; + static std::map > NAME_TO_ECI; + static const bool inited; + static bool init_tables(); + + int const* const values_; + char const* const* const names_; + + CharacterSetECI(int const* values, char const* const* names); + + static void addCharacterSet(int const* value, char const* const* encodingNames); + +public: + char const* name() const; + int getValue() const; + + static CharacterSetECI* getCharacterSetECIByValueFind(int value); + static CharacterSetECI* getCharacterSetECIByName(std::string const& name); +}; + +} // namespace common +} // namespace zxing + +#endif // __ZXING_COMMON_CHARACTERSETECI_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/counted.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/counted.hpp new file mode 100644 index 00000000..d40d62a3 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/counted.hpp @@ -0,0 +1,110 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_COMMON_COUNTED_HPP__ +#define __ZXING_COMMON_COUNTED_HPP__ + +#include +#include +namespace zxing { + +/* base class for reference-counted objects */ +class Counted { +private: + unsigned int count_; + +public: + Counted() : count_(0) {} + virtual ~Counted() {} + Counted* retain() { + count_++; + return this; + } + void release() { + count_--; + if (count_ == 0) { + count_ = 0xDEADF001; + delete this; + } + } + + /* return the current count for denugging purposes or similar */ + int count() const { return count_; } +}; + +/* counting reference to reference-counted objects */ +template +class Ref { +private: +public: + T* object_; + explicit Ref(T* o = 0) : object_(0) { reset(o); } + Ref(const Ref& other) : object_(0) { reset(other.object_); } + + template + Ref(const Ref& other) : object_(0) { + reset(other.object_); + } + + ~Ref() { + if (object_) { + object_->release(); + } + } + + void reset(T* o) { + if (o) { + o->retain(); + } + if (object_ != 0) { + object_->release(); + } + object_ = o; + } + Ref& operator=(const Ref& other) { + reset(other.object_); + return *this; + } + template + Ref& operator=(const Ref& other) { + reset(other.object_); + return *this; + } + Ref& operator=(T* o) { + reset(o); + return *this; + } + template + Ref& operator=(Y* o) { + reset(o); + return *this; + } + + T& operator*() { return *object_; } + T* operator->() const { return object_; } + operator T*() const { return object_; } + + bool operator==(const T* that) { return object_ == that; } + bool operator==(const Ref& other) const { + return object_ == other.object_ || *object_ == *(other.object_); + } + template + bool operator==(const Ref& other) const { + return 
object_ == other.object_ || *object_ == *(other.object_); + } + + bool operator!=(const T* that) { return !(*this == that); } + + bool empty() const { return object_ == 0; } +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_COUNTED_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/decoder_result.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/decoder_result.cpp new file mode 100644 index 00000000..3de6656f --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/decoder_result.cpp @@ -0,0 +1,65 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../precomp.hpp" +#include "decoder_result.hpp" + +using zxing::DecoderResult; +using zxing::Ref; +using zxing::ArrayRef; +using zxing::String; +DecoderResult::DecoderResult(ArrayRef rawBytes, Ref text, + ArrayRef >& byteSegments, string const& ecLevel) + : rawBytes_(rawBytes), text_(text), byteSegments_(byteSegments), ecLevel_(ecLevel) { + outputCharset_ = "UTF-8"; + otherClassName = ""; + qrcodeVersion_ = -1; +} + +DecoderResult::DecoderResult(ArrayRef rawBytes, Ref text, + ArrayRef >& byteSegments, string const& ecLevel, + string outputCharset) + : rawBytes_(rawBytes), + text_(text), + byteSegments_(byteSegments), + ecLevel_(ecLevel), + outputCharset_(outputCharset) { + otherClassName = ""; + qrcodeVersion_ = -1; +} + +DecoderResult::DecoderResult(ArrayRef rawBytes, Ref text, + ArrayRef >& byteSegments, string const& ecLevel, + string outputCharset, int qrcodeVersion, string& charsetMode) + : rawBytes_(rawBytes), + text_(text), + byteSegments_(byteSegments), + ecLevel_(ecLevel), + outputCharset_(outputCharset), + qrcodeVersion_(qrcodeVersion), + charsetMode_(charsetMode) { + otherClassName = ""; +} + +DecoderResult::DecoderResult(ArrayRef rawBytes, Ref text) + : rawBytes_(rawBytes), text_(text) { + outputCharset_ = "UTF-8"; + otherClassName = ""; +} + +DecoderResult::DecoderResult(ArrayRef rawBytes, Ref text, std::string outputCharset) + : rawBytes_(rawBytes), text_(text), outputCharset_(outputCharset) { + otherClassName = ""; +} + +ArrayRef DecoderResult::getRawBytes() { return rawBytes_; } + +Ref DecoderResult::getText() { return text_; } + +string DecoderResult::getCharset() { return outputCharset_; } diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/decoder_result.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/decoder_result.hpp new file mode 100644 index 00000000..7ffd002c --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/decoder_result.hpp 
@@ -0,0 +1,77 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_COMMON_DECODER_RESULT_HPP__ +#define __ZXING_COMMON_DECODER_RESULT_HPP__ + +#include "../qrcode/decoder/qrcode_decoder_metadata.hpp" +#include "array.hpp" +#include "counted.hpp" +#include "str.hpp" + +namespace zxing { + +class DecoderResult : public Counted { +private: + ArrayRef rawBytes_; + Ref text_; + ArrayRef > byteSegments_; + std::string ecLevel_; + std::string outputCharset_; + int qrcodeVersion_; + std::string charsetMode_; + + Ref other_; + string otherClassName; + +public: + DecoderResult(ArrayRef rawBytes, Ref text, + ArrayRef >& byteSegments, std::string const& ecLevel); + + DecoderResult(ArrayRef rawBytes, Ref text, + ArrayRef >& byteSegments, std::string const& ecLevel, + std::string outputCharset); + + DecoderResult(ArrayRef rawBytes, Ref text, + ArrayRef >& byteSegments, std::string const& ecLevel, + std::string outputCharset, int qrcodeVersion, std::string& charsetMode); + + DecoderResult(ArrayRef rawBytes, Ref text); + + DecoderResult(ArrayRef rawBytes, Ref text, std::string outputCharset); + + ArrayRef getRawBytes(); + Ref getText(); + std::string getCharset(); + + void setOther(Ref other) { + other_ = other; + otherClassName = "QRCodeDecoderMetaData"; + }; + + Ref getOther() { + // className = otherClassName; + return other_; + }; + + string getOtherClassName() { return otherClassName; }; + + int getQRCodeVersion() const { return qrcodeVersion_; }; + + void setText(Ref text) { text_ = text; }; + + string getEcLevel() { 
return ecLevel_; } + + string getCharsetMode() { return charsetMode_; } +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_DECODER_RESULT_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/detector_result.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/detector_result.cpp new file mode 100644 index 00000000..2e50a5dc --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/detector_result.cpp @@ -0,0 +1,27 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../precomp.hpp" +#include "detector_result.hpp" + +namespace zxing { + +DetectorResult::DetectorResult(Ref bits, ArrayRef > points, + int dimension, float modulesize) + : bits_(bits), points_(points), dimension_(dimension), modulesize_(modulesize) {} + +void DetectorResult::SetGray(Ref gray) { gray_ = gray; } + +Ref DetectorResult::getBits() { return bits_; } + +Ref DetectorResult::getGray() { return gray_; } + +ArrayRef > DetectorResult::getPoints() { return points_; } + +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/detector_result.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/detector_result.hpp new file mode 100644 index 00000000..c48e9efe --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/detector_result.hpp @@ -0,0 +1,42 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_COMMON_DETECTOR_RESULT_HPP__ +#define __ZXING_COMMON_DETECTOR_RESULT_HPP__ + +#include "../resultpoint.hpp" +#include "array.hpp" +#include "bitmatrix.hpp" +#include "bytematrix.hpp" +#include "counted.hpp" + +namespace zxing { + +class DetectorResult : public Counted { +private: + Ref bits_; + Ref gray_; + ArrayRef > points_; + +public: + DetectorResult(Ref bits, ArrayRef > points, int dimension = 0, + float modulesize = 0); + DetectorResult(Ref gray, ArrayRef > points, int dimension = 0, + float modulesize = 0); + Ref getBits(); + Ref getGray(); + void SetGray(Ref gray); + ArrayRef > getPoints(); + int dimension_; + float modulesize_; +}; +} // namespace zxing + +#endif // __ZXING_COMMON_DETECTOR_RESULT_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/greyscale_luminance_source.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/greyscale_luminance_source.cpp new file mode 100644 index 00000000..cf35ccbc --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/greyscale_luminance_source.cpp @@ -0,0 +1,77 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../precomp.hpp" +#include "greyscale_luminance_source.hpp" +#include "bytematrix.hpp" +#include "greyscale_rotated_luminance_source.hpp" +using zxing::ArrayRef; +using zxing::ByteMatrix; +using zxing::ErrorHandler; +using zxing::GreyscaleLuminanceSource; +using zxing::LuminanceSource; +using zxing::Ref; + +GreyscaleLuminanceSource::GreyscaleLuminanceSource(ArrayRef greyData, int dataWidth, + int dataHeight, int left, int top, int width, + int height, ErrorHandler& err_handler) + : Super(width, height), + greyData_(greyData), + dataWidth_(dataWidth), + dataHeight_(dataHeight), + left_(left), + top_(top) { + if (left + width > dataWidth || top + height > dataHeight || top < 0 || left < 0) { + err_handler = IllegalArgumentErrorHandler("Crop rectangle does not fit within image data."); + } +} + +ArrayRef GreyscaleLuminanceSource::getRow(int y, ArrayRef row, + ErrorHandler& err_handler) const { + if (y < 0 || y >= this->getHeight()) { + err_handler = IllegalArgumentErrorHandler("Requested row is outside the image."); + return ArrayRef(); + } + int width = getWidth(); + if (!row || row->size() < width) { + ArrayRef temp(width); + row = temp; + } + int offset = (y + top_) * dataWidth_ + left_; + memcpy(&row[0], &greyData_[offset], width); + return row; +} + +ArrayRef GreyscaleLuminanceSource::getMatrix() const { + int size = getWidth() * getHeight(); + ArrayRef result(size); + if (left_ == 0 && top_ == 0 && dataWidth_ == getWidth() && dataHeight_ == getHeight()) { + memcpy(&result[0], &greyData_[0], size); + } else { + for (int row = 0; row < getHeight(); row++) { + memcpy(&result[row * getWidth()], &greyData_[(top_ + row) * dataWidth_ + left_], + getWidth()); + } + } + return result; +} + +Ref GreyscaleLuminanceSource::rotateCounterClockwise( + ErrorHandler& err_handler) const { + // Intentionally flip the left, top, width, and height 
arguments as + // needed. dataWidth and dataHeight are always kept unrotated. + Ref result(new GreyscaleRotatedLuminanceSource( + greyData_, dataWidth_, dataHeight_, top_, left_, getHeight(), getWidth(), err_handler)); + if (err_handler.ErrCode()) return Ref(); + return result; +} + +Ref GreyscaleLuminanceSource::getByteMatrix() const { + return Ref(new ByteMatrix(getWidth(), getHeight(), getMatrix())); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/greyscale_luminance_source.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/greyscale_luminance_source.hpp new file mode 100644 index 00000000..438b14e3 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/greyscale_luminance_source.hpp @@ -0,0 +1,44 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_COMMON_GREYSCALE_LUMINANCE_SOURCE_HPP__ +#define __ZXING_COMMON_GREYSCALE_LUMINANCE_SOURCE_HPP__ + +#include "../errorhandler.hpp" +#include "../luminance_source.hpp" +#include "bytematrix.hpp" + +namespace zxing { + +class GreyscaleLuminanceSource : public LuminanceSource { +private: + typedef LuminanceSource Super; + ArrayRef greyData_; + const int dataWidth_; + const int dataHeight_; + const int left_; + const int top_; + +public: + GreyscaleLuminanceSource(ArrayRef greyData, int dataWidth, int dataHeight, int left, + int top, int width, int height, ErrorHandler& err_handler); + + ArrayRef getRow(int y, ArrayRef row, ErrorHandler& err_handler) const override; + ArrayRef getMatrix() const override; + Ref getByteMatrix() const override; + + bool isRotateSupported() const override { return true; } + + Ref rotateCounterClockwise(ErrorHandler& err_handler) const override; +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_GREYSCALE_LUMINANCE_SOURCE_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/greyscale_rotated_luminance_source.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/greyscale_rotated_luminance_source.cpp new file mode 100644 index 00000000..4f9e6261 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/greyscale_rotated_luminance_source.cpp @@ -0,0 +1,67 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../precomp.hpp" +#include "greyscale_rotated_luminance_source.hpp" +#include "bytematrix.hpp" +using zxing::ArrayRef; +using zxing::ByteMatrix; +using zxing::ErrorHandler; +using zxing::GreyscaleRotatedLuminanceSource; +using zxing::Ref; + +// Note that dataWidth and dataHeight are not reversed, as we need to +// be able to traverse the greyData correctly, which does not get +// rotated. +GreyscaleRotatedLuminanceSource::GreyscaleRotatedLuminanceSource(ArrayRef greyData, + int dataWidth, int dataHeight, + int left, int top, int _width, + int _height, + ErrorHandler& err_handler) + : Super(_width, _height), greyData_(greyData), dataWidth_(dataWidth), left_(left), top_(top) { + // Intentionally comparing to the opposite dimension since we're rotated. + if (left + _width > dataHeight || top + _height > dataWidth) { + err_handler = IllegalArgumentErrorHandler("Crop rectangle does not fit within image data."); + } +} + +// The API asks for rows, but we're rotated, so we return columns. 
+ArrayRef GreyscaleRotatedLuminanceSource::getRow(int y, ArrayRef row, + ErrorHandler& err_handler) const { + if (y < 0 || y >= getHeight()) { + err_handler = IllegalArgumentErrorHandler("Requested row is outside the image."); + return ArrayRef(); + } + if (!row || row->size() < getWidth()) { + row = ArrayRef(getWidth()); + } + int offset = (left_ * dataWidth_) + (dataWidth_ - 1 - (y + top_)); + for (int x = 0; x < getWidth(); x++) { + row[x] = greyData_[offset]; + offset += dataWidth_; + } + return row; +} + +ArrayRef GreyscaleRotatedLuminanceSource::getMatrix() const { + ArrayRef result(getWidth() * getHeight()); + for (int y = 0; y < getHeight(); y++) { + char* row = &result[y * getWidth()]; + int offset = (left_ * dataWidth_) + (dataWidth_ - 1 - (y + top_)); + for (int x = 0; x < getWidth(); x++) { + row[x] = greyData_[offset]; + offset += dataWidth_; + } + } + return result; +} + +Ref GreyscaleRotatedLuminanceSource::getByteMatrix() const { + return Ref(new ByteMatrix(getWidth(), getHeight(), getMatrix())); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/greyscale_rotated_luminance_source.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/greyscale_rotated_luminance_source.hpp new file mode 100644 index 00000000..2e1c8f67 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/greyscale_rotated_luminance_source.hpp @@ -0,0 +1,39 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_COMMON_GREYSCALE_ROTATED_LUMINANCE_SOURCE_HPP__ +#define __ZXING_COMMON_GREYSCALE_ROTATED_LUMINANCE_SOURCE_HPP__ + +#include "../errorhandler.hpp" +#include "../luminance_source.hpp" +#include "bytematrix.hpp" +namespace zxing { + +class GreyscaleRotatedLuminanceSource : public LuminanceSource { +private: + typedef LuminanceSource Super; + ArrayRef greyData_; + const int dataWidth_; + const int left_; + const int top_; + +public: + GreyscaleRotatedLuminanceSource(ArrayRef greyData, int dataWidth, int dataHeight, + int left, int top, int _width, int _height, + ErrorHandler& err_handler); + + ArrayRef getRow(int y, ArrayRef row, ErrorHandler& err_handler) const override; + ArrayRef getMatrix() const override; + Ref getByteMatrix() const override; +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_GREYSCALE_ROTATED_LUMINANCE_SOURCE_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/grid_sampler.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/grid_sampler.cpp new file mode 100644 index 00000000..52564f2f --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/grid_sampler.cpp @@ -0,0 +1,119 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../precomp.hpp" +#include "grid_sampler.hpp" +#include "perspective_transform.hpp" +#include + +namespace zxing { + +GridSampler GridSampler::gridSampler; + +GridSampler::GridSampler() {} + +// Samples an image for a rectangular matrix of bits of the given dimension. +Ref GridSampler::sampleGrid(Ref image, int dimension, + Ref transform, + ErrorHandler &err_handler) { + Ref bits(new BitMatrix(dimension, err_handler)); + if (err_handler.ErrCode()) return Ref(); + + vector points(dimension << 1, 0.0f); + + int outlier = 0; + int maxOutlier = dimension * dimension * 3 / 10 - 1; + + for (int y = 0; y < dimension; y++) { + int max = points.size(); + float yValue = (float)y + 0.5f; + for (int x = 0; x < max; x += 2) { + points[x] = (float)(x >> 1) + 0.5f; + points[x + 1] = yValue; + } + transform->transformPoints(points); + // Quick check to see if points transformed to something inside the + // image; sufficient to check the endpoings + outlier += checkAndNudgePoints(image->getWidth(), image->getHeight(), points, err_handler); + if (err_handler.ErrCode()) return Ref(); + + if (outlier >= maxOutlier) { + ostringstream s; + s << "Over 30% points out of bounds."; + err_handler = ReaderErrorHandler(s.str().c_str()); + return Ref(); + } + + for (int x = 0; x < max; x += 2) { + if (image->get((int)points[x], (int)points[x + 1])) { + // Black (-ish) pixel + bits->set(x >> 1, y); + } + } + } + return bits; +} + +int GridSampler::checkAndNudgePoints(int width, int height, vector &points, + ErrorHandler &err_handler) { + // Modified to support stlport + float *pts = NULL; + + if (points.size() > 0) { + pts = &points[0]; + } else { + err_handler = ReaderErrorHandler("checkAndNudgePoints:: no points!"); + return -1; + } + + int size = (int)points.size() / 2; + + // The Java code assumes that if the start and end points are in bounds, the + // rest will also be. However, in some unusual cases points in the middle + // may also be out of bounds. 
Since we can't rely on an + // ArrayIndexOutOfBoundsException like Java, we check every point. + + int outCount = 0; + // int maxError = (int)(size/2/3 - 1); + + float maxborder = width / size * 3; + + for (size_t offset = 0; offset < points.size(); offset += 2) { + int x = (int)pts[offset]; + int y = (int)pts[offset + 1]; + // if((int)offset==0) + // cout<<"checkAndNudgePoints "<<(int)offset<<": ("< width || y < -1 || y > height) { + outCount++; + if (x > width + maxborder || y > height + maxborder || x < -maxborder || + y < -maxborder) { + err_handler = ReaderErrorHandler("checkAndNudgePoints::Out of bounds!"); + return -1; + } + } + + if (x <= -1) { + points[offset] = 0.0f; + } else if (x >= width) { + points[offset] = float(width - 1); + } + if (y <= -1) { + points[offset + 1] = 0.0f; + } else if (y >= height) { + points[offset + 1] = float(height - 1); + } + } + + return outCount; +} + +GridSampler &GridSampler::getInstance() { return gridSampler; } +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/grid_sampler.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/grid_sampler.hpp new file mode 100644 index 00000000..c697d1c0 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/grid_sampler.hpp @@ -0,0 +1,34 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_COMMON_GRID_SAMPLER_HPP__ +#define __ZXING_COMMON_GRID_SAMPLER_HPP__ + +#include "bitmatrix.hpp" +#include "bytematrix.hpp" +#include "counted.hpp" +#include "perspective_transform.hpp" + +namespace zxing { +class GridSampler { +private: + static GridSampler gridSampler; + GridSampler(); + +public: + Ref sampleGrid(Ref image, int dimension, + Ref transform, ErrorHandler &err_handler); + static int checkAndNudgePoints(int width, int height, vector &points, + ErrorHandler &err_handler); + static GridSampler &getInstance(); +}; +} // namespace zxing + +#endif // __ZXING_COMMON_GRID_SAMPLER_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/imagecut.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/imagecut.cpp new file mode 100644 index 00000000..86fdf7f3 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/imagecut.cpp @@ -0,0 +1,66 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+#include "../../precomp.hpp" +#include "imagecut.hpp" + + +namespace zxing { + +ImageCut::ImageCut() {} + +ImageCut::~ImageCut() {} + +int ImageCut::Cut(uint8_t* poImageData, int iWidth, int iHeight, int iTopLeftX, int iTopLeftY, + int iBottomRightX, int iBottomRightY, ImageCutResult& result) { + if (iTopLeftX < 0 || iTopLeftX > iBottomRightX || iBottomRightX >= iWidth) return -1; + if (iTopLeftY < 0 || iTopLeftY > iBottomRightY || iBottomRightY >= iHeight) return -1; + int iNewWidth = iBottomRightX - iTopLeftX + 1; + int iNewHeight = iBottomRightY - iTopLeftY + 1; + + result.arrImage = new Array(iNewWidth * iNewHeight); + result.iHeight = iNewHeight; + result.iWidth = iNewWidth; + + int idx = 0; + for (int y = 0; y < iHeight; ++y) { + if (y < iTopLeftY || y > iBottomRightY) continue; + for (int x = 0; x < iWidth; ++x) { + if (x < iTopLeftX || x > iBottomRightX) continue; + result.arrImage[idx++] = poImageData[y * iWidth + x]; + } + } + return 0; +} + +int ImageCut::Cut(Ref matrix, float fRatio, ImageCutResult& result) { + int iWidth = matrix->getWidth(); + int iHeight = matrix->getHeight(); + + int iMinX = iWidth * (1 - fRatio) / 2; + int iMinY = iHeight * (1 - fRatio) / 2; + int iMaxX = iWidth * (1 + fRatio) / 2 - 1; + int iMaxY = iHeight * (1 + fRatio) / 2 - 1; + + if (iMinY < 0 || iMinY > iMaxX || iMaxX >= iWidth) return -1; + if (iMinX < 0 || iMinX > iMaxX || iMaxX >= iWidth) return -1; + int iNewHeight = iMaxY - iMinY + 1; + int iNewWidth = iMaxX - iMinX + 1; + + result.arrImage = new Array(iNewWidth * iNewHeight); + result.iWidth = iNewWidth; + result.iHeight = iNewHeight; + + int idx = 0; + for (int y = 0; y < iNewHeight; ++y) { + for (int x = 0; x < iNewWidth; ++x) { + result.arrImage[idx++] = matrix->get(x + iMinX, y + iMinY); + } + } + return 0; +} + +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/imagecut.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/imagecut.hpp 
new file mode 100644 index 00000000..377191ff --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/imagecut.hpp @@ -0,0 +1,32 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. + +#ifndef __ZXING_COMMON_IMAGECUT_HPP__ +#define __ZXING_COMMON_IMAGECUT_HPP__ +#include "bytematrix.hpp" +#include "counted.hpp" + +namespace zxing { + +typedef struct _ImageCutResult { + ArrayRef arrImage; + int iWidth; + int iHeight; +} ImageCutResult; + +class ImageCut { +public: + ImageCut(); + ~ImageCut(); + + static int Cut(uint8_t* poImageData, int iWidth, int iHeight, int iTopLeftX, int iTopLeftY, + int iBottomRightX, int iBottomRightY, ImageCutResult& result); + static int Cut(Ref matrix, float fRatio, ImageCutResult& result); +}; + +} // namespace zxing +#endif // __ZXING_COMMON_IMAGECUT_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/kmeans.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/kmeans.cpp new file mode 100644 index 00000000..2c55edc3 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/kmeans.cpp @@ -0,0 +1,89 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+#include "../../precomp.hpp" +#include "kmeans.hpp" + +typedef unsigned int uint; + +namespace zxing { + +double cal_distance(vector a, vector b) { + const float KMEANS_COUNT_FACTOR = 0; + const float KMEANS_MS_FACTOR = 1; + + uint da = a.size(); + double val = 0.0; + for (uint i = 0; i < da; i++) { + if (i == 1) + val += KMEANS_MS_FACTOR * pow((a[i] - b[i]), 2); + else if (i == 0) + val += KMEANS_COUNT_FACTOR * pow((a[i] - b[i]), 2); + else + val += pow((a[i] - b[i]), 2); + } + return pow(val, 0.5); +} + +/* + * maxepoches max iteration epochs + * minchanged min central change times + */ +vector k_means(vector > trainX, uint k, uint maxepoches, uint minchanged) { + const uint row_num = trainX.size(); + const uint col_num = trainX[0].size(); + + // initialize the cluster central + vector clusters(k); + int step = trainX.size() / k; + + for (uint i = 0; i < k; i++) { + clusters[i].centroid = trainX[i * step]; + } + + // try max epochs times iteration untill convergence + for (uint it = 0; it < maxepoches; it++) { + for (uint i = 0; i < k; i++) { + clusters[i].samples.clear(); + } + for (uint j = 0; j < row_num; j++) { + uint c = 0; + double min_distance = cal_distance(trainX[j], clusters[c].centroid); + for (uint i = 1; i < k; i++) { + double distance = cal_distance(trainX[j], clusters[i].centroid); + if (distance < min_distance) { + min_distance = distance; + c = i; + } + } + clusters[c].samples.push_back(j); + } + + uint changed = 0; + // update cluster central + for (uint i = 0; i < k; i++) { + vector val(col_num, 0.0); + for (uint j = 0; j < clusters[i].samples.size(); j++) { + uint sample = clusters[i].samples[j]; + for (uint d = 0; d < col_num; d++) { + val[d] += trainX[sample][d]; + if (j == clusters[i].samples.size() - 1) { + double value = val[d] / clusters[i].samples.size(); + if (clusters[i].centroid[d] != value) { + clusters[i].centroid[d] = value; + changed++; + } + } + } + } + } + + if (changed <= minchanged) return clusters; + } + return clusters; +} 
+ +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/kmeans.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/kmeans.hpp new file mode 100644 index 00000000..5d656dfb --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/kmeans.hpp @@ -0,0 +1,26 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. + +#ifndef __ZXING_COMMON_KMEANS_HPP__ +#define __ZXING_COMMON_KMEANS_HPP__ +#include + +namespace zxing { + +using namespace std; +typedef unsigned int uint; + +struct Cluster { + vector centroid; + vector samples; +}; + +double cal_distance(vector a, vector b); +vector k_means(vector > trainX, uint k, uint maxepoches, uint minchanged); + +} // namespace zxing +#endif // __ZXING_COMMON_KMEANS_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/mathutils.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/mathutils.hpp new file mode 100644 index 00000000..bd466224 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/mathutils.hpp @@ -0,0 +1,95 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_COMMON_MATHUTILS_HPP__ +#define __ZXING_COMMON_MATHUTILS_HPP__ + +#include +#if (defined __GNUC__ && defined __x86_64__ && defined __SSE2__ && !defined __APPLE__ && \ + !defined __GXX_WEAK__) +#include +#elif defined _MSC_VER && (defined _M_X64 || defined _M_IX86) +#include +#endif + +#include +#include +#include + +namespace zxing { +namespace common { + +class MathUtils { +private: + MathUtils(); + ~MathUtils(); + +public: + static inline float distance(float aX, float aY, float bX, float bY) { + float xDiff = aX - bX; + float yDiff = aY - bY; + return sqrt(float(xDiff * xDiff + yDiff * yDiff)); + } + + static inline float distance_4_int(int aX, int aY, int bX, int bY) { + return sqrt(float((aX - bX) * (aX - bX) + (aY - bY) * (aY - bY))); + } + + static inline void getRangeValues(int& minValue, int& maxValue, int min, int max) { + int finalMinValue, finalMaxValue; + + if (minValue < maxValue) { + finalMinValue = minValue; + finalMaxValue = maxValue; + } else { + finalMinValue = maxValue; + finalMaxValue = minValue; + } + + finalMinValue = finalMinValue > min ? finalMinValue : min; + finalMaxValue = finalMaxValue < max ? 
finalMaxValue : max; + + minValue = finalMinValue; + maxValue = finalMaxValue; + } + + static inline bool isInRange(float x, float y, float width, float height) { + if ((x >= 0.0 && x <= (width - 1.0)) && (y >= 0.0 && y <= (height - 1.0))) { + return true; + } else { + return false; + } + } + + static inline float distance(int aX, int aY, int bX, int bY) { + int xDiff = aX - bX; + int yDiff = aY - bY; + return sqrt(float(xDiff * xDiff + yDiff * yDiff)); + } + + static inline float VecCross(float* v1, float* v2) { return v1[0] * v2[1] - v1[1] * v2[0]; } + + static inline void Stddev(std::vector& resultSet, float& avg, float& stddev) { + double sum = std::accumulate(resultSet.begin(), resultSet.end(), 0.0); + avg = sum / resultSet.size(); + + double accum = 0.0; + for (std::size_t i = 0; i < resultSet.size(); i++) { + accum += (resultSet[i] - avg) * (resultSet[i] - avg); + } + + stddev = sqrt(accum / (resultSet.size())); + } +}; + +} // namespace common +} // namespace zxing + +#endif // __ZXING_COMMON_MATHUTILS_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/perspective_transform.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/perspective_transform.cpp new file mode 100644 index 00000000..74ac2cd7 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/perspective_transform.cpp @@ -0,0 +1,120 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../precomp.hpp" +#include "perspective_transform.hpp" + + +namespace zxing { + +// This class implements a perspective transform in two dimensions. Given four +// source and four destination points, it will compute the transformation +// implied between them. The code is based directly upon section 3.4.2 of George +// Wolberg's "Digital Image Warping"; see pages 54-56 +PerspectiveTransform::PerspectiveTransform(float inA11, float inA21, float inA31, float inA12, + float inA22, float inA32, float inA13, float inA23, + float inA33) + : a11(inA11), + a12(inA12), + a13(inA13), + a21(inA21), + a22(inA22), + a23(inA23), + a31(inA31), + a32(inA32), + a33(inA33) {} + +Ref PerspectiveTransform::quadrilateralToQuadrilateral( + float x0, float y0, float x1, float y1, float x2, float y2, float x3, float y3, float x0p, + float y0p, float x1p, float y1p, float x2p, float y2p, float x3p, float y3p) { + Ref qToS = + PerspectiveTransform::quadrilateralToSquare(x0, y0, x1, y1, x2, y2, x3, y3); + Ref sToQ = + PerspectiveTransform::squareToQuadrilateral(x0p, y0p, x1p, y1p, x2p, y2p, x3p, y3p); + return sToQ->times(qToS); +} + +Ref PerspectiveTransform::squareToQuadrilateral(float x0, float y0, float x1, + float y1, float x2, float y2, + float x3, float y3) { + float dx3 = x0 - x1 + x2 - x3; + float dy3 = y0 - y1 + y2 - y3; + if (fabs(dx3) <= 1e-6 && fabs(dy3) <= 1e-6) { + Ref result( + new PerspectiveTransform(x1 - x0, x2 - x1, x0, y1 - y0, y2 - y1, y0, 0.0f, 0.0f, 1.0f)); + return result; + } else { + float dx1 = x1 - x2; + float dx2 = x3 - x2; + float dy1 = y1 - y2; + float dy2 = y3 - y2; + float denominator = dx1 * dy2 - dx2 * dy1; + float a13 = (dx3 * dy2 - dx2 * dy3) / denominator; + float a23 = (dx1 * dy3 - dx3 * dy1) / denominator; + Ref result( + new PerspectiveTransform(x1 - x0 + a13 * x1, x3 - x0 + a23 * x3, x0, y1 - y0 + a13 * y1, + y3 - y0 + a23 * y3, y0, a13, a23, 1.0f)); + return result; + } +} + +Ref PerspectiveTransform::quadrilateralToSquare(float x0, 
float y0, float x1, + float y1, float x2, float y2, + float x3, float y3) { + // Here, the adjoint serves as the inverse: + return squareToQuadrilateral(x0, y0, x1, y1, x2, y2, x3, y3)->buildAdjoint(); +} + +Ref PerspectiveTransform::buildAdjoint() { + // Adjoint is the transpose of the cofactor matrix: + Ref result(new PerspectiveTransform( + a22 * a33 - a23 * a32, a23 * a31 - a21 * a33, a21 * a32 - a22 * a31, a13 * a32 - a12 * a33, + a11 * a33 - a13 * a31, a12 * a31 - a11 * a32, a12 * a23 - a13 * a22, a13 * a21 - a11 * a23, + a11 * a22 - a12 * a21)); + return result; +} + +Ref PerspectiveTransform::times(Ref other) { + Ref result( + new PerspectiveTransform(a11 * other->a11 + a21 * other->a12 + a31 * other->a13, + a11 * other->a21 + a21 * other->a22 + a31 * other->a23, + a11 * other->a31 + a21 * other->a32 + a31 * other->a33, + a12 * other->a11 + a22 * other->a12 + a32 * other->a13, + a12 * other->a21 + a22 * other->a22 + a32 * other->a23, + a12 * other->a31 + a22 * other->a32 + a32 * other->a33, + a13 * other->a11 + a23 * other->a12 + a33 * other->a13, + a13 * other->a21 + a23 * other->a22 + a33 * other->a23, + a13 * other->a31 + a23 * other->a32 + a33 * other->a33)); + return result; +} + +void PerspectiveTransform::transformPoints(vector& points) { + int max = points.size(); + + // Modified to support stlport + float* pts = NULL; + + if (points.size() > 0) { + pts = &points[0]; + } + + for (int i = 0; i < max; i += 2) { + float x = pts[i]; + float y = pts[i + 1]; + + float denominator = a13 * x + a23 * y + a33; + + float w = 1.0f / denominator; + + pts[i] = (a11 * x + a21 * y + a31) * w; + pts[i + 1] = (a12 * x + a22 * y + a32) * w; + } +} + +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/perspective_transform.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/perspective_transform.hpp new file mode 100644 index 00000000..59e1db6f --- /dev/null +++ 
b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/perspective_transform.hpp @@ -0,0 +1,39 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_COMMON_PERSPECTIVETRANSFORM_HPP__ +#define __ZXING_COMMON_PERSPECTIVETRANSFORM_HPP__ + +#include "counted.hpp" + +#include + +namespace zxing { +class PerspectiveTransform : public Counted { +private: + float a11, a12, a13, a21, a22, a23, a31, a32, a33; + PerspectiveTransform(float a11, float a21, float a31, float a12, float a22, float a32, + float a13, float a23, float a33); + +public: + static Ref quadrilateralToQuadrilateral( + float x0, float y0, float x1, float y1, float x2, float y2, float x3, float y3, float x0p, + float y0p, float x1p, float y1p, float x2p, float y2p, float x3p, float y3p); + static Ref squareToQuadrilateral(float x0, float y0, float x1, float y1, + float x2, float y2, float x3, float y3); + static Ref quadrilateralToSquare(float x0, float y0, float x1, float y1, + float x2, float y2, float x3, float y3); + Ref buildAdjoint(); + Ref times(Ref other); + void transformPoints(std::vector& points); +}; +} // namespace zxing + +#endif // __ZXING_COMMON_PERSPECTIVETRANSFORM_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/genericgf.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/genericgf.cpp new file mode 100644 index 00000000..8e3ad184 --- /dev/null +++ 
b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/genericgf.cpp @@ -0,0 +1,99 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "genericgf.hpp" +#include "genericgfpoly.hpp" + +using zxing::ErrorHandler; +using zxing::GenericGF; +using zxing::GenericGFPoly; +using zxing::Ref; + +GenericGF::GenericGF(int primitive_, int size_, int b, ErrorHandler &err_handler) + : size(size_), primitive(primitive_), generatorBase(b) { + expTable.resize(size); + logTable.resize(size); + + int x = 1; + + for (int i = 0; i < size; i++) { + expTable[i] = x; + x <<= 1; // x = x * 2; we're assuming the generator alpha is 2 + if (x >= size) { + x ^= primitive; + x &= size - 1; + } + } + for (int i = 0; i < size - 1; i++) { + logTable[expTable[i]] = i; + } + // logTable[0] == 0 but this should never be used + zero = + Ref(new GenericGFPoly(*this, ArrayRef(new Array(1)), err_handler)); + zero->getCoefficients()[0] = 0; + one = + Ref(new GenericGFPoly(*this, ArrayRef(new Array(1)), err_handler)); + one->getCoefficients()[0] = 1; + if (err_handler.ErrCode()) return; + // initialized = true; +} + +Ref GenericGF::getZero() { return zero; } + +Ref GenericGF::getOne() { return one; } + +Ref GenericGF::buildMonomial(int degree, int coefficient, + ErrorHandler &err_handler) { + if (degree < 0) { + err_handler = IllegalArgumentErrorHandler("Degree must be non-negative"); + return Ref(); + } + if (coefficient == 0) { + return zero; + } + ArrayRef coefficients(new Array(degree + 1)); 
+ coefficients[0] = coefficient; + + Ref gfpoly(new GenericGFPoly(*this, coefficients, err_handler)); + if (err_handler.ErrCode()) return Ref(); + return gfpoly; +} + +int GenericGF::addOrSubtract(int a, int b) { return a ^ b; } + +int GenericGF::exp(int a) { return expTable[a]; } + +int GenericGF::log(int a, ErrorHandler &err_handler) { + if (a == 0) { + err_handler = IllegalArgumentErrorHandler("cannot give log(0)"); + return -1; + } + return logTable[a]; +} + +int GenericGF::inverse(int a, ErrorHandler &err_handler) { + if (a == 0) { + err_handler = IllegalArgumentErrorHandler("Cannot calculate the inverse of 0"); + return -1; + } + return expTable[size - logTable[a] - 1]; +} + +int GenericGF::multiply(int a, int b) { + if (a == 0 || b == 0) { + return 0; + } + + return expTable[(logTable[a] + logTable[b]) % (size - 1)]; +} + +int GenericGF::getSize() { return size; } + +int GenericGF::getGeneratorBase() { return generatorBase; } diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/genericgf.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/genericgf.hpp new file mode 100644 index 00000000..aabbeaf0 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/genericgf.hpp @@ -0,0 +1,58 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_COMMON_REEDSOLOMON_GENERICGF_HPP__ +#define __ZXING_COMMON_REEDSOLOMON_GENERICGF_HPP__ + +#include "../../errorhandler.hpp" +#include "../counted.hpp" + +namespace zxing { +class GenericGFPoly; + +static zxing::ErrorHandler gf_err_handler_; +#define GF_AZTEC_DATA_12 (new GenericGF(0x1069, 4096, 1, gf_err_handler_)) +#define GF_AZTEC_DATA_10 (new GenericGF(0x409, 1024, 1, gf_err_handler_)) +#define GF_AZTEC_DATA_6 (new GenericGF(0x43, 64, 1, gf_err_handler_)) +#define GF_AZTEC_PARAM (new GenericGF(0x13, 16, 1, gf_err_handler_)) +#define GF_QR_CODE_FIELD_256 (new GenericGF(0x011D, 256, 0, gf_err_handler_)) +#define GF_DATA_MATRIX_FIELD_256 (new GenericGF(0x012D, 256, 1, gf_err_handler_)) +#define GF_AZTEC_DATA_8 (new GenericGF(0x012D, 256, 1, gf_err_handler_)) +#define GF_MAXICODE_FIELD_64 (new GenericGF(0x43, 64, 1, gf_err_handler_)) +// #define GF_WXCODE (new GenericGF(0x011D, 256, 0, gf_err_handler_)) + +class GenericGF : public Counted { +private: + std::vector expTable; + std::vector logTable; + Ref zero; + Ref one; + int size; + int primitive; + int generatorBase; + +public: + GenericGF(int primitive, int size, int b, ErrorHandler &err_handler); + + Ref getZero(); + Ref getOne(); + int getSize(); + int getGeneratorBase(); + Ref buildMonomial(int degree, int coefficient, ErrorHandler &err_handler); + + static int addOrSubtract(int a, int b); + int exp(int a); + int log(int a, ErrorHandler &err_handler); + int inverse(int a, ErrorHandler &err_handler); + int multiply(int a, int b); +}; +} // namespace zxing + +#endif // __ZXING_COMMON_REEDSOLOMON_GENERICGF_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/genericgfpoly.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/genericgfpoly.cpp new file mode 100644 index 00000000..4098a379 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/genericgfpoly.cpp @@ 
-0,0 +1,231 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "genericgfpoly.hpp" +#include "genericgf.hpp" + +using zxing::ArrayRef; +using zxing::ErrorHandler; +using zxing::GenericGFPoly; +using zxing::Ref; + +// VC++ +using zxing::GenericGF; + +GenericGFPoly::GenericGFPoly(GenericGF &field, ArrayRef coefficients, + ErrorHandler &err_handler) + : field_(field) { + if (coefficients->size() == 0) { + err_handler = IllegalArgumentErrorHandler("need coefficients"); + return; + } + int coefficientsLength = coefficients->size(); + if (coefficientsLength > 1 && coefficients[0] == 0) { + // Leading term must be non-zero for anything except the constant + // polynomial "0" + int firstNonZero = 1; + while (firstNonZero < coefficientsLength && coefficients[firstNonZero] == 0) { + firstNonZero++; + } + if (firstNonZero == coefficientsLength) { + coefficients_ = field.getZero()->getCoefficients(); + } else { + coefficients_ = ArrayRef(new Array(coefficientsLength - firstNonZero)); + for (int i = 0; i < (int)coefficients_->size(); i++) { + coefficients_[i] = coefficients[i + firstNonZero]; + } + } + } else { + coefficients_ = coefficients; + } +} + +ArrayRef GenericGFPoly::getCoefficients() { return coefficients_; } + +int GenericGFPoly::getDegree() { return coefficients_->size() - 1; } + +bool GenericGFPoly::isZero() { return coefficients_[0] == 0; } + +int GenericGFPoly::getCoefficient(int degree) { + return coefficients_[coefficients_->size() - 1 - degree]; +} + +int 
GenericGFPoly::evaluateAt(int a) { + if (a == 0) { + // Just return the x^0 coefficient + return getCoefficient(0); + } + + int size = coefficients_->size(); + if (a == 1) { + // Just the sum of the coefficients + int result = 0; + for (int i = 0; i < size; i++) { + result = GenericGF::addOrSubtract(result, coefficients_[i]); + } + return result; + } + int result = coefficients_[0]; + for (int i = 1; i < size; i++) { + result = GenericGF::addOrSubtract(field_.multiply(a, result), coefficients_[i]); + } + return result; +} + +Ref GenericGFPoly::addOrSubtract(Ref other, + ErrorHandler &err_handler) { + if (!(&field_ == &other->field_)) { + err_handler = + IllegalArgumentErrorHandler("GenericGFPolys do not have same GenericGF field"); + return Ref(); + } + if (isZero()) { + return other; + } + if (other->isZero()) { + return Ref(this); + } + + ArrayRef smallerCoefficients = coefficients_; + ArrayRef largerCoefficients = other->getCoefficients(); + if (smallerCoefficients->size() > largerCoefficients->size()) { + ArrayRef temp = smallerCoefficients; + smallerCoefficients = largerCoefficients; + largerCoefficients = temp; + } + + ArrayRef sumDiff(new Array(largerCoefficients->size())); + int lengthDiff = largerCoefficients->size() - smallerCoefficients->size(); + // Copy high-order terms only found in higher-degree polynomial's + // coefficients + for (int i = 0; i < lengthDiff; i++) { + sumDiff[i] = largerCoefficients[i]; + } + + for (int i = lengthDiff; i < (int)largerCoefficients->size(); i++) { + sumDiff[i] = + GenericGF::addOrSubtract(smallerCoefficients[i - lengthDiff], largerCoefficients[i]); + } + + // return Ref(new GenericGFPoly(field_, sumDiff)); + Ref gfpoly(new GenericGFPoly(field_, sumDiff, err_handler)); + if (err_handler.ErrCode()) return Ref(); + return gfpoly; +} + +Ref GenericGFPoly::multiply(Ref other, + ErrorHandler &err_handler) { + if (!(&field_ == &other->field_)) { + err_handler = + IllegalArgumentErrorHandler("GenericGFPolys do not have same 
GenericGF field"); + return Ref(); + } + + if (isZero() || other->isZero()) { + return field_.getZero(); + } + + ArrayRef aCoefficients = coefficients_; + int aLength = aCoefficients->size(); + + ArrayRef bCoefficients = other->getCoefficients(); + int bLength = bCoefficients->size(); + + ArrayRef product(new Array(aLength + bLength - 1)); + for (int i = 0; i < aLength; i++) { + int aCoeff = aCoefficients[i]; + for (int j = 0; j < bLength; j++) { + product[i + j] = + GenericGF::addOrSubtract(product[i + j], field_.multiply(aCoeff, bCoefficients[j])); + } + } + + // return Ref(new GenericGFPoly(field_, product)); + Ref gfpoly(new GenericGFPoly(field_, product, err_handler)); + if (err_handler.ErrCode()) return Ref(); + return gfpoly; +} + +Ref GenericGFPoly::multiply(int scalar, ErrorHandler &err_handler) { + if (scalar == 0) { + return field_.getZero(); + } + if (scalar == 1) { + return Ref(this); + } + int size = coefficients_->size(); + ArrayRef product(new Array(size)); + for (int i = 0; i < size; i++) { + product[i] = field_.multiply(coefficients_[i], scalar); + } + // return Ref(new GenericGFPoly(field_, product)); + Ref gfpoly(new GenericGFPoly(field_, product, err_handler)); + if (err_handler.ErrCode()) return Ref(); + return gfpoly; +} + +Ref GenericGFPoly::multiplyByMonomial(int degree, int coefficient, + ErrorHandler &err_handler) { + if (degree < 0) { + err_handler = IllegalArgumentErrorHandler("degree must not be less then 0"); + return Ref(); + } + if (coefficient == 0) { + return field_.getZero(); + } + int size = coefficients_->size(); + ArrayRef product(new Array(size + degree)); + for (int i = 0; i < size; i++) { + product[i] = field_.multiply(coefficients_[i], coefficient); + } + // return Ref(new GenericGFPoly(field_, product)); + Ref gfpoly(new GenericGFPoly(field_, product, err_handler)); + if (err_handler.ErrCode()) return Ref(); + return gfpoly; +} + +std::vector> GenericGFPoly::divide(Ref other, + ErrorHandler &err_handler) { + if (!(&field_ 
== &other->field_)) { + err_handler = + IllegalArgumentErrorHandler("GenericGFPolys do not have same GenericGF field"); + return std::vector>(); + } + if (other->isZero()) { + err_handler = IllegalArgumentErrorHandler("divide by 0"); + return std::vector>(); + } + + Ref quotient = field_.getZero(); + Ref remainder = Ref(this); + + int denominatorLeadingTerm = other->getCoefficient(other->getDegree()); + int inverseDenominatorLeadingTerm = field_.inverse(denominatorLeadingTerm, err_handler); + if (err_handler.ErrCode()) return std::vector>(); + + while (remainder->getDegree() >= other->getDegree() && !remainder->isZero()) { + int degreeDifference = remainder->getDegree() - other->getDegree(); + int scale = field_.multiply(remainder->getCoefficient(remainder->getDegree()), + inverseDenominatorLeadingTerm); + Ref term = other->multiplyByMonomial(degreeDifference, scale, err_handler); + if (err_handler.ErrCode()) return std::vector>(); + Ref iterationQuotiont = + field_.buildMonomial(degreeDifference, scale, err_handler); + if (err_handler.ErrCode()) return std::vector>(); + quotient = quotient->addOrSubtract(iterationQuotiont, err_handler); + remainder = remainder->addOrSubtract(term, err_handler); + if (err_handler.ErrCode()) return std::vector>(); + } + + std::vector> returnValue(2); + returnValue[0] = quotient; + returnValue[1] = remainder; + return returnValue; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/genericgfpoly.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/genericgfpoly.hpp new file mode 100644 index 00000000..031c0b7b --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/genericgfpoly.hpp @@ -0,0 +1,43 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_COMMON_REEDSOLOMON_GENERICGFPOLY_HPP__ +#define __ZXING_COMMON_REEDSOLOMON_GENERICGFPOLY_HPP__ + +#include "../../errorhandler.hpp" +#include "../array.hpp" +#include "../counted.hpp" + +namespace zxing { + +class GenericGF; + +class GenericGFPoly : public Counted { +private: + GenericGF &field_; + ArrayRef coefficients_; + +public: + GenericGFPoly(GenericGF &field, ArrayRef coefficients, ErrorHandler &err_handler); + ArrayRef getCoefficients(); + int getDegree(); + bool isZero(); + int getCoefficient(int degree); + int evaluateAt(int a); + Ref addOrSubtract(Ref other, ErrorHandler &err_handler); + Ref multiply(Ref other, ErrorHandler &err_handler); + Ref multiply(int scalar, ErrorHandler &err_handler); + Ref multiplyByMonomial(int degree, int coefficient, ErrorHandler &err_handler); + std::vector> divide(Ref other, ErrorHandler &err_handler); +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_REEDSOLOMON_GENERICGFPOLY_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/reed_solomon_decoder.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/reed_solomon_decoder.cpp new file mode 100644 index 00000000..fec9c828 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/reed_solomon_decoder.cpp @@ -0,0 +1,185 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. 
+// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "reed_solomon_decoder.hpp" + +using zxing::ArrayRef; +using zxing::ErrorHandler; +using zxing::GenericGFPoly; +using zxing::ReedSolomonDecoder; +using zxing::Ref; +using zxing::GenericGF; + +ReedSolomonDecoder::ReedSolomonDecoder(Ref field_) : field(field_) {} + +ReedSolomonDecoder::~ReedSolomonDecoder() {} + +void ReedSolomonDecoder::decode(ArrayRef received, int twoS, ErrorHandler &err_handler) { + Ref poly(new GenericGFPoly(*field, received, err_handler)); + if (err_handler.ErrCode()) return; + ArrayRef syndromeCoefficients(twoS); + bool noError = true; + for (int i = 0; i < twoS; i++) { + int eval = poly->evaluateAt(field->exp(i + field->getGeneratorBase())); + syndromeCoefficients[syndromeCoefficients->size() - 1 - i] = eval; + if (eval != 0) { + noError = false; + } + } + if (noError) { + return; + } + Ref syndrome(new GenericGFPoly(*field, syndromeCoefficients, err_handler)); + Ref monomial = field->buildMonomial(twoS, 1, err_handler); + if (!monomial || err_handler.ErrCode()) { + err_handler = ErrorHandler("buildMonomial was zero"); + return; + } + vector> sigmaOmega = + runEuclideanAlgorithm(monomial, syndrome, twoS, err_handler); + if (err_handler.ErrCode()) return; + + Ref sigma = sigmaOmega[0]; + Ref omega = sigmaOmega[1]; + ArrayRef errorLocations = findErrorLocations(sigma, err_handler); + if (err_handler.ErrCode()) return; + + ArrayRef errorMagitudes = findErrorMagnitudes(omega, errorLocations, err_handler); + if (err_handler.ErrCode()) return; + for (int i = 0; i < errorLocations->size(); i++) { + int position = received->size() - 1 - field->log(errorLocations[i], err_handler); + if (position < 0 || err_handler.ErrCode()) { + err_handler = ErrorHandler("Bad error location"); + return; + } + received[position] 
= GenericGF::addOrSubtract(received[position], errorMagitudes[i]); + } +} + +vector> ReedSolomonDecoder::runEuclideanAlgorithm(Ref a, + Ref b, int R, + ErrorHandler &err_handler) { + vector> result(2); + + // Assume a's degree is >= b's + if (a->getDegree() < b->getDegree()) { + Ref tmp = a; + a = b; + b = tmp; + } + + Ref rLast(a); + Ref r(b); + Ref tLast(field->getZero()); + Ref t(field->getOne()); + + // Run Euclidean algorithm until r's degree is less than R/2 + while (r->getDegree() >= R / 2) { + Ref rLastLast(rLast); + Ref tLastLast(tLast); + rLast = r; + tLast = t; + + // Divide rLastLast by rLast, with quotient q and remainder r + if (rLast->isZero()) { + // Oops, Euclidean algorithm already terminated? + err_handler = ErrorHandler("r_{i-1} was zero"); + return vector>(); + } + r = rLastLast; + Ref q = field->getZero(); + int denominatorLeadingTerm = rLast->getCoefficient(rLast->getDegree()); + int dltInverse = field->inverse(denominatorLeadingTerm, err_handler); + if (err_handler.ErrCode()) return vector>(); + while (r->getDegree() >= rLast->getDegree() && !r->isZero()) { + int degreeDiff = r->getDegree() - rLast->getDegree(); + int scale = field->multiply(r->getCoefficient(r->getDegree()), dltInverse); + q = q->addOrSubtract(field->buildMonomial(degreeDiff, scale, err_handler), err_handler); + r = r->addOrSubtract(rLast->multiplyByMonomial(degreeDiff, scale, err_handler), + err_handler); + if (err_handler.ErrCode()) return vector>(); + } + + Ref tmp = q->multiply(tLast, err_handler); + if (err_handler.ErrCode()) return vector>(); + t = tmp->addOrSubtract(tLastLast, err_handler); + if (err_handler.ErrCode()) return vector>(); + + if (r->getDegree() >= rLast->getDegree()) { + err_handler = ErrorHandler("Division algorithm failed to reduce polynomial?"); + return vector>(); + } + } + + int sigmaTildeAtZero = t->getCoefficient(0); + if (sigmaTildeAtZero == 0) { + err_handler = ErrorHandler("sigmaTilde(0) was zero"); + return vector>(); + } + + int inverse = 
field->inverse(sigmaTildeAtZero, err_handler); + Ref sigma(t->multiply(inverse, err_handler)); + Ref omega(r->multiply(inverse, err_handler)); + if (err_handler.ErrCode()) return vector>(); + + result[0] = sigma; + result[1] = omega; + return result; +} + +ArrayRef ReedSolomonDecoder::findErrorLocations(Ref errorLocator, + ErrorHandler &err_handler) { + // This is a direct application of Chien's search + int numErrors = errorLocator->getDegree(); + if (numErrors == 1) { // shortcut + ArrayRef result(new Array(1)); + result[0] = errorLocator->getCoefficient(1); + return result; + } + ArrayRef result(new Array(numErrors)); + int e = 0; + for (int i = 1; i < field->getSize() && e < numErrors; i++) { + if (errorLocator->evaluateAt(i) == 0) { + result[e] = field->inverse(i, err_handler); + e++; + } + } + if (e != numErrors || err_handler.ErrCode()) { + err_handler = ErrorHandler("Error locator degree does not match number of root"); + return ArrayRef(); + } + return result; +} + +ArrayRef ReedSolomonDecoder::findErrorMagnitudes(Ref errorEvaluator, + ArrayRef errorLocations, + ErrorHandler &err_handler) { + // This is directly applying Forney's Formula + int s = errorLocations->size(); + ArrayRef result(new Array(s)); + for (int i = 0; i < s; i++) { + int xiInverse = field->inverse(errorLocations[i], err_handler); + int denominator = 1; + for (int j = 0; j < s; j++) { + if (i != j) { + int term = field->multiply(errorLocations[j], xiInverse); + int termPlus1 = (term & 0x1) == 0 ? 
term | 1 : term & ~1; + denominator = field->multiply(denominator, termPlus1); + } + } + result[i] = field->multiply(errorEvaluator->evaluateAt(xiInverse), + field->inverse(denominator, err_handler)); + if (field->getGeneratorBase() != 0) { + result[i] = field->multiply(result[i], xiInverse); + } + } + if (err_handler.ErrCode()) return ArrayRef(); + return result; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/reed_solomon_decoder.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/reed_solomon_decoder.hpp new file mode 100644 index 00000000..15271dd0 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/reedsolomon/reed_solomon_decoder.hpp @@ -0,0 +1,43 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_COMMON_REEDSOLOMON_REEDSOLOMONDECODER_HPP__ +#define __ZXING_COMMON_REEDSOLOMON_REEDSOLOMONDECODER_HPP__ + +#include "../../errorhandler.hpp" +#include "../array.hpp" +#include "../counted.hpp" +#include "genericgf.hpp" +#include "genericgfpoly.hpp" + +namespace zxing { +class GenericGFPoly; +class GenericGF; + +class ReedSolomonDecoder { +private: + Ref field; + +public: + explicit ReedSolomonDecoder(Ref fld); + ~ReedSolomonDecoder(); + void decode(ArrayRef received, int twoS, ErrorHandler &err_handler); + std::vector> runEuclideanAlgorithm(Ref a, + Ref b, int R, + ErrorHandler &err_handler); + +private: + ArrayRef findErrorLocations(Ref errorLocator, ErrorHandler &err_handler); + ArrayRef findErrorMagnitudes(Ref errorEvaluator, + ArrayRef errorLocations, ErrorHandler &err_handler); +}; +} // namespace zxing + +#endif // __ZXING_COMMON_REEDSOLOMON_REEDSOLOMONDECODER_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/str.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/str.cpp new file mode 100644 index 00000000..1de3dd02 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/str.cpp @@ -0,0 +1,96 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../precomp.hpp" +#include "str.hpp" + +using zxing::Ref; +using zxing::String; +using zxing::StrUtil; +String::String(const std::string& text) : text_(text) {} + +String::String(int capacity) { text_.reserve(capacity); } + +const std::string& String::getText() const { return text_; } + +char String::charAt(int i) const { return text_[i]; } + +int String::size() const { return text_.size(); } + +int String::length() const { return text_.size(); } + +Ref String::substring(int i) const { return Ref(new String(text_.substr(i))); } + +Ref String::substring(int start, int end) const { + return Ref(new String(text_.substr(start, (end - start)))); +} + +void String::append(const std::string& tail) { text_.append(tail); } + +void String::append(char c) { text_.append(1, c); } + +void String::append(int d) { + string str = StrUtil::numberToString(d); + text_.append(str); +} + +void String::append(Ref str) { append(str->getText()); } + +string StrUtil::COMBINE_STRING(string str1, string str2) { + string str = str1; + str += str2; + return str; +} + +string StrUtil::COMBINE_STRING(string str1, char c) { + string str = str1; + str += c; + return str; +} + +string StrUtil::COMBINE_STRING(string str1, int d) { + string str = str1; + str += numberToString(d); + return str; +} + +Ref StrUtil::COMBINE_STRING(char c1, Ref content, char c2) { + Ref str(new String(0)); + str->append(c1); + str->append(content); + str->append(c2); + + return str; +} + +template +string StrUtil::numberToString(T Number) { + ostringstream ss; + ss << Number; + return ss.str(); +} + +template +T StrUtil::stringToNumber(const string& Text) { + std::istringstream ss(Text); + T result; + return ss >> result ? 
result : 0; +} + +int StrUtil::indexOf(const char* str, char c) { + int len = strlen(str); + + for (int i = 0; i < len; i++) { + if (str[i] == c) { + return i; + } + } + + return -1; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/str.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/str.hpp new file mode 100644 index 00000000..3d20910f --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/str.hpp @@ -0,0 +1,58 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_COMMON_STR_HPP__ +#define __ZXING_COMMON_STR_HPP__ + +#include "counted.hpp" + +#include +#include +using std::string; +namespace zxing { + +class String : public Counted { +private: + std::string text_; + +public: + explicit String(const std::string& text); + explicit String(int); + char charAt(int) const; + Ref substring(int) const; + Ref substring(int, int) const; + const std::string& getText() const; + int size() const; + void append(std::string const& tail); + void append(char c); + void append(int d); + void append(Ref str); + int length() const; +}; + +class StrUtil { +public: + static string COMBINE_STRING(string str1, string str2); + static string COMBINE_STRING(string str1, char c); + static string COMBINE_STRING(string str1, int d); + static Ref COMBINE_STRING(char c1, Ref content, char c2); + + template + static string numberToString(T Number); + + template + static T stringToNumber(const string& Text); + + static int indexOf(const char* str, char c); +}; + +} // namespace zxing + +#endif // __ZXING_COMMON_STR_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/stringutils.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/stringutils.cpp new file mode 100644 index 00000000..ed9fe3e9 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/stringutils.cpp @@ -0,0 +1,683 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../precomp.hpp" +#include "../common/stringutils.hpp" +#include "../decodehints.hpp" + +using namespace zxing::common; + +// N.B.: these are the iconv strings for at least some versions of iconv + +char const* const StringUtils::PLATFORM_DEFAULT_ENCODING = "ANY"; +char const* const StringUtils::ASCII = "ASCII"; +char const* const StringUtils::SHIFT_JIS = "SHIFT-JIS"; +char const* const StringUtils::GBK = "GBK"; +char const* const StringUtils::EUC_JP = "EUC-JP"; +char const* const StringUtils::UTF8 = "UTF-8"; +char const* const StringUtils::ISO88591 = "ISO8859-1"; +char const* const StringUtils::GB2312 = "GB2312"; +char const* const StringUtils::BIG5 = "BIG5"; +char const* const StringUtils::GB18030 = "GB18030"; + +const bool StringUtils::ASSUME_SHIFT_JIS = false; + +#ifdef USE_UCHARDET +#include "uchardet/uchardet.h" +#endif + +// Added convertString +#ifndef NO_ICONV +#include + +// Required for compatibility. TODO: test on Symbian +//#ifdef ZXING_ICONV_CONST +#undef ICONV_CONST +#define ICONV_CONST const +//#endif + +#ifndef ICONV_CONST +#define ICONV_CONST /**/ +#endif + +// Add this to fix both Mac and Windows compilers +// by Skylook +template +class sloppy {}; + +// convert between T** and const T** +template +class sloppy { + T** t; + +public: + sloppy(T** mt) : t(mt) {} + sloppy(const T** mt) : t(const_cast(mt)) {} + + operator T* *() const { return t; } + operator const T* *() const { return const_cast(t); } +}; +#endif + +string StringUtils::convertString(const char* rawData, int length, const char* fromCharset, + const char* toCharset) { + string result; + const char* bufIn = rawData; + int nIn = length; + + // If from and to charset are the same, return + int ret = strcmp(fromCharset, toCharset); + if (ret == 0) { + result.append((const char*)bufIn, nIn); + return result; + } + +#ifndef NO_ICONV + if (nIn == 0) { + return ""; + } + iconv_t cd; + // cout< 0) { + // size_t oneway = iconv(cd, &fromPtr, &nFrom, &toPtr, &nTo); + oneway = 
iconv(cd, sloppy(&fromPtr), &nFrom, sloppy(&toPtr), &nTo); + } + iconv_close(cd); + + int nResult = maxOut - nTo; + bufOut[nResult] = '\0'; + result.append((const char*)bufOut); + delete[] bufOut; + + // Cannot convert string + if (oneway == (size_t)(-1)) { + // result.append((const char *)bufIn, nIn); + result = ""; + } +#else + result.append((const char*)bufIn, nIn); +#endif + + return result; +} + +string StringUtils::guessEncoding(char* bytes, int length) { +#ifdef USE_UCHARDET + if (length < 10) { + return guessEncodingZXing(bytes, length); + } else { + return guessEncodingUCharDet(bytes, length); + } +#else + return guessEncodingZXing(bytes, length); +#endif +} + +#ifdef USE_UCHARDET + +string StringUtils::guessEncodingUCharDet(char* bytes, int length) { + uchardet_t handle = uchardet_new(); + + int retval = uchardet_handle_data(handle, bytes, length); + + if (retval != 0) { + fprintf(stderr, "Handle data error.\n"); + exit(0); + } + + uchardet_data_end(handle); + + const char* charset = uchardet_get_charset(handle); + + string charsetStr(charset); + + uchardet_delete(handle); + + if (charsetStr.size() != 0) { + return charsetStr; + } else { + return guessEncodingZXing(bytes, length); + } + + // Otherwise, we take a wild guess with platform encoding + // return PLATFORM_DEFAULT_ENCODING; +} +#endif + +string StringUtils::guessEncodingZXing(char* bytes, int length) { + // + // typedef bool boolean; + // For now, merely tries to distinguish ISO-8859-1, UTF-8 and Shift_JIS, + // which should be by far the most common encodings. 
+ bool canBeISO88591 = true; + bool canBeShiftJIS = true; + bool canBeUTF8 = true; + bool canBeGB2312 = true; + bool canBeGBK = true; + bool canBeBIG5 = true; + bool canBeASCII = true; + + int utf8BytesLeft = 0; + int utf2BytesChars = 0; + int utf3BytesChars = 0; + int utf4BytesChars = 0; + int sjisBytesLeft = 0; + int sjisKatakanaChars = 0; + int sjisCurKatakanaWordLength = 0; + int sjisCurDoubleBytesWordLength = 0; + int sjisMaxKatakanaWordLength = 0; + int sjisMaxDoubleBytesWordLength = 0; + int isoHighOther = 0; + + int gb2312SCByteChars = 0; + int big5TWBytesChars = 0; + + bool utf8bom = length > 3 && (unsigned char)bytes[0] == 0xEF && + (unsigned char)bytes[1] == 0xBB && (unsigned char)bytes[2] == 0xBF; + + for (int i = 0; i < length && (canBeISO88591 || canBeShiftJIS || canBeUTF8 || canBeGBK); i++) { + int value = bytes[i] & 0xFF; + + // UTF-8 stuff + if (canBeUTF8) { + if (utf8BytesLeft > 0) { + if ((value & 0x80) == 0) { + canBeUTF8 = false; + } else { + utf8BytesLeft--; + } + } else if ((value & 0x80) != 0) { + if ((value & 0x40) == 0) { + canBeUTF8 = false; + } else { + utf8BytesLeft++; + if ((value & 0x20) == 0) { + utf2BytesChars++; + } else { + utf8BytesLeft++; + if ((value & 0x10) == 0) { + utf3BytesChars++; + } else { + utf8BytesLeft++; + if ((value & 0x08) == 0) { + utf4BytesChars++; + } else { + canBeUTF8 = false; + } + } + } + } + } + } + // Shift_JIS stuff + if (canBeShiftJIS) { + if (sjisBytesLeft > 0) { + if (value < 0x40 || value == 0x7F || value > 0xFC) { + canBeShiftJIS = false; + } else { + sjisBytesLeft--; + } + } else if (value == 0x80 || value == 0xA0 || value > 0xEF) { + canBeShiftJIS = false; + } else if (value > 0xA0 && value < 0xE0) { + sjisKatakanaChars++; + sjisCurDoubleBytesWordLength = 0; + sjisCurKatakanaWordLength++; + if (sjisCurKatakanaWordLength > sjisMaxKatakanaWordLength) { + sjisMaxKatakanaWordLength = sjisCurKatakanaWordLength; + } + } else if (value > 0x7F) { + sjisBytesLeft++; + // sjisDoubleBytesChars++; + 
sjisCurKatakanaWordLength = 0; + sjisCurDoubleBytesWordLength++; + if (sjisCurDoubleBytesWordLength > sjisMaxDoubleBytesWordLength) { + sjisMaxDoubleBytesWordLength = sjisCurDoubleBytesWordLength; + } + } else { + // sjisLowChars++; + sjisCurKatakanaWordLength = 0; + sjisCurDoubleBytesWordLength = 0; + } + } + + // ISO-8859-1 stuff + if (canBeISO88591) { + if (value > 0x7F && value < 0xA0) { + canBeISO88591 = false; + } else if (value > 0x9F) { + if (value < 0xC0 || value == 0xD7 || value == 0xF7) { + isoHighOther++; + } + } + } + } + + // Get how many chinese sc & tw words + gb2312SCByteChars = is_gb2312_code(bytes, length); + big5TWBytesChars = is_big5_code(bytes, length); + + if (gb2312SCByteChars <= 0) { + canBeGB2312 = false; + } + + if (big5TWBytesChars <= 0) { + canBeBIG5 = false; + } + + if (!is_gbk_code(bytes, length)) { + canBeGBK = false; + } + + if (canBeUTF8 && utf8BytesLeft > 0) { + canBeUTF8 = false; + } + if (canBeShiftJIS && sjisBytesLeft > 0) { + canBeShiftJIS = false; + } + + if (is_ascii_code(bytes, length) <= 0) { + canBeASCII = false; + } + + // Easy -- if there is BOM or at least 1 valid not-single byte character + // (and no evidence it can't be UTF-8), done + if (canBeUTF8 && (utf8bom || utf2BytesChars + utf3BytesChars + utf4BytesChars > 0)) { + return UTF8; + } + + // if ( canBeBIG5 == false && canBeGB2312 == false ) + int chineseWordLen = + gb2312SCByteChars > big5TWBytesChars ? 
gb2312SCByteChars : big5TWBytesChars; + int chineseByteLen = chineseWordLen * 2; + int japaneseByteLen = sjisMaxKatakanaWordLength + sjisMaxDoubleBytesWordLength * 2; + + // if ( chineseByteLen < japaneseByteLen || (japaneseByteLen == 0 && + // chineseByteLen == 0) ) if ( (gb2312SCByteChars < sjisKatakanaChars) && + // (big5TWBytesChars < sjisKatakanaChars) ) + //{ + // Easy -- if assuming Shift_JIS or at least 3 valid consecutive not-ascii + // characters (and no evidence it can't be), done + if (canBeShiftJIS && + (ASSUME_SHIFT_JIS || sjisMaxKatakanaWordLength >= 3 || sjisMaxDoubleBytesWordLength >= 3)) { + // return SHIFT_JIS; + if (chineseByteLen <= japaneseByteLen) { + if (chineseByteLen == japaneseByteLen) { + if (chineseWordLen < sjisKatakanaChars) { + return SHIFT_JIS; + } + } else { + return SHIFT_JIS; + } + } + } + + // Distinguishing Shift_JIS and ISO-8859-1 can be a little tough for short + // words. The crude heuristic is: + // - If we saw + // - only two consecutive katakana chars in the whole text, or + // - at least 10% of bytes that could be "upper" not-alphanumeric Latin1, + // - then we conclude Shift_JIS, else ISO-8859-1 + if (canBeISO88591 && canBeShiftJIS) { + if ((sjisMaxKatakanaWordLength == 2 && sjisKatakanaChars == 2) || + isoHighOther * 10 >= length) { + /* + if ( chineseByteLen < japaneseByteLen ) + { + return SHIFT_JIS; + } + */ + if (chineseByteLen <= japaneseByteLen) { + if (chineseByteLen == japaneseByteLen) { + if (chineseWordLen < sjisKatakanaChars) { + return SHIFT_JIS; + } + } else { + return SHIFT_JIS; + } + } + } else { + if (chineseByteLen <= 0 && !canBeGB2312 && !canBeBIG5) { + return ISO88591; + } + } + } + //} + + // Otherwise, try in order ISO-8859-1, Shift JIS, UTF-8 and fall back to + // default platform encoding + if (canBeGB2312) { + return GB2312; + } + + if (canBeBIG5) { + return BIG5; + } + + if (canBeShiftJIS) { + return SHIFT_JIS; + } + + if (canBeGBK) { + return GBK; + } + + if (canBeISO88591) { + return 
ISO88591; + } + + if (canBeUTF8) { + return UTF8; + } + + if (canBeASCII) { + return ASCII; + } + + // Otherwise, we take a wild guess with platform encoding + return PLATFORM_DEFAULT_ENCODING; +} + +// judge the byte whether begin with binary 10 +int StringUtils::is_utf8_special_byte(unsigned char c) { + unsigned char special_byte = 0X02; // binary 00000010 + if (c >> 6 == special_byte) { + return 1; + } else { + return 0; + } +} + +int StringUtils::is_utf8_code(char* str, int length) { + unsigned char one_byte = 0X00; // binary 00000000 + unsigned char two_byte = 0X06; // binary 00000110 + unsigned char three_byte = 0X0E; // binary 00001110 + unsigned char four_byte = 0X1E; // binary 00011110 + unsigned char five_byte = 0X3E; // binary 00111110 + unsigned char six_byte = 0X7E; // binary 01111110 + + int utf8_yes = 0; + int utf8_no = 0; + + unsigned char k = 0; + unsigned char m = 0; + unsigned char n = 0; + unsigned char p = 0; + unsigned char q = 0; + + unsigned char c = 0; + for (int i = 0; i < length;) { + c = (unsigned char)str[i]; + if (c >> 7 == one_byte) { + i++; + continue; + } else if (c >> 5 == two_byte) { + k = (unsigned char)str[i + 1]; + if (is_utf8_special_byte(k)) { + utf8_yes++; + i += 2; + continue; + } + } else if (c >> 4 == three_byte) { + m = (unsigned char)str[i + 1]; + n = (unsigned char)str[i + 2]; + if (is_utf8_special_byte(m) && is_utf8_special_byte(n)) { + utf8_yes++; + i += 3; + continue; + } + } else if (c >> 3 == four_byte) { + k = (unsigned char)str[i + 1]; + m = (unsigned char)str[i + 2]; + n = (unsigned char)str[i + 3]; + if (is_utf8_special_byte(k) && is_utf8_special_byte(m) && is_utf8_special_byte(n)) { + utf8_yes++; + i += 4; + continue; + } + } else if (c >> 2 == five_byte) { + k = (unsigned char)str[i + 1]; + m = (unsigned char)str[i + 2]; + n = (unsigned char)str[i + 3]; + p = (unsigned char)str[i + 4]; + if (is_utf8_special_byte(k) && is_utf8_special_byte(m) && is_utf8_special_byte(n) && + is_utf8_special_byte(p)) { + 
utf8_yes++; + i += 5; + continue; + } + } else if (c >> 1 == six_byte) { + k = (unsigned char)str[i + 1]; + m = (unsigned char)str[i + 2]; + n = (unsigned char)str[i + 3]; + p = (unsigned char)str[i + 4]; + q = (unsigned char)str[i + 5]; + if (is_utf8_special_byte(k) && is_utf8_special_byte(m) && is_utf8_special_byte(n) && + is_utf8_special_byte(p) && is_utf8_special_byte(q)) { + utf8_yes++; + i += 6; + continue; + } + } + + utf8_no++; + i++; + } + + // printf("uft8_yes: %d utf8_no:%d\n", utf8_yes, utf8_no); + if ((utf8_yes + utf8_no) != 0) { + int ret = (100 * utf8_yes) / (utf8_yes + utf8_no); + if (ret > 90) { + return 1; + } else { + return 0; + } + } + return 0; +} +int StringUtils::is_gb2312_code(char* str, int length) { + unsigned char one_byte = 0X00; // binary 00000000 + + int gb2312_yes = 0; + int gb2312_no = 0; + + unsigned char k = 0; + + unsigned char c = 0; + for (int i = 0; i < length;) { + c = (unsigned char)str[i]; + if (c >> 7 == one_byte) { + i++; + continue; + } else if (c >= 0XA1 && c <= 0XF7) { + k = (unsigned char)str[i + 1]; + if (k >= 0XA1 && k <= 0XFE) { + gb2312_yes++; + i += 2; + continue; + } + } + + gb2312_no++; + i += 2; + } + + // printf("gb2312_yes: %d gb2312_no:%d\n", gb2312_yes, gb2312_no); + if ((gb2312_yes + gb2312_no) > 0) { + int ret = (100 * gb2312_yes) / (gb2312_yes + gb2312_no); + if (ret == 100) { + // if (ret > 90) { + // gb2312SCByteChars = gb2312_yes; + return gb2312_yes; + } else { + return 0; + } + } + return 0; +} + +int StringUtils::is_big5_code(char* str, int length) { + unsigned char one_byte = 0X00; // binary 00000000 + + int big5_yes = 0; + int big5_no = 0; + + unsigned char k = 0; + + unsigned char c = 0; + for (int i = 0; i < length;) { + c = (unsigned char)str[i]; + if (c >> 7 == one_byte) { + i++; + continue; + } else if (c >= 0XA1 && c <= 0XF9) { + k = (unsigned char)str[i + 1]; + if ((k >= 0X40 && k <= 0X7E) || (k >= 0XA1 && k <= 0XFE)) { + big5_yes++; + i += 2; + continue; + } + } + + big5_no++; + i += 2; 
+ } + + // printf("%d %d\n", big5_yes, big5_no); + if ((big5_yes + big5_no) > 0) { + int ret = (100 * big5_yes) / (big5_yes + big5_no); + if (ret == 100) { + // if (ret > 90) { + // big5TWBytesChars = big5_yes; + return big5_yes; + } else { + return 0; + } + } + return 0; +} + +int StringUtils::is_gbk_code(char* str, int length) { + unsigned char one_byte = 0X00; // binary 00000000 + + int gbk_yes = 0; + int gbk_no = 0; + + unsigned char k = 0; + + unsigned char c = 0; + for (int i = 0; i < length;) { + c = (unsigned char)str[i]; + if (c >> 7 == one_byte) { + i++; + continue; + } else if (c >= 0X81 && c <= 0XFE) { + k = (unsigned char)str[i + 1]; + if (k >= 0X40 && k <= 0XFE) { + gbk_yes++; + i += 2; + continue; + } + } + + gbk_no++; + i += 2; + } + + // printf("gbk_yes: %d gbk_no:%d\n", gbk_yes, gbk_no); + if ((gbk_yes + gbk_no) > 0) { + int ret = (100 * gbk_yes) / (gbk_yes + gbk_no); + if (ret == 100) { + // if (ret > 90) { + return 1; + } else { + return 0; + } + } + return 0; +} + +int StringUtils::is_ascii_code(char* str, int length) { + unsigned char c = 0; + + bool isASCII = true; + + for (int i = 0; i < length; i++) { + c = (unsigned char)str[i]; + + if ((c > 127)) { + isASCII = false; + } + } + return (isASCII ? 1 : -1); +} + +//#define DEBUG + +int StringUtils::shift_jis_to_jis(const unsigned char* may_be_shift_jis, int* jis_first_ptr, + int* jis_second_ptr) { + int status = 0; + unsigned char first = may_be_shift_jis[0]; + unsigned char second = may_be_shift_jis[1]; + int jis_first = 0; + int jis_second = 0; + /* Check first byte is valid shift JIS. 
*/ + if ((first >= 0x81 && first <= 0x84) || (first >= 0x87 && first <= 0x9f)) { + jis_first = 2 * (first - 0x70) - 1; + if (second >= 0x40 && second <= 0x9e) { + jis_second = second - 31; + if (jis_second > 95) { + jis_second -= 1; + } + + status = 1; + } else if (second >= 0x9f && second <= 0xfc) { + jis_second = second - 126; + jis_first += 1; + status = 1; + } else { + } + } else if (first >= 0xe0 && first <= 0xef) { + jis_first = 2 * (first - 0xb0) - 1; + if (second >= 0x40 && second <= 0x9e) { + jis_second = second - 31; + if (jis_second > 95) { + jis_second -= 1; + } + status = 1; + } else if (second >= 0x9f && second <= 0xfc) { + jis_second = second - 126; + jis_first += 1; + status = 1; + } + } else { + } + *jis_first_ptr = jis_first; + *jis_second_ptr = jis_second; + return status; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/stringutils.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/stringutils.hpp new file mode 100644 index 00000000..cf58cbd4 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/stringutils.hpp @@ -0,0 +1,65 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_COMMON_STRINGUTILS_HPP__ +#define __ZXING_COMMON_STRINGUTILS_HPP__ + +#include "../decodehints.hpp" +#include "../zxing.hpp" + +#include +#include + +namespace zxing { +namespace common { +class StringUtils; +} +} // namespace zxing +using namespace std; + +class zxing::common::StringUtils { +private: + static char const* const PLATFORM_DEFAULT_ENCODING; + +public: + static char const* const ASCII; + static char const* const SHIFT_JIS; + static char const* const GB2312; + static char const* const EUC_JP; + static char const* const UTF8; + static char const* const ISO88591; + static char const* const GBK; + static char const* const GB18030; + static char const* const BIG5; + + static const bool ASSUME_SHIFT_JIS; + + static std::string guessEncoding(char* bytes, int length); + static std::string guessEncodingZXing(char* bytes, int length); + +#ifdef USE_UCHARDET + static std::string guessEncodingUCharDet(char* bytes, int length); +#endif + + static int is_utf8_special_byte(unsigned char c); + // static int is_utf8_code(const string& str); + static int is_utf8_code(char* str, int length); + static int is_gb2312_code(char* str, int length); + static int is_big5_code(char* str, int length); + static int is_gbk_code(char* str, int length); + static int is_ascii_code(char* str, int length); + static int shift_jis_to_jis(const unsigned char* may_be_shift_jis, int* jis_first_ptr, + int* jis_second_ptr); + + static std::string convertString(const char* rawData, int length, const char* fromCharset, + const char* toCharset); +}; + +#endif // __ZXING_COMMON_STRINGUTILS_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/unicomblock.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/unicomblock.cpp new file mode 100644 index 00000000..652458e9 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/unicomblock.cpp @@ -0,0 +1,127 @@ +// This file is part of OpenCV 
project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +#include "../../precomp.hpp" +#include "unicomblock.hpp" + +namespace zxing { +short UnicomBlock::SEARCH_POS[4][2] = {{1, 0}, {-1, 0}, {0, 1}, {0, -1}}; +UnicomBlock::UnicomBlock(int iMaxHeight, int iMaxWidth) + : m_iHeight(iMaxHeight), m_iWidth(iMaxWidth), m_bInit(false) {} + +UnicomBlock::~UnicomBlock() {} + +void UnicomBlock::Init() { + if (m_bInit) return; + m_vcIndex = std::vector(m_iHeight * m_iWidth, 0); + m_vcCount = std::vector(m_iHeight * m_iWidth, 0); + m_vcMinPnt = std::vector(m_iHeight * m_iWidth, 0); + m_vcMaxPnt = std::vector(m_iHeight * m_iWidth, 0); + m_vcQueue = std::vector(m_iHeight * m_iWidth, 0); + m_bInit = true; +} + +void UnicomBlock::Reset(Ref poImage) { + m_poImage = poImage; + memset(&m_vcIndex[0], 0, m_vcIndex.size() * sizeof(short)); + m_iNowIdx = 0; +} + +unsigned short UnicomBlock::GetUnicomBlockIndex(int y, int x) { + if (y >= m_iHeight || x >= m_iWidth) return 0; + if (m_vcIndex[y * m_iWidth + x]) return m_vcIndex[y * m_iWidth + x]; + Bfs(y, x); + return m_vcIndex[y * m_iWidth + x]; +} + +int UnicomBlock::GetUnicomBlockSize(int y, int x) { + if (y >= m_iHeight || x >= m_iWidth) return 0; + if (m_vcIndex[y * m_iWidth + x]) return m_vcCount[y * m_iWidth + x]; + Bfs(y, x); + return m_vcCount[y * m_iWidth + x]; +} + +int UnicomBlock::GetMinPoint(int y, int x, int &iMinY, int &iMinX) { + if (y >= m_iHeight || x >= m_iWidth) return -1; + if (m_vcIndex[y * m_iWidth + x]) { + iMinY = m_vcMinPnt[y * m_iWidth + x] >> 16; + iMinX = m_vcMinPnt[y * m_iWidth + x] & (0xFFFF); + return 0; + } + Bfs(y, x); + iMinY = m_vcMinPnt[y * m_iWidth + x] >> 16; + iMinX = m_vcMinPnt[y * m_iWidth + x] & (0xFFFF); + return 
0; +} + +int UnicomBlock::GetMaxPoint(int y, int x, int &iMaxY, int &iMaxX) { + if (y >= m_iHeight || x >= m_iWidth) return -1; + if (m_vcIndex[y * m_iWidth + x]) { + iMaxY = m_vcMaxPnt[y * m_iWidth + x] >> 16; + iMaxX = m_vcMaxPnt[y * m_iWidth + x] & (0xFFFF); + return 0; + } + Bfs(y, x); + iMaxY = m_vcMaxPnt[y * m_iWidth + x] >> 16; + iMaxX = m_vcMaxPnt[y * m_iWidth + x] & (0xFFFF); + return 0; +} + +void UnicomBlock::Bfs(int y, int x) { + m_iNowIdx++; + + int iFront = 0; + int iTail = 0; + int iCount = 1; + + int iMaxX = x, iMaxY = y; + int iMinX = x, iMinY = y; + + const bool bValue = (m_poImage->get(x, y) != (unsigned char)0); + + m_vcIndex[y * m_iWidth + x] = m_iNowIdx; + m_vcQueue[iTail++] = y << 16 | x; + + while (iFront < iTail) { + int iNode = m_vcQueue[iFront++]; + int iX = iNode & (0xFFFF); + int iY = iNode >> 16; + iMaxX = max(iX, iMaxX); + iMaxY = max(iY, iMaxY); + iMinX = min(iX, iMinX); + iMinY = min(iY, iMinY); + + iCount++; + + for (int i = 0; i < 4; ++i) { + const int iNextX = iX + SEARCH_POS[i][0], iNextY = iY + SEARCH_POS[i][1]; + const int iPosition = iNextY * m_iWidth + iNextX; + + if (iPosition >= 0 && iPosition < int(m_vcIndex.size()) && 0 == m_vcIndex[iPosition]) { + if (iNextX < 0 || iNextX >= m_poImage->getWidth() || iNextY < 0 || + iNextY >= m_poImage->getHeight() || + bValue != (m_poImage->get(iNextX, iNextY) != (unsigned char)0)) + continue; + + m_vcIndex[iPosition] = m_iNowIdx; + m_vcQueue[iTail++] = iNextY << 16 | iNextX; + } + } + } + + if (iCount >= (1 << 16) - 1) iCount = 0xFFFF; + + const int iMinCombine = iMinY << 16 | iMinX; + const int iMaxCombine = iMaxY << 16 | iMaxX; + for (int i = 0; i < iTail; ++i) { + const int iPosition = (m_vcQueue[i] >> 16) * m_iWidth + (m_vcQueue[i] & (0xFFFF)); + + m_vcCount[iPosition] = iCount; + m_vcMinPnt[iPosition] = iMinCombine; + m_vcMaxPnt[iPosition] = iMaxCombine; + } +} +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/unicomblock.hpp 
b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/unicomblock.hpp new file mode 100644 index 00000000..28eedbdd --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/common/unicomblock.hpp @@ -0,0 +1,47 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. + +#ifndef __ZXING_COMMON_UNICOMBLOCK_HPP__ +#define __ZXING_COMMON_UNICOMBLOCK_HPP__ +#include "bitmatrix.hpp" +#include "counted.hpp" + +namespace zxing { +class UnicomBlock : public Counted { +public: + UnicomBlock(int iMaxHeight, int iMaxWidth); + ~UnicomBlock(); + + void Init(); + void Reset(Ref poImage); + + unsigned short GetUnicomBlockIndex(int y, int x); + + int GetUnicomBlockSize(int y, int x); + + int GetMinPoint(int y, int x, int &iMinY, int &iMinX); + int GetMaxPoint(int y, int x, int &iMaxY, int &iMaxX); + +private: + void Bfs(int y, int x); + + int m_iHeight; + int m_iWidth; + + unsigned int m_iNowIdx; + bool m_bInit; + std::vector m_vcIndex; + std::vector m_vcCount; + std::vector m_vcMinPnt; + std::vector m_vcMaxPnt; + std::vector m_vcQueue; + static short SEARCH_POS[4][2]; + + Ref m_poImage; +}; +} // namespace zxing +#endif // __ZXING_COMMON_UNICOMBLOCK_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/decodehints.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/decodehints.hpp new file mode 100644 index 00000000..ed1830b5 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/decodehints.hpp @@ -0,0 +1,30 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_DECODEHINTS_HPP__ +#define __ZXING_DECODEHINTS_HPP__ + +#include "errorhandler.hpp" + +namespace zxing { +class DecodeHints { +private: + bool use_nn_detector_; + +public: + explicit DecodeHints(bool use_nn_detector = false) : use_nn_detector_(use_nn_detector){}; + + bool getUseNNDetector() const { return use_nn_detector_; } + void setUseNNDetector(bool use_nn_detector) { use_nn_detector_ = use_nn_detector; } +}; + +} // namespace zxing + +#endif // __ZXING_DECODEHINTS_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/errorhandler.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/errorhandler.cpp new file mode 100644 index 00000000..c8604ab6 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/errorhandler.cpp @@ -0,0 +1,49 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+#include "../precomp.hpp" +#include "errorhandler.hpp" + +namespace zxing { + +ErrorHandler::ErrorHandler() : err_code_(0), err_msg_("") { Init(); } + +ErrorHandler::ErrorHandler(const char* err_msg) : err_code_(-1), err_msg_(err_msg) { Init(); } + +ErrorHandler::ErrorHandler(std::string& err_msg) : err_code_(-1), err_msg_(err_msg) { Init(); } + +ErrorHandler::ErrorHandler(int err_code) : err_code_(err_code), err_msg_("error") { Init(); } + +ErrorHandler::ErrorHandler(int err_code, const char* err_msg) + : err_code_(err_code), err_msg_(err_msg) { + Init(); +} + +ErrorHandler::ErrorHandler(const ErrorHandler& other) { + err_code_ = other.ErrCode(); + err_msg_.assign(other.ErrMsg()); + Init(); +} + +ErrorHandler& ErrorHandler::operator=(const ErrorHandler& other) { + err_code_ = other.ErrCode(); + err_msg_.assign(other.ErrMsg()); + Init(); + return *this; +} + +void ErrorHandler::Init() { handler_type_ = KErrorHandler; } + +void ErrorHandler::Reset() { + err_code_ = 0; + err_msg_.assign(""); +} + +void ErrorHandler::PrintInfo() { + printf("handler_tpye %d, error code %d, errmsg %s\n", handler_type_, err_code_, + err_msg_.c_str()); +} +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/errorhandler.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/errorhandler.hpp new file mode 100644 index 00000000..486786b0 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/errorhandler.hpp @@ -0,0 +1,89 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+ +#ifndef __ZXING_ERRORHANDLER_HPP__ +#define __ZXING_ERRORHANDLER_HPP__ + +#include + +namespace zxing { + +enum { + KErrorHandler = 0, + KErrorHandler_NotFound = 1, + KErrorHandler_CheckSum = 2, + KErrorHandler_Reader = 3, + KErrorHandler_IllegalArgument = 4, + KErrorHandler_ReedSolomon = 5, + KErrorHandler_Format = 6, + KErrorHandler_Detector = 7, + KErrorHandler_IllegalState = 8, +}; + +class ErrorHandler { +public: + ErrorHandler(); + explicit ErrorHandler(std::string& err_msg); + explicit ErrorHandler(const char* err_msg); + explicit ErrorHandler(int err_code); + ErrorHandler(int err_code, std::string& err_msg); + ErrorHandler(int err_code, const char* err_msg); + + virtual ~ErrorHandler(){}; + + virtual inline int ErrCode() const { return err_code_; } + virtual inline const std::string& ErrMsg() const { return err_msg_; } + virtual inline int HandlerType() const { return handler_type_; } + + virtual void Init(); + ErrorHandler(const ErrorHandler& other); + ErrorHandler& operator=(const ErrorHandler& other); + + virtual void PrintInfo(); + virtual void Reset(); + +protected: + int handler_type_; + +private: + int err_code_; + std::string err_msg_; +}; + +#define DECLARE_ERROR_HANDLER(__HANDLER__) \ + class __HANDLER__##ErrorHandler : public ErrorHandler { \ + public: \ + __HANDLER__##ErrorHandler() : ErrorHandler() { Init(); }; \ + __HANDLER__##ErrorHandler(std::string& err_msg) : ErrorHandler(err_msg) { Init(); }; \ + __HANDLER__##ErrorHandler(const char* err_msg) : ErrorHandler(err_msg) { Init(); }; \ + __HANDLER__##ErrorHandler(int err_code) : ErrorHandler(err_code) { Init(); }; \ + __HANDLER__##ErrorHandler(int err_code, std::string& err_msg) \ + : ErrorHandler(err_code, err_msg) { \ + Init(); \ + }; \ + __HANDLER__##ErrorHandler(int err_code, const char* err_msg) \ + : ErrorHandler(err_code, err_msg) { \ + Init(); \ + }; \ + __HANDLER__##ErrorHandler(const ErrorHandler& other) : ErrorHandler(other) { Init(); }; \ + void Init() override { handler_type_ = 
KErrorHandler_##__HANDLER__; } \ + }; + +DECLARE_ERROR_HANDLER(Reader) +DECLARE_ERROR_HANDLER(IllegalArgument) +DECLARE_ERROR_HANDLER(ReedSolomon) +DECLARE_ERROR_HANDLER(Format) +DECLARE_ERROR_HANDLER(Detector) +DECLARE_ERROR_HANDLER(NotFound) +DECLARE_ERROR_HANDLER(CheckSum) +DECLARE_ERROR_HANDLER(IllegalState) + +#undef DECLARE_ERROR_HANDLER + +} // namespace zxing + +#endif // __ZXING_ERRORHANDLER_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/luminance_source.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/luminance_source.cpp new file mode 100644 index 00000000..bb0b41b5 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/luminance_source.cpp @@ -0,0 +1,59 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../precomp.hpp" +#include "luminance_source.hpp" +#include + +using zxing::LuminanceSource; +using zxing::Ref; + +LuminanceSource::LuminanceSource(int width, int height) + : width_(width), height_(height) {} + +LuminanceSource::~LuminanceSource() {} + +bool LuminanceSource::isCropSupported() const { return false; } + +Ref LuminanceSource::crop(int, int, int, int, zxing::ErrorHandler&) const { + return Ref(); +} + +bool LuminanceSource::isRotateSupported() const { return false; } + +Ref LuminanceSource::rotateCounterClockwise(zxing::ErrorHandler&) const { + return Ref(); +} + +LuminanceSource::operator std::string() const { + ArrayRef row; + std::ostringstream oss; + zxing::ErrorHandler err_handler; + for (int y = 0; y < getHeight(); y++) { + err_handler.Reset(); + row = getRow(y, row, err_handler); + if (err_handler.ErrCode()) continue; + for (int x = 0; x < getWidth(); x++) { + int luminance = row[x] & 0xFF; + char c; + if (luminance < 0x40) { + c = '#'; + } else if (luminance < 0x80) { + c = '+'; + } else if (luminance < 0xC0) { + c = '.'; + } else { + c = ' '; + } + oss << c; + } + oss << '\n'; + } + return oss.str(); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/luminance_source.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/luminance_source.hpp new file mode 100644 index 00000000..e3435723 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/luminance_source.hpp @@ -0,0 +1,57 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. 
+// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_LUMINANCE_SOURCE_HPP__ +#define __ZXING_LUMINANCE_SOURCE_HPP__ + +#include +#include "common/array.hpp" +#include "common/bytematrix.hpp" +#include "common/counted.hpp" +#include "errorhandler.hpp" + +namespace zxing { + +class LuminanceSource : public Counted { +protected: + int width_; + int height_; + +public: + LuminanceSource(int width, int height); + virtual ~LuminanceSource(); + + int getWidth() const { return width_; } + int getHeight() const { return height_; } + void setWidth(int w) { width_ = w; } + void setHeight(int h) { height_ = h; } + void filter(); + + // Callers take ownership of the returned memory and must call delete [] on + // it themselves. + virtual ArrayRef getRow(int y, ArrayRef row, + zxing::ErrorHandler& err_handler) const = 0; + virtual ArrayRef getMatrix() const = 0; + virtual Ref getByteMatrix() const = 0; + + virtual bool isCropSupported() const; + virtual Ref crop(int left, int top, int width, int height, + zxing::ErrorHandler& err_handler) const; + + virtual bool isRotateSupported() const; + + virtual Ref rotateCounterClockwise(zxing::ErrorHandler& err_handler) const; + + operator std::string() const; +}; + +} // namespace zxing + +#endif // __ZXING_LUMINANCE_SOURCE_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/bitmatrixparser.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/bitmatrixparser.cpp new file mode 100644 index 00000000..6d426d07 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/bitmatrixparser.cpp @@ -0,0 +1,240 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "bitmatrixparser.hpp" +#include "datamask.hpp" + +using zxing::ErrorHandler; + +namespace zxing { +namespace qrcode { + +int BitMatrixParser::copyBit(size_t x, size_t y, int versionBits) { + bool bit = ((mirror_ ? bitMatrix_->get(y, x) : bitMatrix_->get(x, y)) != (unsigned char)0); + return bit ? (versionBits << 1) | 0x1 : versionBits << 1; +} + +BitMatrixParser::BitMatrixParser(Ref bitMatrix, ErrorHandler &err_handler) + : bitMatrix_(bitMatrix), parsedVersion_(0), parsedFormatInfo_() { + mirror_ = false; + size_t dimension = bitMatrix->getHeight(); + + if ((dimension < 21) || (dimension & 0x03) != 1) { + err_handler = zxing::ReaderErrorHandler("Dimension must be 1 mod 4 and >= 21"); + return; + } +} + +Ref BitMatrixParser::readFormatInformation(ErrorHandler &err_handler) { + if (parsedFormatInfo_ != 0) { + return parsedFormatInfo_; + } + + // Read top-left format info bits + int formatInfoBits1 = 0; + for (int i = 0; i < 6; i++) { + formatInfoBits1 = copyBit(i, 8, formatInfoBits1); + } + // .. and skip a bit in the timing pattern ... + formatInfoBits1 = copyBit(7, 8, formatInfoBits1); + formatInfoBits1 = copyBit(8, 8, formatInfoBits1); + formatInfoBits1 = copyBit(8, 7, formatInfoBits1); + // .. and skip a bit in the timing pattern ... 
+ for (int j = 5; j >= 0; j--) { + formatInfoBits1 = copyBit(8, j, formatInfoBits1); + } + + // Read the top-right/bottom-left pattern + int dimension = bitMatrix_->getHeight(); + int formatInfoBits2 = 0; + int jMin = dimension - 7; + for (int j = dimension - 1; j >= jMin; j--) { + formatInfoBits2 = copyBit(8, j, formatInfoBits2); + } + for (int i = dimension - 8; i < dimension; i++) { + formatInfoBits2 = copyBit(i, 8, formatInfoBits2); + } + + parsedFormatInfo_ = + FormatInformation::decodeFormatInformation(formatInfoBits1, formatInfoBits2); + if (parsedFormatInfo_ != 0) { + return parsedFormatInfo_; + } + err_handler = zxing::ReaderErrorHandler("Could not decode format information"); + return Ref(); +} + +Version *BitMatrixParser::readVersion(ErrorHandler &err_handler) { + if (parsedVersion_ != 0) { + return parsedVersion_; + } + + int dimension = bitMatrix_->getHeight(); + + int provisionalVersion = (dimension - 17) >> 2; + if (provisionalVersion <= 6) { + Version *version = Version::getVersionForNumber(provisionalVersion, err_handler); + if (err_handler.ErrCode()) return NULL; + return version; + } + + // Read top-right version info: 3 wide by 6 tall + int versionBits = 0; + for (int y = 5; y >= 0; y--) { + int xMin = dimension - 11; + for (int x = dimension - 9; x >= xMin; x--) { + versionBits = copyBit(x, y, versionBits); + } + } + + parsedVersion_ = Version::decodeVersionInformation(versionBits); + if (parsedVersion_ != 0 && parsedVersion_->getDimensionForVersion(err_handler) == dimension) { + return parsedVersion_; + } + + // Hmm, failed. 
Try bottom left: 6 wide by 3 tall + versionBits = 0; + for (int x = 5; x >= 0; x--) { + int yMin = dimension - 11; + for (int y = dimension - 9; y >= yMin; y--) { + versionBits = copyBit(x, y, versionBits); + } + } + + parsedVersion_ = Version::decodeVersionInformation(versionBits); + if (parsedVersion_ == NULL) { + err_handler = zxing::ReaderErrorHandler("Could not decode version"); + return NULL; + } + + if (parsedVersion_ != 0 && parsedVersion_->getDimensionForVersion(err_handler) == dimension) { + return parsedVersion_; + } + + err_handler = zxing::ReaderErrorHandler("Could not decode version"); + return NULL; +} + +/** + *

Reads the bits in the {@link BitMatrix} representing the finder pattern in + * the correct order in order to reconstruct the codewords bytes contained + * within the QR Code.

+ * + * @return bytes encoded within the QR Code + */ +ArrayRef BitMatrixParser::readCodewords(ErrorHandler &err_handler) { + Ref formatInfo = readFormatInformation(err_handler); + if (err_handler.ErrCode()) return ArrayRef(); + + Version *version = readVersion(err_handler); + if (err_handler.ErrCode()) return ArrayRef(); + + DataMask &dataMask = DataMask::forReference((int)formatInfo->getDataMask(), err_handler); + if (err_handler.ErrCode()) return ArrayRef(); + // cout << (int)formatInfo->getDataMask() << endl; + int dimension = bitMatrix_->getHeight(); + + dataMask.unmaskBitMatrix(*bitMatrix_, dimension); + + // cerr << *bitMatrix_ << endl; + // cerr << version->getTotalCodewords() << endl; + + Ref functionPattern = version->buildFunctionPattern(err_handler); + if (err_handler.ErrCode()) return ArrayRef(); + + // cout << *functionPattern << endl; + + bool readingUp = true; + ArrayRef result(version->getTotalCodewords()); + int resultOffset = 0; + int currentByte = 0; + int bitsRead = 0; + // Read columns in pairs, from right to left + for (int x = dimension - 1; x > 0; x -= 2) { + if (x == 6) { + // Skip whole column with vertical alignment pattern; + // saves time and makes the other code proceed more cleanly + x--; + } + // Read alternatingly from bottom to top then top to bottom + for (int counter = 0; counter < dimension; counter++) { + int y = readingUp ? 
dimension - 1 - counter : counter; + for (int col = 0; col < 2; col++) { + // Ignore bits covered by the function pattern + if (!functionPattern->get(x - col, y)) { + // Read a bit + bitsRead++; + currentByte <<= 1; + if (bitMatrix_->get(x - col, y)) { + currentByte |= 1; + } + // If we've made a whole byte, save it off + if (bitsRead == 8) { + result[resultOffset++] = (char)currentByte; + bitsRead = 0; + currentByte = 0; + } + } + } + } + readingUp = !readingUp; // switch directions + } + + if (resultOffset != version->getTotalCodewords()) { + err_handler = zxing::ReaderErrorHandler("Did not read all codewords"); + return ArrayRef(); + } + + return result; +} + +/** + * Revert the mask removal done while reading the code words. The bit matrix + * should revert to its original state. + */ +void BitMatrixParser::remask() { + if (parsedFormatInfo_ == NULL) { + return; // We have no format information, and have no data mask + } + ErrorHandler err_handler; + DataMask &dataMask = DataMask::forReference(parsedFormatInfo_->getDataMask(), err_handler); + if (err_handler.ErrCode()) return; + int dimension = bitMatrix_->getHeight(); + dataMask.unmaskBitMatrix(*bitMatrix_, dimension); +} + +/** + * Prepare the parser for a mirrored operation. + * This flag has effect only on the {@link #readFormatInformation()} and the + * {@link #readVersion()}. Before proceeding with {@link #readCodewords()} the + * {@link #mirror()} method should be called. + * + * @param mirror Whether to read version and format information mirrored. + */ +void BitMatrixParser::setMirror(bool mirror) { + parsedVersion_ = NULL; + parsedFormatInfo_ = NULL; + mirror_ = mirror; +} + +/** Mirror the bit matrix in order to attempt a second reading. 
*/ +void BitMatrixParser::mirror() { + for (int x = 0; x < bitMatrix_->getWidth(); x++) { + for (int y = x + 1; y < bitMatrix_->getHeight(); y++) { + if (bitMatrix_->get(x, y) != bitMatrix_->get(y, x)) { + bitMatrix_->flip(y, x); + bitMatrix_->flip(x, y); + } + } + } +} + +} // namespace qrcode +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/bitmatrixparser.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/bitmatrixparser.hpp new file mode 100644 index 00000000..ff5beec1 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/bitmatrixparser.hpp @@ -0,0 +1,53 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_QRCODE_DECODER_BITMATRIXPARSER_HPP__ +#define __ZXING_QRCODE_DECODER_BITMATRIXPARSER_HPP__ + +#include "../../common/array.hpp" +#include "../../common/bitmatrix.hpp" +#include "../../common/counted.hpp" +#include "../../errorhandler.hpp" +#include "../format_information.hpp" +#include "../version.hpp" + +namespace zxing { +namespace qrcode { + +class BitMatrixParser : public Counted { +private: + Ref bitMatrix_; + Version *parsedVersion_; + Ref parsedFormatInfo_; + bool mirror_; + + int copyBit(size_t x, size_t y, int versionBits); + +public: + BitMatrixParser(Ref bitMatrix, ErrorHandler &err_handler); + Ref readFormatInformation(ErrorHandler &err_handler); + Version *readVersion(ErrorHandler &err_handler); + ArrayRef readCodewords(ErrorHandler &err_handler); + +public: + void remask(); + void setMirror(bool mirror); + void mirror(); + void mirrorH(); + +private: + BitMatrixParser(const BitMatrixParser &); + BitMatrixParser &operator=(const BitMatrixParser &); +}; + +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DECODER_BITMATRIXPARSER_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/datablock.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/datablock.cpp new file mode 100644 index 00000000..da6afaa5 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/datablock.cpp @@ -0,0 +1,103 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../../precomp.hpp" +#include "datablock.hpp" +namespace zxing { +namespace qrcode { + +using zxing::ErrorHandler; + +DataBlock::DataBlock(int numDataCodewords, ArrayRef codewords) + : numDataCodewords_(numDataCodewords), codewords_(codewords) {} + +int DataBlock::getNumDataCodewords() { return numDataCodewords_; } + +ArrayRef DataBlock::getCodewords() { return codewords_; } + +std::vector > DataBlock::getDataBlocks(ArrayRef rawCodewords, Version *version, + ErrorCorrectionLevel &ecLevel, + ErrorHandler &err_handler) { + // Figure out the number and size of data blocks used by this version and + // error correction level + ECBlocks &ecBlocks = version->getECBlocksForLevel(ecLevel); + + // First count the total number of data blocks + int totalBlocks = 0; + vector ecBlockArray = ecBlocks.getECBlocks(); + for (size_t i = 0; i < ecBlockArray.size(); i++) { + totalBlocks += ecBlockArray[i]->getCount(); + } + + // Now establish DataBlocks of the appropriate size and number of data + // codewords + std::vector > result(totalBlocks); + int numResultBlocks = 0; + for (size_t j = 0; j < ecBlockArray.size(); j++) { + ECB *ecBlock = ecBlockArray[j]; + for (int i = 0; i < ecBlock->getCount(); i++) { + int numDataCodewords = ecBlock->getDataCodewords(); + int numBlockCodewords = ecBlocks.getECCodewords() + numDataCodewords; + ArrayRef buffer(numBlockCodewords); + Ref blockRef(new DataBlock(numDataCodewords, buffer)); + result[numResultBlocks++] = blockRef; + } + } + + // All blocks have the same amount of data, except that the last n + // (where n may be 0) have 1 more byte. Figure out where these start. 
+ int shorterBlocksTotalCodewords = result[0]->codewords_->size(); + int longerBlocksStartAt = result.size() - 1; + while (longerBlocksStartAt >= 0) { + int numCodewords = result[longerBlocksStartAt]->codewords_->size(); + if (numCodewords == shorterBlocksTotalCodewords) { + break; + } + if (numCodewords != shorterBlocksTotalCodewords + 1) { + err_handler = + zxing::IllegalArgumentErrorHandler("Data block sizes differ by more than 1"); + return std::vector >(); + } + longerBlocksStartAt--; + } + longerBlocksStartAt++; + + int shorterBlocksNumDataCodewords = shorterBlocksTotalCodewords - ecBlocks.getECCodewords(); + // The last elements of result may be 1 element longer; + // first fill out as many elements as all of them have + int rawCodewordsOffset = 0; + for (int i = 0; i < shorterBlocksNumDataCodewords; i++) { + for (int j = 0; j < numResultBlocks; j++) { + result[j]->codewords_[i] = rawCodewords[rawCodewordsOffset++]; + } + } + // Fill out the last data block in the longer ones + for (int j = longerBlocksStartAt; j < numResultBlocks; j++) { + result[j]->codewords_[shorterBlocksNumDataCodewords] = rawCodewords[rawCodewordsOffset++]; + } + // Now add in error correction blocks + int max = result[0]->codewords_->size(); + for (int i = shorterBlocksNumDataCodewords; i < max; i++) { + for (int j = 0; j < numResultBlocks; j++) { + int iOffset = j < longerBlocksStartAt ? 
i : i + 1; + result[j]->codewords_[iOffset] = rawCodewords[rawCodewordsOffset++]; + } + } + + if (rawCodewordsOffset != rawCodewords->size()) { + err_handler = + zxing::IllegalArgumentErrorHandler("rawCodewordsOffset != rawCodewords.length"); + return std::vector >(); + } + + return result; +} + +} // namespace qrcode +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/datablock.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/datablock.hpp new file mode 100644 index 00000000..f95a29e4 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/datablock.hpp @@ -0,0 +1,43 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_QRCODE_DECODER_DATABLOCK_HPP__ +#define __ZXING_QRCODE_DECODER_DATABLOCK_HPP__ + +#include "../../common/array.hpp" +#include "../../common/counted.hpp" +#include "../../errorhandler.hpp" +#include "../error_correction_level.hpp" +#include "../version.hpp" + + +namespace zxing { +namespace qrcode { + +class DataBlock : public Counted { +private: + int numDataCodewords_; + ArrayRef codewords_; + + DataBlock(int numDataCodewords, ArrayRef codewords); + +public: + static std::vector > getDataBlocks(ArrayRef rawCodewords, Version *version, + ErrorCorrectionLevel &ecLevel, + ErrorHandler &err_handler); + + int getNumDataCodewords(); + ArrayRef getCodewords(); +}; + +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DECODER_DATABLOCK_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/datamask.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/datamask.cpp new file mode 100644 index 00000000..68ccb6ff --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/datamask.cpp @@ -0,0 +1,120 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../../precomp.hpp" +#include "datamask.hpp" +namespace zxing { +namespace qrcode { + +using zxing::ErrorHandler; + +DataMask::DataMask() {} + +DataMask::~DataMask() {} + +DataMask& DataMask::forReference(int reference, ErrorHandler& err_handler) { + if (reference < 0 || reference > 7) { + err_handler = zxing::IllegalArgumentErrorHandler("reference must be between 0 and 7"); + return *DATA_MASKS[0]; + } + return *DATA_MASKS[reference]; +} + +void DataMask::unmaskBitMatrix(BitMatrix& bits, size_t dimension) { + for (size_t y = 0; y < dimension; y++) { + for (size_t x = 0; x < dimension; x++) { + // TODO: check why the coordinates have to be swapped + if (isMasked(y, x)) { + bits.flip(x, y); + } + } + } +} + +/** + * 000: mask bits for which (x + y) mod 2 == 0 + */ +class DataMask000 : public DataMask { +public: + bool isMasked(size_t x, size_t y) override { return ((x + y) % 2) == 0; } +}; + +/** + * 001: mask bits for which x mod 2 == 0 + */ +class DataMask001 : public DataMask { +public: + bool isMasked(size_t x, size_t) override { return (x % 2) == 0; } +}; + +/** + * 010: mask bits for which y mod 3 == 0 + */ +class DataMask010 : public DataMask { +public: + bool isMasked(size_t, size_t y) override { return y % 3 == 0; } +}; + +/** + * 011: mask bits for which (x + y) mod 3 == 0 + */ +class DataMask011 : public DataMask { +public: + bool isMasked(size_t x, size_t y) override { return (x + y) % 3 == 0; } +}; + +/** + * 100: mask bits for which (x/2 + y/3) mod 2 == 0 + */ +class DataMask100 : public DataMask { +public: + bool isMasked(size_t x, size_t y) override { return (((x >> 1) + (y / 3)) % 2) == 0; } +}; + +/** + * 101: mask bits for which xy mod 2 + xy mod 3 == 0 + */ +class DataMask101 : public DataMask { +public: + bool isMasked(size_t x, size_t y) override { + size_t temp = x * y; + return (temp % 2) + (temp % 3) == 0; + } +}; + +/** + * 110: mask bits for which (xy mod 2 + xy mod 3) mod 2 == 0 + */ +class DataMask110 : public DataMask { 
+public: + bool isMasked(size_t x, size_t y) override { + size_t temp = x * y; + return (((temp % 2) + (temp % 3)) % 2) == 0; + } +}; + +/** + * 111: mask bits for which ((x+y)mod 2 + xy mod 3) mod 2 == 0 + */ +class DataMask111 : public DataMask { +public: + bool isMasked(size_t x, size_t y) override { + return ((((x + y) % 2) + ((x * y) % 3)) % 2) == 0; + } +}; + +vector > DataMask::DATA_MASKS = { + Ref(new DataMask000()), Ref(new DataMask001()), + Ref(new DataMask010()), Ref(new DataMask011()), + Ref(new DataMask100()), Ref(new DataMask101()), + Ref(new DataMask110()), Ref(new DataMask111()), +}; + +} // namespace qrcode +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/datamask.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/datamask.hpp new file mode 100644 index 00000000..c8f276fd --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/datamask.hpp @@ -0,0 +1,37 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_QRCODE_DECODER_DATAMASK_HPP__ +#define __ZXING_QRCODE_DECODER_DATAMASK_HPP__ + +#include "../../common/array.hpp" +#include "../../common/bitmatrix.hpp" +#include "../../common/counted.hpp" +#include "../../errorhandler.hpp" +namespace zxing { +namespace qrcode { + +class DataMask : public Counted { +private: + static std::vector > DATA_MASKS; + +protected: +public: + DataMask(); + virtual ~DataMask(); + void unmaskBitMatrix(BitMatrix& matrix, size_t dimension); + virtual bool isMasked(size_t x, size_t y) = 0; + static DataMask& forReference(int reference, ErrorHandler& err_handler); +}; + +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DECODER_DATAMASK_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/decoded_bit_stream_parser.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/decoded_bit_stream_parser.cpp new file mode 100644 index 00000000..05de793c --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/decoded_bit_stream_parser.cpp @@ -0,0 +1,490 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../../precomp.hpp" +#include "decoded_bit_stream_parser.hpp" +#include "../../common/stringutils.hpp" +#include "../../zxing.hpp" +#ifndef NO_ICONV_INSIDE +#include +#endif +#include + +#undef ICONV_CONST +#define ICONV_CONST const + +#ifndef ICONV_CONST +#define ICONV_CONST /**/ +#endif + +using zxing::ErrorHandler; + +// Add this to fix both Mac and Windows compilers +template +class sloppy {}; + +// convert between T** and const T** +template +class sloppy { + T** t; + +public: + explicit sloppy(T** mt) : t(mt) {} + explicit sloppy(const T** mt) : t(const_cast(mt)) {} + + operator T* *() const { return t; } + operator const T* *() const { return const_cast(t); } +}; + +using namespace std; +using namespace zxing; +using namespace zxing::qrcode; +using namespace zxing::common; + +const char DecodedBitStreamParser::ALPHANUMERIC_CHARS[] = { + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', + 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', + 'U', 'V', 'W', 'X', 'Y', 'Z', ' ', '$', '%', '*', '+', '-', '.', '/', ':'}; + +// string DecodedBitStreamParser::outputCharset = "UTF-8"; + +namespace { +int GB2312_SUBSET = 1; +} + +void DecodedBitStreamParser::append(std::string& result, string const& in, + ErrorHandler& err_handler) { + append(result, (char const*)in.c_str(), in.length(), err_handler); +} + +void DecodedBitStreamParser::append(std::string& result, const char* bufIn, size_t nIn, + ErrorHandler& err_handler) { + if (err_handler.ErrCode()) return; +#ifndef NO_ICONV_INSIDE + if (nIn == 0) { + return; + } + iconv_t cd; + // cout< 0) { + size_t oneway = iconv(cd, sloppy(&fromPtr), &nFrom, sloppy(&toPtr), &nTo); + + if (oneway == (size_t)(-1)) { + iconv_close(cd); + delete[] bufOut; + err_handler = zxing::ReaderErrorHandler("error converting characters"); + return; + } + } + iconv_close(cd); + + int nResult = maxOut - nTo; + bufOut[nResult] = '\0'; + result.append((const char*)bufOut); + delete[] 
bufOut; +#else + result.append((const char*)bufIn, nIn); +#endif +} + +void DecodedBitStreamParser::decodeHanziSegment(Ref bits_, string& result, int count, + ErrorHandler& err_handler) { + BitSource& bits(*bits_); + // Don't crash trying to read more bits than we have available. + if (count * 13 > bits.available()) { + err_handler = zxing::FormatErrorHandler("decodeKanjiSegment"); + return; + } + + // Each character will require 2 bytes. Read the characters as 2-byte pairs + // and decode as GB2312 afterwards + size_t nBytes = 2 * count; + char* buffer = new char[nBytes]; + int offset = 0; + while (count > 0) { + // Each 13 bits encodes a 2-byte character + int twoBytes = bits.readBits(13, err_handler); + if (err_handler.ErrCode()) return; + int assembledTwoBytes = ((twoBytes / 0x060) << 8) | (twoBytes % 0x060); + if (assembledTwoBytes < 0x003BF) { + // In the 0xA1A1 to 0xAAFE range + assembledTwoBytes += 0x0A1A1; + } else { + // In the 0xB0A1 to 0xFAFE range + assembledTwoBytes += 0x0A6A1; + } + buffer[offset] = (char)((assembledTwoBytes >> 8) & 0xFF); + buffer[offset + 1] = (char)(assembledTwoBytes & 0xFF); + offset += 2; + count--; + } + // for(int i=0;i bits, std::string& result, int count, + ErrorHandler& err_handler) { + // Each character will require 2 bytes. 
Read the characters as 2-byte pairs + // and decode as Shift_JIS afterwards + size_t nBytes = 2 * count; + char* buffer = new char[nBytes]; + int offset = 0; + while (count > 0) { + // Each 13 bits encodes a 2-byte character + + int twoBytes = bits->readBits(13, err_handler); + if (err_handler.ErrCode()) return; + int assembledTwoBytes = ((twoBytes / 0x0C0) << 8) | (twoBytes % 0x0C0); + if (assembledTwoBytes < 0x01F00) { + // In the 0x8140 to 0x9FFC range + assembledTwoBytes += 0x08140; + } else { + // In the 0xE040 to 0xEBBF range + assembledTwoBytes += 0x0C140; + } + buffer[offset] = (char)(assembledTwoBytes >> 8); + buffer[offset + 1] = (char)assembledTwoBytes; + offset += 2; + count--; + } + + append(result, buffer, nBytes, err_handler); + if (err_handler.ErrCode()) { + delete[] buffer; + return; + } + // cout< bits_, string& result, int count, + CharacterSetECI* currentCharacterSetECI, + ArrayRef >& byteSegments, + ErrorHandler& err_handler) { + int nBytes = count; + BitSource& bits(*bits_); + // Don't crash trying to read more bits than we have available. + int available = bits.available(); + // try to repair count data if count data is invalid + if (count * 8 > available) { + count = (available + 7 / 8); + } + + ArrayRef bytes_(count); + char* readBytes = &(*bytes_)[0]; + for (int i = 0; i < count; i++) { + // readBytes[i] = (char) bits.readBits(8); + int readBits = available < 8 ? available : 8; + readBytes[i] = (char)bits.readBits(readBits, err_handler); + } + if (err_handler.ErrCode()) return; + // vector encoding; + string encoding; + + if (currentCharacterSetECI == 0) { + // The spec isn't clear on this mode; see + // section 6.4.5: t does not say which encoding to assuming + // upon decoding. I have seen ISO-8859-1 used as well as + // Shift_JIS -- without anything like an ECI designator to + // give a hint. 
+ encoding = outputCharset; + + } else { + // encoding .push_back(currentCharacterSetECI->name()); + encoding = currentCharacterSetECI->name(); + } + // cout<<"encoding: "<values().push_back(bytes_); +} + +void DecodedBitStreamParser::decodeNumericSegment(Ref bits, std::string& result, + int count, ErrorHandler& err_handler) { + int nBytes = count; + // char* bytes = new char[nBytes]; + ArrayRef bytes = ArrayRef(new Array(nBytes)); + int i = 0; + // Read three digits at a time + while (count >= 3) { + // Each 10 bits encodes three digits + if (bits->available() < 10) { + err_handler = zxing::ReaderErrorHandler("format exception"); + return; + } + int threeDigitsBits = bits->readBits(10, err_handler); + if (err_handler.ErrCode()) return; + if (threeDigitsBits >= 1000) { + ostringstream s; + s << "Illegal value for 3-digit unit: " << threeDigitsBits; + err_handler = zxing::ReaderErrorHandler(s.str().c_str()); + return; + } + bytes[i++] = ALPHANUMERIC_CHARS[threeDigitsBits / 100]; + bytes[i++] = ALPHANUMERIC_CHARS[(threeDigitsBits / 10) % 10]; + bytes[i++] = ALPHANUMERIC_CHARS[threeDigitsBits % 10]; + count -= 3; + } + if (count == 2) { + if (bits->available() < 7) { + err_handler = zxing::ReaderErrorHandler("format exception"); + return; + } + // Two digits left over to read, encoded in 7 bits + int twoDigitsBits = bits->readBits(7, err_handler); + if (err_handler.ErrCode()) return; + if (twoDigitsBits >= 100) { + ostringstream s; + s << "Illegal value for 2-digit unit: " << twoDigitsBits; + err_handler = zxing::ReaderErrorHandler(s.str().c_str()); + return; + } + bytes[i++] = ALPHANUMERIC_CHARS[twoDigitsBits / 10]; + bytes[i++] = ALPHANUMERIC_CHARS[twoDigitsBits % 10]; + } else if (count == 1) { + if (bits->available() < 4) { + err_handler = zxing::ReaderErrorHandler("format exception"); + return; + } + // One digit left over to read + int digitBits = bits->readBits(4, err_handler); + if (err_handler.ErrCode()) return; + if (digitBits >= 10) { + ostringstream s; + s 
<< "Illegal value for digit unit: " << digitBits; + err_handler = zxing::ReaderErrorHandler(s.str().c_str()); + return; + } + bytes[i++] = ALPHANUMERIC_CHARS[digitBits]; + } + append(result, bytes->data(), nBytes, err_handler); + if (err_handler.ErrCode()) return; +} + +char DecodedBitStreamParser::toAlphaNumericChar(size_t value, ErrorHandler& err_handler) { + if (value >= sizeof(DecodedBitStreamParser::ALPHANUMERIC_CHARS)) { + err_handler = zxing::FormatErrorHandler("toAlphaNumericChar"); + return 0; + } + return ALPHANUMERIC_CHARS[value]; +} + +void DecodedBitStreamParser::decodeAlphanumericSegment(Ref bits_, string& result, + int count, bool fc1InEffect, + ErrorHandler& err_handler) { + BitSource& bits(*bits_); + ostringstream bytes; + // Read two characters at a time + while (count > 1) { + if (bits.available() < 11) { + err_handler = zxing::FormatErrorHandler("decodeAlphanumericSegment"); + return; + } + int nextTwoCharsBits = bits.readBits(11, err_handler); + bytes << toAlphaNumericChar(nextTwoCharsBits / 45, err_handler); + bytes << toAlphaNumericChar(nextTwoCharsBits % 45, err_handler); + if (err_handler.ErrCode()) return; + count -= 2; + } + if (count == 1) { + // special case: one character left + if (bits.available() < 6) { + err_handler = zxing::FormatErrorHandler("decodeAlphanumericSegment"); + return; + } + bytes << toAlphaNumericChar(bits.readBits(6, err_handler), err_handler); + if (err_handler.ErrCode()) return; + } + // See section 6.4.8.1, 6.4.8.2 + string s = bytes.str(); + if (fc1InEffect) { + // We need to massage the result a bit if in an FNC1 mode: + ostringstream r; + for (size_t i = 0; i < s.length(); i++) { + if (s[i] != '%') { + r << s[i]; + } else { + if (i < s.length() - 1 && s[i + 1] == '%') { + // %% is rendered as % + r << s[i++]; + } else { + // In alpha mode, % should be converted to FNC1 separator + // 0x1D + r << (char)0x1D; + } + } + } + s = r.str(); + } + append(result, s, err_handler); + if (err_handler.ErrCode()) return; +} 
+ +namespace { +int parseECIValue(BitSource& bits, ErrorHandler& err_handler) { + int firstByte = bits.readBits(8, err_handler); + if (err_handler.ErrCode()) return 0; + if ((firstByte & 0x80) == 0) { + // just one byte + return firstByte & 0x7F; + } + if ((firstByte & 0xC0) == 0x80) { + // two bytes + int secondByte = bits.readBits(8, err_handler); + if (err_handler.ErrCode()) return 0; + return ((firstByte & 0x3F) << 8) | secondByte; + } + if ((firstByte & 0xE0) == 0xC0) { + // three bytes + int secondThirdBytes = bits.readBits(16, err_handler); + if (err_handler.ErrCode()) return 0; + return ((firstByte & 0x1F) << 16) | secondThirdBytes; + } + err_handler = zxing::FormatErrorHandler("parseECIValue"); + return 0; +} +} // namespace + +Ref DecodedBitStreamParser::decode(ArrayRef bytes, Version* version, + ErrorCorrectionLevel const& ecLevel, + ErrorHandler& err_handler, int iVersion) { + Ref bits_(new BitSource(bytes)); + BitSource& bits(*bits_); + string result; + result.reserve(50); + Mode* mode = 0; + string modeName; + ArrayRef > byteSegments(0); + + CharacterSetECI* currentCharacterSetECI = 0; + bool fc1InEffect = false; + + outputCharset = "UTF-8"; + do { + // While still another segment to read... + if (bits.available() < 4) { + // OK, assume we're done. 
Really, a TERMINATOR mode should have been + // recorded here + mode = &Mode::TERMINATOR; + } else { + mode = &Mode::forBits(bits.readBits(4, err_handler), + err_handler); // mode is encoded by 4 bits + if (err_handler.ErrCode()) return Ref(); + } + + if (mode != &Mode::TERMINATOR) { + if ((mode == &Mode::FNC1_FIRST_POSITION) || (mode == &Mode::FNC1_SECOND_POSITION)) { + // We do little with FNC1 except alter the parsed result a bit + // according to the spec + fc1InEffect = true; + } else if (mode == &Mode::STRUCTURED_APPEND) { + if (bits.available() < 16) { + err_handler = zxing::FormatErrorHandler("decode"); + return Ref(); + } + // not really supported; all we do is ignore it + // Read next 8 bits (symbol sequence #) and 8 bits (parity + // data), then continue + bits.readBits(16, err_handler); + if (err_handler.ErrCode()) return Ref(); + } else if (mode == &Mode::ECI) { + // Count doesn't apply to ECI + int value = parseECIValue(bits, err_handler); + if (err_handler.ErrCode()) Ref(); + currentCharacterSetECI = CharacterSetECI::getCharacterSetECIByValueFind(value); + if (currentCharacterSetECI == 0) { + err_handler = zxing::FormatErrorHandler("decode"); + return Ref(); + } + } else { + // First handle Hanzi mode which does not start with character + // count + if (mode == &Mode::HANZI) { + // chinese mode contains a sub set indicator right after + // mode indicator + int subset = bits.readBits(4, err_handler); + int countHanzi = + bits.readBits(mode->getCharacterCountBits(version), err_handler); + if (err_handler.ErrCode()) return Ref(); + if (subset == GB2312_SUBSET) { + decodeHanziSegment(bits_, result, countHanzi, err_handler); + if (err_handler.ErrCode()) Ref(); + outputCharset = "GB2312"; + modeName = mode->getName(); + } + } else { + // "Normal" QR code modes: + // How many characters will follow, encoded in this mode? 
+ int count = bits.readBits(mode->getCharacterCountBits(version), err_handler); + if (err_handler.ErrCode()) return Ref(); + if (mode == &Mode::NUMERIC) { + decodeNumericSegment(bits_, result, count, err_handler); + if (err_handler.ErrCode()) { + err_handler = zxing::FormatErrorHandler("decode"); + return Ref(); + } + modeName = mode->getName(); + } else if (mode == &Mode::ALPHANUMERIC) { + decodeAlphanumericSegment(bits_, result, count, fc1InEffect, err_handler); + if (err_handler.ErrCode()) Ref(); + modeName = mode->getName(); + } else if (mode == &Mode::BYTE) { + decodeByteSegment(bits_, result, count, currentCharacterSetECI, + byteSegments, err_handler); + if (err_handler.ErrCode()) { + err_handler = zxing::FormatErrorHandler("decode"); + return Ref(); + } + modeName = mode->getName(); + // outputCharset = getResultCharset(); + } else if (mode == &Mode::KANJI) { + // int countKanji = + // bits.readBits(mode->getCharacterCountBits(version)); + // cout<<"countKanji: "<(); + modeName = mode->getName(); + } else { + err_handler = zxing::FormatErrorHandler("decode"); + return Ref(); + } + } + } + } + } while (mode != &Mode::TERMINATOR); + return Ref(new DecoderResult(bytes, Ref(new String(result)), + byteSegments, (string)ecLevel, + (string)outputCharset, iVersion, modeName)); +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/decoded_bit_stream_parser.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/decoded_bit_stream_parser.hpp new file mode 100644 index 00000000..e8887ef6 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/decoded_bit_stream_parser.hpp @@ -0,0 +1,66 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_QRCODE_DECODER_DECODEDBITSTREAMPARSER_HPP__ +#define __ZXING_QRCODE_DECODER_DECODEDBITSTREAMPARSER_HPP__ + +#include "../../common/array.hpp" +#include "../../common/bitsource.hpp" +#include "../../common/characterseteci.hpp" +#include "../../common/counted.hpp" +#include "../../common/decoder_result.hpp" +#include "../../decodehints.hpp" +#include "../../errorhandler.hpp" +#include "mode.hpp" + + +namespace zxing { +namespace qrcode { + +class DecodedBitStreamParser { +public: + DecodedBitStreamParser() : outputCharset("UTF-8") {} + +private: + static char const ALPHANUMERIC_CHARS[]; + + string outputCharset; + // string outputCharset; + + char toAlphaNumericChar(size_t value, ErrorHandler& err_handler); + + void decodeHanziSegment(Ref bits, std::string& result, int count, + ErrorHandler& err_handler); + void decodeKanjiSegment(Ref bits, std::string& result, int count, + ErrorHandler& err_handler); + void decodeByteSegment(Ref bits, std::string& result, int count); + void decodeByteSegment(Ref bits_, std::string& result, int count, + zxing::common::CharacterSetECI* currentCharacterSetECI, + ArrayRef >& byteSegments, ErrorHandler& err_handler); + void decodeAlphanumericSegment(Ref bits, std::string& result, int count, + bool fc1InEffect, ErrorHandler& err_handler); + void decodeNumericSegment(Ref bits, std::string& result, int count, + ErrorHandler& err_handler); + + void append(std::string& ost, const char* bufIn, size_t nIn, ErrorHandler& err_handler); + void append(std::string& ost, std::string const& in, ErrorHandler& err_handler); + +public: + Ref decode(ArrayRef bytes, Version* version, + ErrorCorrectionLevel const& ecLevel, ErrorHandler& 
err_handler, + int iVersion = -1); + + // string getResultCharset(); +}; + +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DECODER_DECODEDBITSTREAMPARSER_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/decoder.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/decoder.cpp new file mode 100644 index 00000000..4ebe4754 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/decoder.cpp @@ -0,0 +1,223 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "decoder.hpp" +#include "../error_correction_level.hpp" +#include "../version.hpp" +#include "datablock.hpp" +#include "decoded_bit_stream_parser.hpp" +#include "qrcode_decoder_metadata.hpp" + +using zxing::DecoderResult; +using zxing::Ref; +using zxing::qrcode::Decoder; + +// VC++ +// The main class which implements QR Code decoding -- as opposed to locating +// and extracting the QR Code from an image. +using zxing::ArrayRef; +using zxing::BitMatrix; +using zxing::DetectorResult; +using zxing::ErrorHandler; + +Decoder::Decoder() : rsDecoder_(Ref(GF_QR_CODE_FIELD_256)) { + possibleVersion_ = 0; + possibleFix_ = 0; + decoderState_ = NOTSTART; +} + +// Convenience method that can decode a QR Code represented as a 2D array of +// booleans. "true" is taken to mean a black module. 
+Ref Decoder::decode(Ref bits, ErrorHandler &err_handler) { + string errMsg = ""; + + // Used for mirrored qrcode + int width = bits->getWidth(); + int height = bits->getHeight(); + + Ref bits2(new BitMatrix(width, height, bits->getPtr(), err_handler)); + if (err_handler.ErrCode()) return Ref(); + Ref rst = decode(bits, false, err_handler); + if (err_handler.ErrCode() || rst == NULL) { + errMsg = err_handler.ErrMsg(); + } else { + return rst; + } + + err_handler.Reset(); + Ref result = decode(bits2, true, err_handler); + if (err_handler.ErrCode()) { + return Ref(); + } else { + // Success! Notify the caller that the code was mirrored. + result->setOther(Ref(new QRCodeDecoderMetaData(true))); + return result; + } +}; + +Ref Decoder::decode(Ref bits, bool isMirror, ErrorHandler &err_handler) { + // Ref Decoder::decode(BitMatrixParser& parser) { + // Construct a parser and read version, error-correction level + BitMatrixParser parser(bits, err_handler); + if (err_handler.ErrCode()) return Ref(); + + if (isMirror == true) { + // Revert the bit matrix + parser.remask(); + + // Will be attempting a mirrored reading of the version and format info. + parser.setMirror(true); + + // Preemptively read the version. + parser.readVersion(err_handler); + if (err_handler.ErrCode()) { + err_handler = zxing::ReaderErrorHandler("Decoder::decode mirror & no mirror"); + return Ref(); + } + + // Preemptively read the format information. + parser.readFormatInformation(err_handler); + if (err_handler.ErrCode()) return Ref(); + + /* + * Since we're here, this means we have successfully detected some kind + * of version and format information when mirrored. This is a good sign, + * that the QR code may be mirrored, and we should try once more with a + * mirrored content. + */ + // Prepare for a mirrored reading. 
+ parser.mirror(); + } + + decoderState_ = START; + possibleFix_ = 0; + Version *version = parser.readVersion(err_handler); + if (err_handler.ErrCode() || version == NULL) { + err_handler = ReaderErrorHandler("Decoder::decode mirror & no mirror"); + return Ref(); + } + decoderState_ = READVERSION; + float fixedPatternScore = estimateFixedPattern(bits, version, err_handler); + if (err_handler.ErrCode()) return Ref(); + + Ref formatInfo = parser.readFormatInformation(err_handler); + if (err_handler.ErrCode()) return Ref(); + ErrorCorrectionLevel &ecLevel = formatInfo->getErrorCorrectionLevel(); + + decoderState_ = READERRORCORRECTIONLEVEL; + + // Read codewords + ArrayRef codewords(parser.readCodewords(err_handler)); + if (err_handler.ErrCode()) { + err_handler = zxing::ReaderErrorHandler("Decoder::decode mirror & no mirror"); + return Ref(); + } + + decoderState_ = READCODEWORDSORRECTIONLEVEL; + possibleFix_ = fixedPatternScore; + + // Separate into data blocks + std::vector > dataBlocks( + DataBlock::getDataBlocks(codewords, version, ecLevel, err_handler)); + if (err_handler.ErrCode()) return Ref(); + + // Count total number of data bytes + int totalBytes = 0; + for (size_t i = 0; i < dataBlocks.size(); i++) { + totalBytes += dataBlocks[i]->getNumDataCodewords(); + } + ArrayRef resultBytes(totalBytes); + int resultOffset = 0; + + // Error-correct and copy data blocks together into a stream of bytes + for (size_t j = 0; j < dataBlocks.size(); j++) { + err_handler.Reset(); + Ref dataBlock(dataBlocks[j]); + ArrayRef codewordBytes = dataBlock->getCodewords(); + int numDataCodewords = dataBlock->getNumDataCodewords(); + + correctErrors(codewordBytes, numDataCodewords, err_handler); + if (err_handler.ErrCode()) return Ref(); + + for (int i = 0; i < numDataCodewords; i++) { + resultBytes[resultOffset++] = codewordBytes[i]; + } + } + + decoderState_ = FINISH; + // return DecodedBitStreamParser::decode(resultBytes, + DecodedBitStreamParser dbs_parser; + Ref rst = + 
dbs_parser.decode(resultBytes, version, ecLevel, err_handler, version->getVersionNumber()); + + if (err_handler.ErrCode()) return Ref(); + return rst; +} + +// Given data and error-correction codewords received, possibly corrupted by +// errors, attempts to correct the errors in-place using Reed-Solomon error +// correction.

codewordBytes: data and error correction codewords +// numDataCodewords: number of codewords that are data bytes +void Decoder::correctErrors(ArrayRef codewordBytes, int numDataCodewords, + ErrorHandler &err_handler) { + // First read into an arrya of ints + int numCodewords = codewordBytes->size(); + ArrayRef codewordInts(numCodewords); + for (int i = 0; i < numCodewords; i++) { + codewordInts[i] = codewordBytes[i] & 0xff; + } + int numECCodewords = numCodewords - numDataCodewords; + bool correctErrorsFinishished = false; + + rsDecoder_.decode(codewordInts, numECCodewords, err_handler); + if (err_handler.ErrCode()) { + return; + } + + correctErrorsFinishished = true; + + // Copy back into array of bytes -- only need to worry about the bytes that + // were data We don't care about errors in the error-correction codewords + if (correctErrorsFinishished) { + for (int i = 0; i < numDataCodewords; i++) { + codewordBytes[i] = (char)codewordInts[i]; + } + } +} + +unsigned int Decoder::getPossibleVersion() { return possibleVersion_; } + +float Decoder::estimateFixedPattern(Ref bits, zxing::qrcode::Version *version, + ErrorHandler &err_handler) { + Ref fixedPatternValue = version->buildFixedPatternValue(err_handler); + if (err_handler.ErrCode()) { + err_handler = zxing::ReaderErrorHandler("Decoder::decode mirror & no mirror"); + return -1.0; + } + Ref fixedPatternTemplate = version->buildFixedPatternTemplate(err_handler); + if (err_handler.ErrCode()) { + err_handler = zxing::ReaderErrorHandler("Decoder::decode mirror & no mirror"); + return -1.0; + } + + int iSum = 0; + int iCount = 0; + for (int i = 0; i < bits->getHeight(); ++i) { + for (int j = 0; j < bits->getWidth(); ++j) { + if (fixedPatternTemplate->get(i, j)) { + iSum++; + if (bits->get(i, j) == fixedPatternValue->get(i, j)) iCount++; + } + } + } + + float possbielFix = 2.0 * iCount / iSum - 1; + return possbielFix > 0 ? 
possbielFix : 0; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/decoder.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/decoder.hpp new file mode 100644 index 00000000..3ce67f77 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/decoder.hpp @@ -0,0 +1,65 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_QRCODE_DECODER_DECODER_HPP__ +#define __ZXING_QRCODE_DECODER_DECODER_HPP__ + +#include "../../common/array.hpp" +#include "../../common/bitmatrix.hpp" +#include "../../common/counted.hpp" +#include "../../common/decoder_result.hpp" +#include "../../common/detector_result.hpp" +#include "../../common/reedsolomon/reed_solomon_decoder.hpp" +#include "../../errorhandler.hpp" +#include "../version.hpp" +#include "bitmatrixparser.hpp" + +namespace zxing { +namespace qrcode { + +class Decoder { +public: + enum DecoderState { + NOTSTART = 19, + START = 20, + READVERSION = 21, + READERRORCORRECTIONLEVEL = 22, + READCODEWORDSORRECTIONLEVEL = 23, + FINISH = 24 + }; + +private: + DecoderState decoderState_; + float possibleFix_; + ReedSolomonDecoder rsDecoder_; + void correctErrors(ArrayRef bytes, int numDataCodewords, ErrorHandler& err_handler); + +public: + Decoder(); + Ref decode(Ref bits, ErrorHandler& err_handler); + +private: + Ref decode(Ref bits, bool isMirror, ErrorHandler& err_handler); + + float estimateFixedPattern(Ref bits, Version* version, ErrorHandler& err_handler); + 
+private: + unsigned int possibleVersion_; + +public: + unsigned int getPossibleVersion(); + DecoderState getState() { return decoderState_; } + float getPossibleFix() { return possibleFix_; } +}; + +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DECODER_DECODER_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/mode.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/mode.cpp new file mode 100644 index 00000000..f96b63b3 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/mode.cpp @@ -0,0 +1,91 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../../precomp.hpp" +#include "mode.hpp" +#include "../../common/counted.hpp" +#include "../../zxing.hpp" +#include "../version.hpp" + +#include + +using std::ostringstream; +using zxing::qrcode::Mode; + +// VC++ +using zxing::ErrorHandler; +using zxing::qrcode::Version; + +Mode Mode::TERMINATOR(0, 0, 0, 0x00, "TERMINATOR"); +Mode Mode::NUMERIC(10, 12, 14, 0x01, "NUMERIC"); +Mode Mode::ALPHANUMERIC(9, 11, 13, 0x02, "ALPHANUMERIC"); +Mode Mode::STRUCTURED_APPEND(0, 0, 0, 0x03, "STRUCTURED_APPEND"); +Mode Mode::BYTE(8, 16, 16, 0x04, "BYTE"); +Mode Mode::ECI(0, 0, 0, 0x07, "ECI"); +Mode Mode::KANJI(8, 10, 12, 0x08, "KANJI"); +Mode Mode::FNC1_FIRST_POSITION(0, 0, 0, 0x05, "FNC1_FIRST_POSITION"); +Mode Mode::FNC1_SECOND_POSITION(0, 0, 0, 0x09, "FNC1_SECOND_POSITION"); +Mode Mode::HANZI(8, 10, 12, 0x0D, "HANZI"); + +// Mode::Mode(int cbv0_9, int cbv10_26, int cbv27, int /* bits */, char const* +// name) : +Mode::Mode(int cbv0_9, int cbv10_26, int cbv27, int bits, char const* name) + : characterCountBitsForVersions0To9_(cbv0_9), + characterCountBitsForVersions10To26_(cbv10_26), + // characterCountBitsForVersions27AndHigher_(cbv27), name_(name) { + characterCountBitsForVersions27AndHigher_(cbv27), + bits_(bits), + name_(name) {} + +Mode& Mode::forBits(int bits, ErrorHandler& err_handler) { + switch (bits) { + case 0x0: + return TERMINATOR; + case 0x1: + return NUMERIC; + case 0x2: + return ALPHANUMERIC; + case 0x3: + return STRUCTURED_APPEND; + case 0x4: + return BYTE; + case 0x5: + return FNC1_FIRST_POSITION; + case 0x7: + return ECI; + case 0x8: + return KANJI; + case 0x9: + return FNC1_SECOND_POSITION; + case 0xD: + // 0xD is defined in GBT 18284-2000, may not be supported in foreign + // country + return HANZI; + default: + ostringstream s; + s << "Illegal mode bits: " << bits; + err_handler = zxing::ReaderErrorHandler(s.str().c_str()); + return TERMINATOR; + } +} + +int Mode::getCharacterCountBits(Version* version) const { + int number = 
version->getVersionNumber(); + if (number <= 9) { + return characterCountBitsForVersions0To9_; + } else if (number <= 26) { + return characterCountBitsForVersions10To26_; + } else { + return characterCountBitsForVersions27AndHigher_; + } +} + +int Mode::getBits() const { return bits_; } + +string zxing::qrcode::Mode::getName() const { return name_; } diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/mode.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/mode.hpp new file mode 100644 index 00000000..2b3b146a --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/mode.hpp @@ -0,0 +1,52 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_QRCODE_DECODER_MODE_HPP__ +#define __ZXING_QRCODE_DECODER_MODE_HPP__ + +#include "../../common/counted.hpp" +#include "../../errorhandler.hpp" +#include "../version.hpp" + +namespace zxing { +namespace qrcode { + +class Mode { +private: + int characterCountBitsForVersions0To9_; + int characterCountBitsForVersions10To26_; + int characterCountBitsForVersions27AndHigher_; + int bits_; + std::string name_; + + Mode(int cbv0_9, int cbv10_26, int cbv27, int bits, char const* name); + +public: + static Mode TERMINATOR; + static Mode NUMERIC; + static Mode ALPHANUMERIC; + static Mode STRUCTURED_APPEND; + static Mode BYTE; + static Mode ECI; + static Mode KANJI; + static Mode FNC1_FIRST_POSITION; + static Mode FNC1_SECOND_POSITION; + static Mode HANZI; + + static Mode& forBits(int bits, ErrorHandler& err_handler); + // int getCharacterCountBits(Version *version); + int getCharacterCountBits(Version* version) const; + int getBits() const; + string getName() const; +}; +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DECODER_MODE_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/qrcode_decoder_metadata.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/qrcode_decoder_metadata.hpp new file mode 100644 index 00000000..e7718931 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/decoder/qrcode_decoder_metadata.hpp @@ -0,0 +1,64 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. 
+// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_QRCODE_DECODER_QRCODEDECODERMETADATA_HPP__ +#define __ZXING_QRCODE_DECODER_QRCODEDECODERMETADATA_HPP__ + +#include "../../common/array.hpp" +#include "../../common/counted.hpp" +#include "../../resultpoint.hpp" + +// VC++ +// The main class which implements QR Code decoding -- as opposed to locating +// and extracting the QR Code from an image. + +namespace zxing { +namespace qrcode { + +/** + * Meta-data container for QR Code decoding. Instances of this class may be used + * to convey information back to the decoding caller. Callers are expected to + * process this. + * + * @see com.google.zxing.common.DecoderResult#getOther() + */ +class QRCodeDecoderMetaData : public Counted { +private: + bool mirrored_; + +public: + explicit QRCodeDecoderMetaData(bool mirrored) : mirrored_(mirrored) {} + +public: + /** + * @return true if the QR Code was mirrored. + */ + bool isMirrored() { return mirrored_; }; + + /** + * Apply the result points' order correction due to mirroring. + * + * @param points Array of points to apply mirror correction to. + */ + void applyMirroredCorrection(ArrayRef >& points) { + if (!mirrored_ || points->size() < 3) { + return; + } + Ref bottomLeft = points[0]; + points[0] = points[2]; + points[2] = bottomLeft; + // No need to 'fix' top-left and alignment pattern. + }; +}; + +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DECODER_QRCODEDECODERMETADATA_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/alignment_pattern.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/alignment_pattern.cpp new file mode 100644 index 00000000..deefeeb6 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/alignment_pattern.cpp @@ -0,0 +1,42 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "alignment_pattern.hpp" +using zxing::Ref; +using zxing::qrcode::AlignmentPattern; +namespace zxing { +namespace qrcode { +AlignmentPattern::AlignmentPattern(float posX, float posY, float estimatedModuleSize) + : ResultPoint(posX, posY), estimatedModuleSize_(estimatedModuleSize) {} + +// Determines if this alignment pattern "about equals" an alignment pattern at +// the stated position and size -- meaning, it is at nearly the same center with +// nearly the same size. +bool AlignmentPattern::aboutEquals(float moduleSize, float i, float j) const { + if (abs(i - getY()) <= moduleSize && abs(j - getX()) <= moduleSize) { + float moduleSizeDiff = abs(moduleSize - estimatedModuleSize_); + return moduleSizeDiff <= 1.0f || moduleSizeDiff <= estimatedModuleSize_; + } + return false; +} + +// Combines this object's current estimate of a finder pattern position and +// module size with a new estimate. It returns a new {@code FinderPattern} +// containing an average of the two. 
+Ref AlignmentPattern::combineEstimate(float i, float j, + float newModuleSize) const { + float combinedX = (getX() + j) / 2.0f; + float combinedY = (getY() + i) / 2.0f; + float combinedModuleSize = (estimatedModuleSize_ + newModuleSize) / 2.0f; + Ref result(new AlignmentPattern(combinedX, combinedY, combinedModuleSize)); + return result; +} +} // namespace qrcode +} // namespace zxing \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/alignment_pattern.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/alignment_pattern.hpp new file mode 100644 index 00000000..74a15e32 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/alignment_pattern.hpp @@ -0,0 +1,34 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_QRCODE_DETECTOR_ALIGNMENT_PATTERN_HPP_ +#define __ZXING_QRCODE_DETECTOR_ALIGNMENT_PATTERN_HPP_ + +#include "../../common/bitmatrix.hpp" +#include "../../resultpoint.hpp" +namespace zxing { +namespace qrcode { + +class AlignmentPattern : public ResultPoint { +private: + float estimatedModuleSize_; + +public: + AlignmentPattern(float posX, float posY, float estimatedModuleSize); + bool aboutEquals(float moduleSize, float i, float j) const; + float getModuleSize() { return estimatedModuleSize_; }; + + Ref combineEstimate(float i, float j, float newModuleSize) const; +}; + +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DETECTOR_ALIGNMENT_PATTERN_HPP_ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/alignment_pattern_finder.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/alignment_pattern_finder.cpp new file mode 100644 index 00000000..072371da --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/alignment_pattern_finder.cpp @@ -0,0 +1,231 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "alignment_pattern_finder.hpp" + +using zxing::ErrorHandler; +using zxing::ReaderErrorHandler; +using zxing::Ref; +using zxing::qrcode::AlignmentPattern; +using zxing::qrcode::AlignmentPatternFinder; +using zxing::qrcode::FinderPattern; + +// VC++ +// This class attempts to find alignment patterns in a QR Code. 
Alignment +// patterns look like finder patterns but are smaller and appear at regular +// intervals throughout the image. At the moment this only looks for the +// bottom-right alignment pattern. This is mostly a simplified copy of {@link +// FinderPatternFinder}. It is copied, pasted and stripped down here for maximum +// performance but does unfortunately duplicat some code. This class is +// thread-safe but not reentrant. Each thread must allocate its own object. +using zxing::BitMatrix; + +// Creates a finder that will look in a portion of the whole image. +AlignmentPatternFinder::AlignmentPatternFinder(Ref image, int startX, int startY, + int width, int height, float moduleSize) + : image_(image), + possibleCenters_(new vector()), + startX_(startX), + startY_(startY), + width_(width), + height_(height), + moduleSize_(moduleSize) {} + +AlignmentPatternFinder::AlignmentPatternFinder(Ref image, float moduleSize) + : image_(image), + moduleSize_(moduleSize) {} + +// This method attempts to find the bottom-right alignment pattern in the image. +// It is a bit messy since it's pretty performance-critical and so is written to +// be fast foremost. +Ref AlignmentPatternFinder::find(ErrorHandler &err_handler) { + int maxJ = startX_ + width_; + int middleI = startY_ + (height_ >> 1); + // We are looking for black/white/black modules in 1:1:1 ratio; + // this tracks the number of black/white/black modules seen so far + vector stateCount(3, 0); + for (int iGen = 0; iGen < height_; iGen++) { + // Search from middle outwards + int i = middleI + ((iGen & 0x01) == 0 ? 
((iGen + 1) >> 1) : -((iGen + 1) >> 1)); + stateCount[0] = 0; + stateCount[1] = 0; + stateCount[2] = 0; + int j = startX_; + // Burn off leading white pixels before anything else; if we start in + // the middle of a white run, it doesn't make sense to count its length, + // since we don't know if the white run continued to the left of the + // start point + while (j < maxJ && !image_->get(j, i)) { + j++; + } + int currentState = 0; + while (j < maxJ) { + if (image_->get(j, i)) { + // Black pixel + if (currentState == 1) { // Counting black pixels + stateCount[currentState]++; + } else { // Counting white pixels + if (currentState == 2) { // A winner? + if (foundPatternCross(stateCount)) { // Yes + Ref confirmed(handlePossibleCenter(stateCount, i, j)); + if (confirmed != 0) { + return confirmed; + } + } + stateCount[0] = stateCount[2]; + stateCount[1] = 1; + stateCount[2] = 0; + currentState = 1; + } else { + stateCount[++currentState]++; + } + } + } else { // White pixel + if (currentState == 1) { // Counting black pixels + currentState++; + } + stateCount[currentState]++; + } + j++; + } + if (foundPatternCross(stateCount)) { + Ref confirmed(handlePossibleCenter(stateCount, i, maxJ)); + if (confirmed != 0) { + return confirmed; + } + } + } + // Nothing we saw was observed and confirmed twice. If we had any guess at + // all, return it. + if (possibleCenters_->size() > 0) { + Ref center((*possibleCenters_)[0]); + return center; + } + err_handler = ReaderErrorHandler("Could not find alignment pattern"); + return Ref(); +} + + +// Given a count of black/white/black pixels just seen and an end position, +// figures the location of the center of this black/white/black run. 
+float AlignmentPatternFinder::centerFromEnd(vector &stateCount, int end) { + return (float)(end - stateCount[2]) - stateCount[1] / 2.0f; +} + + + +bool AlignmentPatternFinder::foundPatternCross(vector &stateCount) { + float maxVariance = moduleSize_ / 2.0f; + for (int i = 0; i < 3; i++) { + if (abs(moduleSize_ - stateCount[i]) >= maxVariance) { + return false; + } + } + return true; +} + +// After a horizontal scan finds a potential alignment pattern, this method +// "cross-checks" by scanning down vertically through the center of the possible +// alignment pattern to see if the same proportion is detected. return vertical +// center of alignment pattern, or nan() if not found startI: row where an +// alignment pattern was detected centerJ: center of the section that appears to +// cross an alignment pattern +// maxCount: maximum reasonable number of modules that should be observed in any +// reading state, +// based on the results of the horizontal scan +float AlignmentPatternFinder::crossCheckVertical(int startI, int centerJ, int maxCount, + int originalStateCountTotal) { + // This is slightly faster than using the Ref. 
Efficiency is important here + BitMatrix &matrix = *image_; + + int maxI = matrix.getHeight(); + vector stateCount(3, 0); + // Start counting up from center + int i = startI; + while (i >= 0 && matrix.get(centerJ, i) && stateCount[1] <= maxCount) { + stateCount[1]++; + i--; + } + // If already too many modules in this state or ran off the edge: + if (i < 0 || stateCount[1] > maxCount) { + return nan(); + } + while (i >= 0 && !matrix.get(centerJ, i) && stateCount[0] <= maxCount) { + stateCount[0]++; + i--; + } + if (stateCount[0] > maxCount) { + return nan(); + } + + // Now also count down from center + i = startI + 1; + while (i < maxI && matrix.get(centerJ, i) && stateCount[1] <= maxCount) { + stateCount[1]++; + i++; + } + if (i == maxI || stateCount[1] > maxCount) { + return nan(); + } + while (i < maxI && !matrix.get(centerJ, i) && stateCount[2] <= maxCount) { + stateCount[2]++; + i++; + } + if (stateCount[2] > maxCount) { + return nan(); + } + + int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2]; + if (5 * abs(stateCountTotal - originalStateCountTotal) >= 2 * originalStateCountTotal) { + return nan(); + } + return foundPatternCross(stateCount) ? centerFromEnd(stateCount, i) : nan(); +} + + +// This is called when a horizontal scan finds a possible alignment pattern. It +// will cross check with a vertical scan, and if successful, will see if this +// pattern had been found on a previous horizontal scan. If so, we consider it +// confirmed and conclude we have found the alignment pattern. 
return {@link +// AlignmentPattern} if we have found the same pattern twice, or null if not i: +// row where alignment pattern may be found j: end of possible alignment pattern +// in row +Ref AlignmentPatternFinder::handlePossibleCenter(vector &stateCount, int i, + int j) { + int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2]; + float centerJ = centerFromEnd(stateCount, j); + float centerI = crossCheckVertical(i, (int)centerJ, 2 * stateCount[1], stateCountTotal); + if (!isnan(centerI)) { + float estimatedModuleSize = (float)(stateCount[0] + stateCount[1] + stateCount[2]) / 3.0f; + int max = possibleCenters_->size(); + for (int index = 0; index < max; index++) { + Ref center((*possibleCenters_)[index]); + // Look for about the same center and module size: + if (center->aboutEquals(estimatedModuleSize, centerI, centerJ)) { + return center->combineEstimate(centerI, centerJ, estimatedModuleSize); + } + } + // Hadn't found this before; save it + AlignmentPattern *tmp = new AlignmentPattern(centerJ, centerI, estimatedModuleSize); + tmp->retain(); + possibleCenters_->push_back(tmp); + } + Ref result; + return result; +} + + +AlignmentPatternFinder::~AlignmentPatternFinder() { + for (int i = 0; i < int(possibleCenters_->size()); i++) { + (*possibleCenters_)[i]->release(); + (*possibleCenters_)[i] = 0; + } + delete possibleCenters_; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/alignment_pattern_finder.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/alignment_pattern_finder.hpp new file mode 100644 index 00000000..ace3064a --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/alignment_pattern_finder.hpp @@ -0,0 +1,60 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_QRCODE_DETECTOR_ALIGNMENT_PATTERN_FINDER_HPP_ +#define __ZXING_QRCODE_DETECTOR_ALIGNMENT_PATTERN_FINDER_HPP_ + +#include "../../common/bitmatrix.hpp" +#include "../../common/counted.hpp" +#include "../../errorhandler.hpp" +#include "alignment_pattern.hpp" +#include "finder_pattern.hpp" + +namespace zxing { +namespace qrcode { + +class AlignmentPatternFinder : public Counted { +private: + static int CENTER_QUORUM; + static int MIN_SKIP; + static int MAX_MODULES; + + Ref image_; + std::vector *possibleCenters_; + + int startX_; + int startY_; + int width_; + int height_; + float moduleSize_; + static float centerFromEnd(std::vector &stateCount, int end); + float crossCheckVertical(int startI, int centerJ, int maxCount, int originalStateCountTotal); + + +public: + AlignmentPatternFinder(Ref image, int startX, int startY, int width, int height, + float moduleSize); + AlignmentPatternFinder(Ref image, float moduleSize); + ~AlignmentPatternFinder(); + + Ref find(ErrorHandler &err_handler); + bool foundPatternCross(std::vector &stateCount); + Ref handlePossibleCenter(std::vector &stateCount, int i, int j); + + +private: + AlignmentPatternFinder(const AlignmentPatternFinder &); + AlignmentPatternFinder &operator=(const AlignmentPatternFinder &); + +}; +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DETECTOR_ALIGNMENT_PATTERN_FINDER_HPP_ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/detector.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/detector.cpp new file mode 100644 index 00000000..e22a852f --- /dev/null +++ 
b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/detector.cpp @@ -0,0 +1,1065 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "detector.hpp" +#include +#include "../../common/grid_sampler.hpp" +#include "../../common/mathutils.hpp" +#include "../../decodehints.hpp" +#include "../version.hpp" +#include "alignment_pattern.hpp" +#include "alignment_pattern_finder.hpp" +#include "finder_pattern.hpp" +#include "finder_pattern_finder.hpp" +#include "opencv2/core.hpp" + +using zxing::BitMatrix; +using zxing::DetectorResult; +using zxing::ErrorHandler; +using zxing::PerspectiveTransform; +using zxing::Ref; +using zxing::common::MathUtils; +using zxing::qrcode::AlignmentPattern; +using zxing::qrcode::Detector; +using zxing::qrcode::FinderPattern; + +// VC++ +using zxing::DecodeHints; +using zxing::ResultPoint; +using zxing::UnicomBlock; +using zxing::qrcode::FinderPatternFinder; +using zxing::qrcode::FinderPatternInfo; +using zxing::qrcode::PatternResult; + +// Encapsulates logic that can detect a QR Code in an image, +// even if the QR Code is rotated or skewed, or partially obscured. 
+Detector::Detector(Ref image, Ref block) : image_(image), block_(block) { + detectorState_ = START; + possiblePatternResults_.clear(); +} + +Ref Detector::getImage() const { return image_; } + +// Detects a QR Code in an image +void Detector::detect(DecodeHints const &hints, ErrorHandler &err_handler) { + FinderPatternFinder finder(image_, block_); + std::vector > finderInfos = finder.find(hints, err_handler); + if (err_handler.ErrCode()) return; + + // Get all possible results + possiblePatternResults_.clear(); + + for (size_t i = 0; i < finderInfos.size(); i++) { + Ref result(new PatternResult(finderInfos[i])); + result->possibleVersion = 0; + result->possibleFix = 0.0f; + result->possibleModuleSize = 0.0f; + + possiblePatternResults_.push_back(result); + } + detectorState_ = FINDFINDERPATTERN; +} + +int Detector::getPossibleAlignmentCount(int idx) { + if (idx >= int(possiblePatternResults_.size())) { + return -1; + } + + ErrorHandler err_handler; + // If it is first time to get, process it now + if (possiblePatternResults_[idx]->possibleAlignmentPatterns.size() == 0) { + Ref result = + processFinderPatternInfo(possiblePatternResults_[idx]->finderPatternInfo, err_handler); + if (err_handler.ErrCode()) return -1; + + possiblePatternResults_[idx] = result; + } + + return possiblePatternResults_[idx]->possibleAlignmentPatterns.size(); +} + +Ref Detector::getResultViaAlignment(int patternIdx, int alignmentIdx, + int possibleDimension, + ErrorHandler &err_handler) { + if (patternIdx >= int(possiblePatternResults_.size()) || patternIdx < 0) { + return Ref(NULL); + } + + if (alignmentIdx >= + int(possiblePatternResults_[patternIdx]->possibleAlignmentPatterns.size()) || + alignmentIdx < 0) { + return Ref(NULL); + } + + // Default is the dimension + if (possibleDimension <= 0) { + possibleDimension = possiblePatternResults_[patternIdx]->getDimension(); + } + + Ref topLeft( + possiblePatternResults_[patternIdx]->finderPatternInfo->getTopLeft()); + Ref topRight( + 
possiblePatternResults_[patternIdx]->finderPatternInfo->getTopRight()); + Ref bottomLeft( + possiblePatternResults_[patternIdx]->finderPatternInfo->getBottomLeft()); + + Ref alignment( + possiblePatternResults_[patternIdx]->possibleAlignmentPatterns[alignmentIdx]); + Ref transform = + createTransform(topLeft, topRight, bottomLeft, alignment, possibleDimension); + Ref bits(sampleGrid(image_, possibleDimension, transform, err_handler)); + if (err_handler.ErrCode()) return Ref(); + + ArrayRef > corrners(new Array >(4)); + vector points(8, 0.0f); + points[0] = 0.0f; + points[1] = possibleDimension; // bottomLeft + points[2] = 0.0f; + points[3] = 0.0f; // topLeft + points[4] = possibleDimension; + points[5] = 0.0f; // topRight + points[6] = possibleDimension; + points[7] = possibleDimension; // bottomRight + transform->transformPoints(points); + corrners[0].reset(Ref(new FinderPattern(points[0], points[1], 0))); + corrners[1].reset(Ref(new FinderPattern(points[2], points[3], 0))); + corrners[2].reset(Ref(new FinderPattern(points[4], points[5], 0))); + corrners[3].reset(Ref(new FinderPattern(points[6], points[7], 0))); + + Ref result(new DetectorResult(bits, corrners, possibleDimension)); + return result; +} + +bool Detector::hasSameResult(vector > possibleAlignmentPatterns, + Ref alignmentPattern) { + float moduleSize = alignmentPattern->getModuleSize() / 5.0; + + if (moduleSize < 1.0) { + moduleSize = 1.0; + } + + for (size_t i = 0; i < possibleAlignmentPatterns.size(); i++) { + if (possibleAlignmentPatterns[i]->aboutEquals(moduleSize, alignmentPattern->getY(), + alignmentPattern->getX())) { + return true; + } + } + return false; +} + +Ref Detector::getNearestAlignmentPattern(int tryFindRange, float moduleSize, + int estAlignmentX, int estAlignmentY) { + Ref alignmentPattern; + + ErrorHandler err_handler; + for (int i = 2; i <= tryFindRange; i <<= 1) { + err_handler.Reset(); + alignmentPattern = + findAlignmentInRegion(moduleSize, estAlignmentX, estAlignmentY, 
(float)i, err_handler); + if (err_handler.ErrCode() == 0) break; + } + + return alignmentPattern; +} + +Ref Detector::processFinderPatternInfo(Ref info, + ErrorHandler &err_handler) { + Ref topLeft(info->getTopLeft()); + Ref topRight(info->getTopRight()); + Ref bottomLeft(info->getBottomLeft()); + + Ref result(new PatternResult(info)); + result->finderPatternInfo = info; + result->possibleAlignmentPatterns.clear(); + + float moduleSizeX_ = calculateModuleSizeOneWay( + topLeft, topRight, topLeft->getHorizontalCheckState(), topRight->getHorizontalCheckState()); + float moduleSizeY_ = calculateModuleSizeOneWay( + topLeft, bottomLeft, topLeft->getVerticalCheckState(), bottomLeft->getVerticalCheckState()); + + if (moduleSizeX_ < 1.0f || moduleSizeY_ < 1.0f) { + err_handler = ReaderErrorHandler("bad midule size"); + return Ref(); + } + + float moduleSize = (moduleSizeX_ + moduleSizeY_) / 2.0f; + + if (moduleSize > topLeft->getEstimatedModuleSize() * 1.05 && + moduleSize > topRight->getEstimatedModuleSize() * 1.05 && + moduleSize > bottomLeft->getEstimatedModuleSize() * 1.05) { + moduleSize = (topLeft->getEstimatedModuleSize() + topRight->getEstimatedModuleSize() + + bottomLeft->getEstimatedModuleSize()) / + 3; + moduleSizeX_ = moduleSize; + moduleSizeY_ = moduleSize; + } + result->possibleModuleSize = moduleSize; + + if (moduleSize < 1.0f) { + err_handler = ReaderErrorHandler("bad midule size"); + return Ref(); + } + int dimension = computeDimension(topLeft, topRight, bottomLeft, moduleSizeX_, moduleSizeY_); + Version *provisionalVersion = NULL; + + // Try demension around if it cannot get a version + int dimensionDiff[5] = {0, 1, -1, 2, -2}; + + int oriDimension = dimension; + + for (int i = 0; i < 5; i++) { + err_handler.Reset(); + dimension = oriDimension + dimensionDiff[i]; + + provisionalVersion = Version::getProvisionalVersionForDimension(dimension, err_handler); + if (err_handler.ErrCode() == 0) break; + } + if (provisionalVersion == NULL) { + err_handler = 
zxing::ReaderErrorHandler("Cannot get version number"); + return Ref(); + } + + result->possibleDimension = dimension; + + result->possibleVersion = provisionalVersion->getVersionNumber(); + + int modulesBetweenFPCenters = provisionalVersion->getDimensionForVersion(err_handler) - 7; + if (err_handler.ErrCode()) return Ref(); + + Ref alignmentPattern; + + // Guess where a "bottom right" finder pattern would have been + float bottomRightX = topRight->getX() - topLeft->getX() + bottomLeft->getX(); + float bottomRightY = topRight->getY() - topLeft->getY() + bottomLeft->getY(); + // Estimate that alignment pattern is closer by 3 modules from "bottom + // right" to known top left location + float correctionToTopLeft = 1.0f - 3.0f / (float)modulesBetweenFPCenters; + int estAlignmentX = + (int)(topLeft->getX() + correctionToTopLeft * (bottomRightX - topLeft->getX())); + int estAlignmentY = + (int)(topLeft->getY() + correctionToTopLeft * (bottomRightY - topLeft->getY())); + + Ref estimateCenter( + new AlignmentPattern(estAlignmentX, estAlignmentY, moduleSize)); + + bool foundFitLine = false; + Ref fitLineCenter; + + fitLineCenter = + findAlignmentWithFitLine(topLeft, topRight, bottomLeft, moduleSize, err_handler); + if (err_handler.ErrCode() == 0) { + if (fitLineCenter != NULL && + MathUtils::isInRange(fitLineCenter->getX(), fitLineCenter->getY(), image_->getWidth(), + image_->getHeight())) { + foundFitLine = true; + } + } + err_handler.Reset(); + + Ref fitAP, estAP; + + // Anything above version 1 has an alignment pattern + if (provisionalVersion->getAlignmentPatternCenters().size()) { + // if(alignmentPattern!=NULL&&alignmentPattern->getX()>0&&alignmentPattern->getY()>0){ + int tryFindRange = provisionalVersion->getDimensionForVersion(err_handler) / 2; + if (err_handler.ErrCode()) return Ref(); + + if (foundFitLine == true) { + fitAP = getNearestAlignmentPattern(tryFindRange, moduleSize, fitLineCenter->getX(), + fitLineCenter->getY()); + + if (fitAP != NULL && 
!hasSameResult(result->possibleAlignmentPatterns, fitAP)) + // if (fitAP != NULL && + // !hasSameResult(result->possibleAlignmentPatterns, fitAP) && + // checkConvexQuadrilateral(topLeft, topRight, bottomLeft, fitAP)) + { + result->possibleAlignmentPatterns.push_back(fitAP); + } + } + + estAP = getNearestAlignmentPattern(tryFindRange, moduleSize, estimateCenter->getX(), + estimateCenter->getY()); + + if (estAP != NULL && !hasSameResult(result->possibleAlignmentPatterns, estAP)) + // if (estAP != NULL && + // !hasSameResult(result->possibleAlignmentPatterns, estAP) && + // checkConvexQuadrilateral(topLeft, topRight, bottomLeft, estAP)) + { + result->possibleAlignmentPatterns.push_back(estAP); + } + } + + // Any way use the fit line result + if (foundFitLine == true && !hasSameResult(result->possibleAlignmentPatterns, fitLineCenter)) { + float alignmentX = fitLineCenter->getX(); + float alignmentY = fitLineCenter->getY(); + fixAlignmentPattern(alignmentX, alignmentY, moduleSize); + Ref fitLineCenterFixed = + Ref(new AlignmentPattern(alignmentX, alignmentY, moduleSize)); + if (!hasSameResult(result->possibleAlignmentPatterns, fitLineCenterFixed)) { + result->possibleAlignmentPatterns.push_back(fitLineCenterFixed); + } + + if (!hasSameResult(result->possibleAlignmentPatterns, fitLineCenter)) { + result->possibleAlignmentPatterns.push_back(fitLineCenter); + } + } + + if (!hasSameResult(result->possibleAlignmentPatterns, estimateCenter)) { + float alignmentX = estimateCenter->getX(); + float alignmentY = estimateCenter->getY(); + fixAlignmentPattern(alignmentX, alignmentY, moduleSize); + Ref estimateCenterFixed = + Ref(new AlignmentPattern(alignmentX, alignmentY, moduleSize)); + if (!hasSameResult(result->possibleAlignmentPatterns, estimateCenterFixed)) { + result->possibleAlignmentPatterns.push_back(estimateCenterFixed); + } + + if (!hasSameResult(result->possibleAlignmentPatterns, estimateCenter)) { + result->possibleAlignmentPatterns.push_back(estimateCenter); + } + } 
+ Ref NoneEstimateCenter = + Ref(new AlignmentPattern(0, 0, moduleSize)); + result->possibleAlignmentPatterns.push_back(NoneEstimateCenter); + + if (result->possibleAlignmentPatterns.size() > 0) { + result->confirmedAlignmentPattern = result->possibleAlignmentPatterns[0]; + } + detectorState_ = FINDALIGNPATTERN; + + return result; +} + +// Computes an average estimated module size based on estimated derived from the +// positions of the three finder patterns. +float Detector::calculateModuleSize(Ref topLeft, Ref topRight, + Ref bottomLeft) { + // Take the average + return (calculateModuleSizeOneWay(topLeft, topRight, NORMAL, NORMAL) + + calculateModuleSizeOneWay(topLeft, bottomLeft, NORMAL, NORMAL)) / + 2.0f; +} + +// Estimates module size based on two finder patterns +// it uses sizeOfBlackWhiteBlackRunBothWays() to figure the width of each, +// measuring along the axis between their centers. +float Detector::calculateModuleSizeOneWay(Ref pattern, Ref otherPattern, + int patternState, int otherPatternState) { + float moduleSizeEst1 = sizeOfBlackWhiteBlackRunBothWays( + (int)pattern->getX(), (int)pattern->getY(), (int)otherPattern->getX(), + (int)otherPattern->getY(), patternState, false); + float moduleSizeEst2 = sizeOfBlackWhiteBlackRunBothWays( + (int)otherPattern->getX(), (int)otherPattern->getY(), (int)pattern->getX(), + (int)pattern->getY(), otherPatternState, true); + if (zxing::isnan(moduleSizeEst1)) { + return moduleSizeEst2 / 7.0f; + } + if (zxing::isnan(moduleSizeEst2)) { + return moduleSizeEst1 / 7.0f; + } + // Average them, and divide by 7 since we've counted the width of 3 black + // modules, and 1 white and 1 black module on either side. Ergo, divide sum + // by 14. + return (moduleSizeEst1 + moduleSizeEst2) / 14.0f; +} + +// Computes the total width of a finder pattern by looking for a +// black-white-black run from the center in the direction of another point +// (another finder pattern center), and in the opposite direction too. 
+float Detector::sizeOfBlackWhiteBlackRunBothWays(int fromX, int fromY, int toX, int toY, + int patternState, bool isReverse) { + float result1 = sizeOfBlackWhiteBlackRun(fromX, fromY, toX, toY); + float result = 0.0; + // Now count other way -- don't run off image though of course + float scale = 1.0f; + int otherToX = fromX - (toX - fromX); + if (otherToX < 0) { + scale = (float)fromX / (float)(fromX - otherToX); + otherToX = 0; + } else if (otherToX >= (int)image_->getWidth()) { + scale = (float)(image_->getWidth() - 1 - fromX) / (float)(otherToX - fromX); + otherToX = image_->getWidth() - 1; + } + int otherToY = (int)(fromY - (toY - fromY) * scale); + + scale = 1.0f; + if (otherToY < 0) { + scale = (float)fromY / (float)(fromY - otherToY); + otherToY = 0; + } else if (otherToY >= (int)image_->getHeight()) { + scale = (float)(image_->getHeight() - 1 - fromY) / (float)(otherToY - fromY); + otherToY = image_->getHeight() - 1; + } + otherToX = (int)(fromX + (otherToX - fromX) * scale); + + float result2 = sizeOfBlackWhiteBlackRun(fromX, fromY, otherToX, otherToY); + + if (patternState == FinderPattern::HORIZONTAL_STATE_LEFT_SPILL || + patternState == FinderPattern::VERTICAL_STATE_UP_SPILL) { + if (!isReverse) + result = result1 * 2; + else + result = result2 * 2; + } else if (patternState == FinderPattern::HORIZONTAL_STATE_RIGHT_SPILL || + patternState == FinderPattern::VERTICAL_STATE_DOWN_SPILL) { + if (!isReverse) + result = result2 * 2; + else + result = result1 * 2; + } else { + result = result1 + result2; + } + // Middle pixel is double-counted this way; subtract 1 + return result - 1.0f; +} + +Ref Detector::sampleGrid(Ref image, int dimension, + Ref transform, + ErrorHandler &err_handler) { + GridSampler &sampler = GridSampler::getInstance(); + // return sampler.sampleGrid(image, dimension, transform); + Ref bits = sampler.sampleGrid(image, dimension, transform, err_handler); + if (err_handler.ErrCode()) return Ref(); + return bits; +} + +// This method 
traces a line from a point in the image, in the direction towards +// another point. It begins in a black region, and keeps going until it finds +// white, then black, then white again. It reports the distance from the start +// to this point. +float Detector::sizeOfBlackWhiteBlackRun(int fromX, int fromY, int toX, int toY) { + // Mild variant of Bresenham's algorithm; + // see http://en.wikipedia.org/wiki/Bresenham's_line_algorithm + bool steep = abs(toY - fromY) > abs(toX - fromX); + if (steep) { + // swap(fromX,fromY) + int temp = fromX; + fromX = fromY; + fromY = temp; + // swap(toX,toY) + temp = toX; + toX = toY; + toY = temp; + } + + int dx = abs(toX - fromX); + int dy = abs(toY - fromY); + int error = -dx >> 1; + int xstep = fromX < toX ? 1 : -1; + int ystep = fromY < toY ? 1 : -1; + // In black pixels, looking for white, first or second time. + int state = 0; + // Loop up until x == toX, but not beyond + int xLimit = toX + xstep; + for (int x = fromX, y = fromY; x != xLimit; x += xstep) { + int realX = steep ? y : x; + int realY = steep ? x : y; + + // Does current pixel mean we have moved white to black or vice versa? + // Scanning black in state 0,2 and white in state 1, so if we find the + // wrong color, advance to next state or end if we are in state 2 + // already + if (!((state == 1) ^ image_->get(realX, realY))) { + if (state == 2) { + return MathUtils::distance(x, y, fromX, fromY); + } + state++; + } + + error += dy; + if (error > 0) { + if (y == toY) { + break; + } + y += ystep; + error -= dx; + } + } + // Found black-white-black; give the benefit of the doubt that the next + // pixel outside the image is "white" so this last point at (toX+xStep,toY) + // is the right ending. This is really a small approximation; + // (toX+xStep,toY+yStep) might be really correct. Ignore this. 
+ if (state == 2) { + return MathUtils::distance(toX + xstep, toY, fromX, fromY); + } + // else we didn't find even black-white-black; no estimate is really + // possible + return nan(); +} + +// Attempts to locate an alignment pattern in a limited region of the image, +// which is guessed to contain it. +Ref Detector::findAlignmentInRegion(float overallEstModuleSize, int estAlignmentX, + int estAlignmentY, float allowanceFactor, + ErrorHandler &err_handler) { + // Look for an alignment pattern (3 modules in size) around where it should + // be + int allowance = (int)(allowanceFactor * overallEstModuleSize); + int alignmentAreaLeftX = max(0, estAlignmentX - allowance); + int alignmentAreaRightX = min((int)(image_->getWidth() - 1), estAlignmentX + allowance); + if (alignmentAreaRightX - alignmentAreaLeftX < overallEstModuleSize * 3) { + err_handler = ReaderErrorHandler("region too small to hold alignment pattern"); + return Ref(); + } + int alignmentAreaTopY = max(0, estAlignmentY - allowance); + int alignmentAreaBottomY = min((int)(image_->getHeight() - 1), estAlignmentY + allowance); + if (alignmentAreaBottomY - alignmentAreaTopY < overallEstModuleSize * 3) { + err_handler = ReaderErrorHandler("region too small to hold alignment pattern"); + return Ref(); + } + + AlignmentPatternFinder alignmentFinder( + image_, alignmentAreaLeftX, alignmentAreaTopY, alignmentAreaRightX - alignmentAreaLeftX, + alignmentAreaBottomY - alignmentAreaTopY, overallEstModuleSize); + + Ref ap = alignmentFinder.find(err_handler); + if (err_handler.ErrCode()) return Ref(); + return ap; + +} + +Ref Detector::findAlignmentWithFitLine(Ref topLeft, + Ref topRight, + Ref bottomLeft, + float moduleSize, + ErrorHandler &err_handler) { + float alignmentX = 0.0f, alignmentY = 0.0f; + int imgWidth = image_->getWidth(); + int imgHeight = image_->getHeight(); + Rect bottomLeftRect, topRightRect; + double rectSize = moduleSize * 7; + bottomLeftRect.x = + (bottomLeft->getX() - rectSize / 2.0f) > 0 ? 
(bottomLeft->getX() - rectSize / 2.0f) : 0; + bottomLeftRect.y = + (bottomLeft->getY() - rectSize / 2.0f) > 0 ? (bottomLeft->getY() - rectSize / 2.0f) : 0; + bottomLeftRect.width = (bottomLeft->getX() - bottomLeftRect.x) * 2; + if (bottomLeftRect.x + bottomLeftRect.width > imgWidth) + bottomLeftRect.width = imgWidth - bottomLeftRect.x; + bottomLeftRect.height = (bottomLeft->getY() - bottomLeftRect.y) * 2; + if (bottomLeftRect.y + bottomLeftRect.height > imgHeight) + bottomLeftRect.height = imgHeight - bottomLeftRect.y; + + topRightRect.x = + (topRight->getX() - rectSize / 2.0f) > 0 ? (topRight->getX() - rectSize / 2.0f) : 0; + topRightRect.y = + (topRight->getY() - rectSize / 2.0f) > 0 ? (topRight->getY() - rectSize / 2.0f) : 0; + topRightRect.width = (topRight->getX() - topRightRect.x) * 2; + if (topRightRect.x + topRightRect.width > imgWidth) + topRightRect.width = imgWidth - topRightRect.x; + topRightRect.height = (topRight->getY() - topRightRect.y) * 2; + if (topRightRect.y + topRightRect.height > imgHeight) + topRightRect.height = imgHeight - topRightRect.y; + + vector > topRightPoints; + vector > bottomLeftPoints; + + findPointsForLine(topLeft, topRight, bottomLeft, topRightRect, bottomLeftRect, topRightPoints, + bottomLeftPoints, moduleSize); + + int a1; + float k1, b1; + int fitResult = fitLine(topRightPoints, k1, b1, a1); + if (fitResult < 0) { + err_handler = ReaderErrorHandler("Cannot find a valid divide for line fit"); + return Ref(); + } + + int a2; + float k2, b2; + int fitResult2 = fitLine(bottomLeftPoints, k2, b2, a2); + if (fitResult2 < 0) { + err_handler = ReaderErrorHandler("Cannot find a valid divide for line fit"); + return Ref(); + } + + int hasResult = 1; + if (a1 == 0) { + if (a2 == 0) { + hasResult = 0; + } else { + alignmentX = -b1; + alignmentY = b2 - b1 * k2; + } + } else { + if (a2 == 0) { + alignmentX = -b2; + alignmentY = b1 - b2 * k1; + } else { + if (k1 == k2) { + hasResult = 0; + } else { + alignmentX = (b2 - b1) / (k1 - k2); + 
alignmentY = k1 * alignmentX + b1; + } + } + } + + // Donot have a valid divide + if (hasResult == 0) { + err_handler = ReaderErrorHandler("Cannot find a valid divide for line fit"); + return Ref(); + } + Ref result(new AlignmentPattern(alignmentX, alignmentY, moduleSize)); + return result; +} + +void Detector::fixAlignmentPattern(float &alignmentX, float &alignmentY, float moduleSize) { + int imgWidth = image_->getWidth(); + int imgHeight = image_->getHeight(); + int maxFixStep = moduleSize * 2; + int fixStep = 0; + while (alignmentX < imgWidth && alignmentY < imgHeight && alignmentX > 0 && alignmentY > 0 && + !image_->get(alignmentX, alignmentY) && fixStep < maxFixStep) { + ++fixStep; + // Newest Version: The fix process is like this: + // 1 2 3 + // 4 0 5 + // 6 7 8 + for (int y = alignmentY - fixStep; y <= alignmentY + fixStep; y++) { + if (y == alignmentY - fixStep || y == alignmentY + fixStep) { + for (int x = alignmentX - fixStep; x <= alignmentX + fixStep; x++) { + if (x < imgWidth && y < imgHeight && x > 0 && y > 0 && image_->get(x, y)) { + alignmentX = x; + alignmentY = y; + return; + } + } + } else { + int x = alignmentX - fixStep; + if (x < imgWidth && y < imgHeight && x > 0 && y > 0 && image_->get(x, y)) { + alignmentX = x; + alignmentY = y; + return; + } + x = alignmentX + fixStep; + if (x < imgWidth && y < imgHeight && x > 0 && y > 0 && image_->get(x, y)) { + alignmentX = x; + alignmentY = y; + return; + } + } + } + } + + return; +} + +int Detector::fitLine(vector > &oldPoints, float &k, float &b, int &a) { + a = 1; + k = 0.0f; + b = 0.0f; + int old_num = oldPoints.size(); + if (old_num < 2) { + return -1; + } + float tolerance = 2.0f; + vector > fitPoints; + float pre_diff = -1; + for (vector >::iterator it = oldPoints.begin() + 1; it != oldPoints.end() - 1; + it++) { + float diff_x = 0.0f, diff_y = 0.0f, diff = 0.0f; + if (pre_diff < 0) { + diff_x = (*(it - 1))->getX() - (*it)->getX(); + diff_y = (*(it - 1))->getY() - (*it)->getY(); + diff = 
(diff_x * diff_x + diff_y * diff_y); + pre_diff = diff; + } + diff_x = (*(it + 1))->getX() - (*it)->getX(); + diff_y = (*(it + 1))->getY() - (*it)->getY(); + diff = (diff_x * diff_x + diff_y * diff_y); + if (pre_diff <= tolerance && diff <= tolerance) { + fitPoints.push_back(*(it)); + } + pre_diff = diff; + } + + int num = fitPoints.size(); + if (num < 2) return -1; + + double x = 0, y = 0, xx = 0, xy = 0, yy = 0, tem = 0; + for (int i = 0; i < num; i++) { + int point_x = fitPoints[i]->getX(); + int point_y = fitPoints[i]->getY(); + x += point_x; + y += point_y; + xx += point_x * point_x; + xy += point_x * point_y; + yy += point_y * point_y; + } + + tem = xx * num - x * x; + if (abs(tem) < 0.0000001) { + // Set b as average x + b = -x / num; + a = 0; + k = 1; + + return 1; + } + + k = (num * xy - x * y) / tem; + b = (y - k * x) / num; + a = 1; + if (abs(k) < 0.01) k = 0; + return 1; +} + +bool Detector::checkTolerance(Ref &topLeft, Ref &topRight, + Rect &topRightRect, double modelSize, Ref &p, int flag) { + int topLeftX = topLeft->getX(), topLeftY = topLeft->getY(), topRightX = topRight->getX(), + topRightY = topRight->getY(); + double left_right_k = 0.0f, left_right_b = 0.0f, left_right_b_tolerance, tolerance_b1 = 0.0f, + tolerance_b2 = 0.0f; + if (flag < 2) { + double tolerance_y1 = 0.0f, tolerance_y2 = 0.0f; + double tolerance_x = topRightRect.x; + if (flag == 1) tolerance_x = topRightRect.x + topRightRect.width; + if (topRightX != topLeftX) { + left_right_k = (topRightY - topLeftY) / (double)(topRightX - topLeftX); + left_right_b = (topRightY - left_right_k * topRightX); + double tmp_1 = modelSize * 2.5f; + double tmp_2 = tmp_1 * left_right_k; + + left_right_b_tolerance = sqrt(tmp_1 * tmp_1 + tmp_2 * tmp_2); + tolerance_b1 = left_right_b - left_right_b_tolerance; + tolerance_b2 = left_right_b + left_right_b_tolerance; + tolerance_y1 = left_right_k * tolerance_x + tolerance_b1; + tolerance_y2 = left_right_k * tolerance_x + tolerance_b2; + } else { + return 
false; + } + if (p->getY() < tolerance_y1 || p->getY() > tolerance_y2) return false; + return true; + } else { + double tolerance_x1 = 0.0f, tolerance_x2 = 0.0f; + if (topRightY != topLeftY) { + double tolerance_y = topRightRect.y; + if (flag == 3) tolerance_y = topRightRect.y + topRightRect.height; + left_right_k = (topRightX - topLeftX) / (double)(topRightY - topLeftY); + left_right_b = (topRightX - left_right_k * topRightY); + double tmp_1 = modelSize * 2.5f; + double tmp_2 = tmp_1 / left_right_k; + left_right_b_tolerance = sqrt(tmp_1 * tmp_1 + tmp_2 * tmp_2); + tolerance_b1 = left_right_b - left_right_b_tolerance; + tolerance_b2 = left_right_b + left_right_b_tolerance; + tolerance_x1 = left_right_k * tolerance_y + tolerance_b1; + tolerance_x2 = left_right_k * tolerance_y + tolerance_b2; + if (p->getX() < tolerance_x1 || p->getX() > tolerance_x2) return false; + return true; + } else { + return false; + } + } +} + +void Detector::findPointsForLine(Ref &topLeft, Ref &topRight, + Ref &bottomLeft, Rect topRightRect, + Rect bottomLeftRect, vector > &topRightPoints, + vector > &bottomLeftPoints, float modelSize) { + int topLeftX = topLeft->getX(), topLeftY = topLeft->getY(), topRightX = topRight->getX(), + topRightY = topRight->getY(); + if (!topRightPoints.empty()) topRightPoints.clear(); + if (!bottomLeftPoints.empty()) bottomLeftPoints.clear(); + + int xMin = 0; + int xMax = 0; + int yMin = 0; + int yMax = 0; + + int imgWidth = image_->getWidth(); + int imgHeight = image_->getHeight(); + + // [-45, 45] or [135, 180) or [-180, -45) + if (topLeftY == topRightY || abs((topRightX - topLeftX) / (topRightY - topLeftY)) >= 1) { + if (topLeftX < topRightX) { + xMin = topRightRect.x; + xMax = topRightRect.x + modelSize * 2; + yMin = topRightRect.y + modelSize; + yMax = topRightRect.y - modelSize + topRightRect.height; + // [-45, 45] TopRight: left, black->white points; BottomLeft: top, black->white points + MathUtils::getRangeValues(xMin, xMax, 0, imgWidth - 1); + 
MathUtils::getRangeValues(yMin, yMax, 0, imgHeight - 1); + + for (int i = yMin; i < yMax; i++) { + for (int j = xMin; j < xMax; j++) { + // left->right, black->white + if (image_->get(j, i) && !image_->get(j + 1, i)) { + Ref topRightPoint(new ResultPoint(j, i)); + if (checkTolerance(topLeft, topRight, topRightRect, modelSize, + topRightPoint, 0)) { + topRightPoints.push_back(topRightPoint); + break; + } + } + } + } + + xMin = bottomLeftRect.x + modelSize; + xMax = bottomLeftRect.x - modelSize + bottomLeftRect.width; + yMin = bottomLeftRect.y; + yMax = bottomLeftRect.y + 2 * modelSize; + + MathUtils::getRangeValues(xMin, xMax, 0, imgWidth - 1); + MathUtils::getRangeValues(yMin, yMax, 0, imgHeight - 1); + + for (int j = xMin; j < xMax; j++) { + for (int i = yMin; i < yMax; i++) { + // top to down, black->white + if (image_->get(j, i) && !image_->get(j, i + 1)) { + Ref bottomLeftPoint(new ResultPoint(j, i)); + if (checkTolerance(topLeft, bottomLeft, bottomLeftRect, modelSize, + bottomLeftPoint, 2)) { + bottomLeftPoints.push_back(bottomLeftPoint); + break; + } + } + } + } + } else { + // white->black points + xMin = topRightRect.x + topRightRect.width - 2 * modelSize; + xMax = topRightRect.x + topRightRect.width; + yMin = topRightRect.y + modelSize; + yMax = topRightRect.y - modelSize + topRightRect.height; + // [135, 180) or [-180, -45) TopRight: right, white->black points; BottomLeft: bottom, + MathUtils::getRangeValues(xMin, xMax, 0, imgWidth - 1); + MathUtils::getRangeValues(yMin, yMax, 0, imgHeight - 1); + + for (int i = yMin; i < yMax; i++) { + for (int j = xMin; j < xMax; j++) { + // left->right, white->black + if (!image_->get(j, i) && image_->get(j + 1, i)) { + Ref topRightPoint(new ResultPoint(j, i)); + if (checkTolerance(topLeft, topRight, topRightRect, modelSize, + topRightPoint, 1)) { + topRightPoints.push_back(topRightPoint); + break; + } + } + } + } + + xMin = bottomLeftRect.x + modelSize; + xMax = bottomLeftRect.x - modelSize + bottomLeftRect.width; + 
yMin = bottomLeftRect.y + bottomLeftRect.height - 2 * modelSize; + yMax = bottomLeftRect.y + bottomLeftRect.height; + + MathUtils::getRangeValues(xMin, xMax, 0, imgWidth - 1); + MathUtils::getRangeValues(yMin, yMax, 0, imgHeight - 1); + + for (int j = xMin; j < xMax; j++) { + for (int i = yMin; i < yMax; i++) { + // top to down, white->black + if (!image_->get(j, i) && image_->get(j, i + 1)) { + Ref bottomLeftPoint(new ResultPoint(j, i)); + if (checkTolerance(topLeft, bottomLeft, bottomLeftRect, modelSize, + bottomLeftPoint, 3)) { + bottomLeftPoints.push_back(bottomLeftPoint); + break; + } + } + } + } + } + } else { + // (45, 135) or (-45, -135) + // (45, 135) TopRight: top, black->white; BottomRight: right, black->white + if (topLeftY < topRightY) { + xMin = topRightRect.x + modelSize; + xMax = topRightRect.x - modelSize + topRightRect.width; + yMin = topRightRect.y; + yMax = topRightRect.y + 2 * modelSize; + + MathUtils::getRangeValues(xMin, xMax, 0, imgWidth - 1); + MathUtils::getRangeValues(yMin, yMax, 0, imgHeight - 1); + + for (int j = xMin; j < xMax; j++) { + for (int i = yMin; i < yMax; i++) { + // top to down, black->white + if (image_->get(j, i) && !image_->get(j, i + 1)) { + Ref topRightPoint(new ResultPoint(j, i)); + if (checkTolerance(topLeft, topRight, topRightRect, modelSize, + topRightPoint, 2)) { + topRightPoints.push_back(topRightPoint); + break; + } + } + } + } + + xMin = topRightRect.x + topRightRect.width - 2 * modelSize; + xMax = topRightRect.x + topRightRect.width; + yMin = topRightRect.y + modelSize; + yMax = topRightRect.y - modelSize + topRightRect.height; + + MathUtils::getRangeValues(xMin, xMax, 0, imgWidth - 1); + MathUtils::getRangeValues(yMin, yMax, 0, imgHeight - 1); + + for (int i = yMin; i < yMax; i++) { + for (int j = xMin; j < xMax; j++) { + // left to right, white-> black + if (!image_->get(j, i) && image_->get(j + 1, i)) { + Ref bottomLeftPoint(new ResultPoint(j, i)); + if (checkTolerance(topLeft, bottomLeft, bottomLeftRect, 
modelSize, + bottomLeftPoint, 1)) { + bottomLeftPoints.push_back(bottomLeftPoint); + break; + } + } + } + } + } else { + // (-45, -135) TopRight: bottom, white->black; BottomRight: left, black->white + xMin = topRightRect.x + modelSize; + xMax = topRightRect.x - modelSize + topRightRect.width; + yMin = topRightRect.y + topRightRect.height - 2 * modelSize; + yMax = topRightRect.y + topRightRect.height; + + MathUtils::getRangeValues(xMin, xMax, 0, imgWidth - 1); + MathUtils::getRangeValues(yMin, yMax, 0, imgHeight - 1); + + for (int j = xMin; j < xMax; j++) { + for (int i = yMin; i < yMax; i++) { + // top to down, white->balck + if (!image_->get(j, i) && image_->get(j, i + 1)) { + Ref topRightPoint(new ResultPoint(j, i)); + if (checkTolerance(topLeft, topRight, topRightRect, modelSize, + topRightPoint, 3)) { + topRightPoints.push_back(topRightPoint); + break; + } + } + } + } + + xMin = bottomLeftRect.x; + xMax = bottomLeftRect.x + 2 * modelSize; + yMin = bottomLeftRect.y + modelSize; + yMax = bottomLeftRect.y + bottomLeftRect.height - modelSize; + + MathUtils::getRangeValues(xMin, xMax, 0, imgWidth - 1); + MathUtils::getRangeValues(yMin, yMax, 0, imgHeight - 1); + + for (int i = yMin; i < yMax; i++) { + for (int j = xMin; j < xMax; j++) { + // left to right, black->white + if (image_->get(j, i) && !image_->get(j + 1, i)) { + Ref bottomLeftPoint(new ResultPoint(j, i)); + if (checkTolerance(topLeft, bottomLeft, bottomLeftRect, modelSize, + bottomLeftPoint, 0)) { + bottomLeftPoints.push_back(bottomLeftPoint); + break; + } + } + } + } + } + } +} + +Ref Detector::createTransform(Ref info, + Ref alignmentPattern, + int dimension) { + Ref topLeft(info->getTopLeft()); + Ref topRight(info->getTopRight()); + Ref bottomLeft(info->getBottomLeft()); + Ref transform = + createTransform(topLeft, topRight, bottomLeft, alignmentPattern, dimension); + return transform; +} + +Ref Detector::createTransform(Ref topLeft, + Ref topRight, + Ref bottomLeft, + Ref alignmentPattern, + int 
dimension) { + float dimMinusThree = (float)dimension - 3.5f; + float bottomRightX; + float bottomRightY; + float sourceBottomRightX; + float sourceBottomRightY; + if (alignmentPattern && alignmentPattern->getX()) { + bottomRightX = alignmentPattern->getX(); + bottomRightY = alignmentPattern->getY(); + sourceBottomRightX = dimMinusThree - 3.0f; + sourceBottomRightY = sourceBottomRightX; + } else { + // Don't have an alignment pattern, just make up the bottom-right point + bottomRightX = (topRight->getX() - topLeft->getX()) + bottomLeft->getX(); + bottomRightY = (topRight->getY() - topLeft->getY()) + bottomLeft->getY(); + float deltaX = topLeft->getX() - bottomLeft->getX(); + float deltaY = topLeft->getY() - bottomLeft->getY(); + if (fabs(deltaX) < fabs(deltaY)) + deltaY = topLeft->getY() - topRight->getY(); + else + deltaX = topLeft->getX() - topRight->getX(); + bottomRightX += 2 * deltaX; + bottomRightY += 2 * deltaY; + sourceBottomRightX = dimMinusThree; + sourceBottomRightY = dimMinusThree; + } + Ref transform(PerspectiveTransform::quadrilateralToQuadrilateral( + 3.5f, 3.5f, dimMinusThree, 3.5f, sourceBottomRightX, sourceBottomRightY, 3.5f, + dimMinusThree, topLeft->getX(), topLeft->getY(), topRight->getX(), topRight->getY(), + bottomRightX, bottomRightY, bottomLeft->getX(), bottomLeft->getY())); + return transform; +} + +// Computes the dimension (number of modules on a size) of the QR code based on +// the position of the finder patterns and estimated module size. 
+int Detector::computeDimension(Ref topLeft, Ref topRight, + Ref bottomLeft, float moduleSizeX, float moduleSizeY) { + int tltrCentersDimension = ResultPoint::distance(topLeft, topRight) / moduleSizeX; + int tlblCentersDimension = ResultPoint::distance(topLeft, bottomLeft) / moduleSizeY; + + float tmp_dimension = ((tltrCentersDimension + tlblCentersDimension) / 2.0) + 7.0; + int dimension = cvRound(tmp_dimension); + int mod = dimension & 0x03; // mod 4 + + switch (mod) { // mod 4 + case 0: + dimension++; + break; + // 1? do nothing + case 2: + dimension--; + break; + } + return dimension; +} + +bool Detector::checkConvexQuadrilateral(Ref topLeft, Ref topRight, + Ref bottomLeft, Ref bottomRight) { + float v1[2]; + float v2[2]; + float v3[2]; + float v4[2]; + + v1[0] = topLeft->getX() - topRight->getX(); + v1[1] = topLeft->getY() - topRight->getY(); + v2[0] = topRight->getX() - bottomRight->getX(); + v2[1] = topRight->getY() - bottomRight->getY(); + v3[0] = bottomRight->getX() - bottomLeft->getX(); + v3[1] = bottomRight->getY() - bottomLeft->getY(); + v4[0] = bottomLeft->getX() - topLeft->getX(); + v4[1] = bottomLeft->getY() - topLeft->getY(); + + float c1 = MathUtils::VecCross(v1, v2); + float c2 = MathUtils::VecCross(v2, v3); + float c3 = MathUtils::VecCross(v3, v4); + float c4 = MathUtils::VecCross(v4, v1); + + if ((c1 < 0.0 && c2 < 0.0 && c3 < 0.0 && c4 < 0.0) || + (c1 > 0.0 && c2 > 0.0 && c3 > 0.0 && c4 > 0.0)) + return true; + else + return false; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/detector.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/detector.hpp new file mode 100644 index 00000000..fbb8147d --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/detector.hpp @@ -0,0 +1,145 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_QRCODE_DETECTOR_DETECTOR_HPP_ +#define __ZXING_QRCODE_DETECTOR_DETECTOR_HPP_ + +#include "../../common/bitmatrix.hpp" +#include "../../common/counted.hpp" +#include "../../common/detector_result.hpp" +#include "../../common/perspective_transform.hpp" +#include "../../common/unicomblock.hpp" +#include "../../errorhandler.hpp" +#include "alignment_pattern.hpp" +#include "finder_pattern.hpp" +#include "finder_pattern_info.hpp" +#include "pattern_result.hpp" + +namespace zxing { +class DecodeHints; + +namespace qrcode { + +// Possible Detect Result + +class Detector : public Counted { +public: + enum DetectorState { + START = 10, + FINDFINDERPATTERN = 11, + FINDALIGNPATTERN = 12, + }; + + // Fix module size error when LEFT_SPILL or RIGHT_SPILL + enum FinderPatternMode { + NORMAL = 0, + LEFT_SPILL = 1, + RIGHT_SPILL = 2, + UP_SPILL = 3, + DOWN_SPILL = 4, + }; + + typedef struct Rect_ { + int x; + int y; + int width; + int height; + } Rect; + +private: + Ref image_; + Ref block_; + + vector > possiblePatternResults_; + + + DetectorState detectorState_; + +protected: + Ref getImage() const; + static int computeDimension(Ref topLeft, Ref topRight, + Ref bottomLeft, float moduleSizeX, float moduleSizeY); + float calculateModuleSize(Ref topLeft, Ref topRight, + Ref bottomLeft); + float calculateModuleSizeOneWay(Ref pattern, Ref otherPattern, + int patternState, int otherPatternState); + float sizeOfBlackWhiteBlackRunBothWays(int fromX, int fromY, int toX, int toY, int patternState, + bool isReverse); + float 
sizeOfBlackWhiteBlackRunBothWays(int fromX, int fromY, int toX, int toY); + float sizeOfBlackWhiteBlackRun(int fromX, int fromY, int toX, int toY); + Ref findAlignmentInRegion(float overallEstModuleSize, int estAlignmentX, + int estAlignmentY, float allowanceFactor, + ErrorHandler &err_handler); + Ref findAlignmentWithFitLine(Ref topLeft, + Ref topRight, + Ref bottomLeft, float moduleSize, + ErrorHandler &err_handler); + int fitLine(vector > &oldPoints, float &k, float &b, int &a); + bool checkTolerance(Ref &topLeft, Ref &topRight, Rect &topRightRect, + double modelSize, Ref &p, int flag); + void findPointsForLine(Ref &topLeft, Ref &topRight, + Ref &bottomLeft, Rect topRightRect, Rect bottomLeftRect, + vector > &topRightPoints, + vector > &bottomLeftPoints, float modelSize); + bool checkConvexQuadrilateral(Ref topLeft, Ref topRight, + Ref bottomLeft, Ref bottomRight); + +public: + virtual Ref createTransform(Ref topLeft, + Ref topRight, + Ref bottomLeft, + Ref alignmentPattern, + int dimension); + Ref createTransform(Ref finderPatternInfo, + Ref alignmentPattern, int dimension); + + static Ref sampleGrid(Ref image, int dimension, Ref, + ErrorHandler &err_handler); + + Detector(Ref image, Ref block); + void detect(DecodeHints const &hints, ErrorHandler &err_handler); + Ref getResultViaAlignment(int patternIdx, int alignmentIndex, + int possibleDimension, ErrorHandler &err_handler); + + int getPossiblePatternCount() { return possiblePatternResults_.size(); } + int getPossibleAlignmentCount(int idx); + + Ref getNearestAlignmentPattern(int tryFindRange, float moduleSize, + int estAlignmentX, int estAlignmentY); + bool hasSameResult(vector > possibleAlignmentPatterns, + Ref alignmentPattern); + void fixAlignmentPattern(float &alignmentX, float &alignmentY, float moduleSize); + + Ref processFinderPatternInfo(Ref info, + ErrorHandler &err_handler); + +public: + Ref getFinderPatternInfo(int idx) { + return possiblePatternResults_[idx]->finderPatternInfo; + } + Ref 
getAlignmentPattern(int patternIdx, int alignmentIdx) { + return possiblePatternResults_[patternIdx]->possibleAlignmentPatterns[alignmentIdx]; + } + + DetectorState getState() { return detectorState_; } + + unsigned int getPossibleVersion(int idx) { + return possiblePatternResults_[idx]->possibleVersion; + } + float getPossibleFix(int idx) { return possiblePatternResults_[idx]->possibleFix; } + float getPossibleModuleSize(int idx) { + return possiblePatternResults_[idx]->possibleModuleSize; + } + int getDimension(int idx) { return possiblePatternResults_[idx]->possibleDimension; } +}; +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DETECTOR_DETECTOR_HPP_ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern.cpp new file mode 100644 index 00000000..c357f312 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern.cpp @@ -0,0 +1,91 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../../../precomp.hpp" +#include "finder_pattern.hpp" + +using zxing::Ref; + +namespace zxing { +namespace qrcode { + +FinderPattern::FinderPattern(float posX, float posY, float estimatedModuleSize) + : ResultPoint(posX, posY), + estimatedModuleSize_(estimatedModuleSize), + count_(1), + horizontalState_(FinderPattern::HORIZONTAL_STATE_NORMAL), + verticalState_(FinderPattern::VERTICAL_STATE_NORMAL) { + fix_ = -1.0f; +} + +FinderPattern::FinderPattern(float posX, float posY, float estimatedModuleSize, int count) + : ResultPoint(posX, posY), + estimatedModuleSize_(estimatedModuleSize), + count_(count), + horizontalState_(FinderPattern::HORIZONTAL_STATE_NORMAL), + verticalState_(FinderPattern::VERTICAL_STATE_NORMAL) { + fix_ = -1.0f; +} +int FinderPattern::getCount() const { return count_; } +void FinderPattern::incrementCount() { count_++; } + +bool FinderPattern::aboutEquals(float moduleSize, float i, float j) const { + if (abs(i - getY()) <= moduleSize && abs(j - getX()) <= moduleSize) { + float moduleSizeDiff = abs(moduleSize - estimatedModuleSize_); + return moduleSizeDiff <= 1.0f || moduleSizeDiff <= estimatedModuleSize_; + } + return false; +} + +float FinderPattern::getEstimatedModuleSize() const { return estimatedModuleSize_; } + +Ref FinderPattern::combineEstimate(float i, float j, float newModuleSize) const { + int combinedCount = count_ + 1; + float combinedX = getX(); + float combinedY = getY(); + float combinedModuleSize = getEstimatedModuleSize(); + if (combinedCount <= 3) { + combinedX = (count_ * getX() + j) / combinedCount; + combinedY = (count_ * getY() + i) / combinedCount; + combinedModuleSize = (count_ * getEstimatedModuleSize() + newModuleSize) / combinedCount; + } + return Ref( + new FinderPattern(combinedX, combinedY, combinedModuleSize, combinedCount)); +} + +void FinderPattern::setHorizontalCheckState(int state) { + switch (state) { + case 0: + horizontalState_ = FinderPattern::HORIZONTAL_STATE_NORMAL; + break; + case 1: + 
horizontalState_ = FinderPattern::HORIZONTAL_STATE_LEFT_SPILL; + break; + case 2: + horizontalState_ = FinderPattern::HORIZONTAL_STATE_RIGHT_SPILL; + break; + } + return; +} +void FinderPattern::setVerticalCheckState(int state) { + switch (state) { + case 0: + verticalState_ = FinderPattern::VERTICAL_STATE_NORMAL; + break; + case 1: + verticalState_ = FinderPattern::VERTICAL_STATE_UP_SPILL; + break; + case 2: + verticalState_ = FinderPattern::VERTICAL_STATE_DOWN_SPILL; + break; + } + return; +} +} // namespace qrcode +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern.hpp new file mode 100644 index 00000000..d7733e0c --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern.hpp @@ -0,0 +1,59 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_QRCODE_DETECTOR_FINDER_PATTERN_HPP_ +#define __ZXING_QRCODE_DETECTOR_FINDER_PATTERN_HPP_ + +#include "../../common/bitmatrix.hpp" +#include "../../resultpoint.hpp" + +namespace zxing { +namespace qrcode { + +class FinderPattern : public ResultPoint { +public: + enum CheckState { + HORIZONTAL_STATE_NORMAL = 0, + HORIZONTAL_STATE_LEFT_SPILL = 1, + HORIZONTAL_STATE_RIGHT_SPILL = 2, + VERTICAL_STATE_NORMAL = 3, + VERTICAL_STATE_UP_SPILL = 4, + VERTICAL_STATE_DOWN_SPILL = 5 + }; + +private: + float estimatedModuleSize_; + int count_; + + FinderPattern(float posX, float posY, float estimatedModuleSize, int count); + +public: + FinderPattern(float posX, float posY, float estimatedModuleSize); + int getCount() const; + float getEstimatedModuleSize() const; + void incrementCount(); + bool aboutEquals(float moduleSize, float i, float j) const; + Ref combineEstimate(float i, float j, float newModuleSize) const; + + void setHorizontalCheckState(int state); + void setVerticalCheckState(int state); + + int getHorizontalCheckState() { return horizontalState_; } + int getVerticalCheckState() { return verticalState_; } + +private: + float fix_; + CheckState horizontalState_; + CheckState verticalState_; +}; +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DETECTOR_FINDER_PATTERN_HPP_ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern_finder.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern_finder.cpp new file mode 100644 index 00000000..438928c0 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern_finder.cpp @@ -0,0 +1,1508 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "finder_pattern_finder.hpp" +#include "../../common/kmeans.hpp" +#include "../../common/mathutils.hpp" +#include "../../decodehints.hpp" +#include "../../errorhandler.hpp" + +using zxing::Ref; +using zxing::qrcode::FinderPattern; +using zxing::qrcode::FinderPatternFinder; +using zxing::qrcode::FinderPatternInfo; + +// VC++ + +using zxing::BitMatrix; +using zxing::DecodeHints; +using zxing::ResultPoint; + +namespace zxing { + +namespace qrcode { + + +namespace { +class FurthestFromAverageComparator { +private: + const float averageModuleSize_; + +public: + explicit FurthestFromAverageComparator(float averageModuleSize) + : averageModuleSize_(averageModuleSize) {} + bool operator()(Ref a, Ref b) { + float dA = abs(a->getEstimatedModuleSize() - averageModuleSize_); + float dB = abs(b->getEstimatedModuleSize() - averageModuleSize_); + return dA > dB; + } +}; + +// Orders by furthes from average +class CenterComparator { + const float averageModuleSize_; + +public: + explicit CenterComparator(float averageModuleSize) : averageModuleSize_(averageModuleSize) {} + bool operator()(Ref a, Ref b) { + // N.B.: we want the result in descending order ... 
+ if (a->getCount() != b->getCount()) { + return a->getCount() > b->getCount(); + } else { + float dA = abs(a->getEstimatedModuleSize() - averageModuleSize_); + float dB = abs(b->getEstimatedModuleSize() - averageModuleSize_); + return dA < dB; + } + } +}; + +class CountComparator { +public: + bool operator()(Ref a, Ref b) { + return a->getCount() > b->getCount(); + } +}; + +class ModuleSizeComparator { +public: + bool operator()(Ref a, Ref b) { + return a->getEstimatedModuleSize() > b->getEstimatedModuleSize(); + } +}; + +class BestComparator { +public: + bool operator()(Ref a, Ref b) { + if (a->getCount() != b->getCount()) { + return a->getCount() > b->getCount(); + } else { + return a->getEstimatedModuleSize() > b->getEstimatedModuleSize(); + } + } +}; +class BestComparator2 { +public: + bool operator()(Ref a, Ref b) { + if (a->getCount() != b->getCount()) { + return a->getCount() > b->getCount(); + } else { + int aErr = 0, bErr = 0; + if (a->getHorizontalCheckState() != FinderPattern::HORIZONTAL_STATE_NORMAL) aErr++; + if (a->getVerticalCheckState() != FinderPattern::VERTICAL_STATE_NORMAL) aErr++; + if (b->getHorizontalCheckState() != FinderPattern::HORIZONTAL_STATE_NORMAL) bErr++; + if (b->getVerticalCheckState() != FinderPattern::VERTICAL_STATE_NORMAL) bErr++; + + if (aErr != bErr) { + return aErr < bErr; + } else { + return a->getEstimatedModuleSize() > b->getEstimatedModuleSize(); + } + } + } +}; + +class XComparator { +public: + bool operator()(Ref a, Ref b) { return a->getX() < b->getX(); } +}; + +class YComparator { +public: + bool operator()(Ref a, Ref b) { return a->getY() < b->getY(); } +}; + +} // namespace + +int FinderPatternFinder::CENTER_QUORUM = 2; +int FinderPatternFinder::MIN_SKIP = 1; // 1 pixel/module times MIN_SKIP modules/center +int FinderPatternFinder::MAX_MODULES = 177; // support up to version 40 which has 177 modules +int FinderPatternFinder::INTEGER_MATH_SHIFT = 8; +int FinderPatternFinder::FP_INPUT_CNN_MAX_NUM = 10; +int 
FinderPatternFinder::FP_IS_SELECT_BEST = 1; +int FinderPatternFinder::FP_IS_SELECT_FILE_BEST = 1; +int FinderPatternFinder::FP_INPUT_MAX_NUM = 100; +int FinderPatternFinder::FP_FILTER_SIZE = 100; +int FinderPatternFinder::FPS_CLUSTER_MAX = 4; +int FinderPatternFinder::FPS_RESULT_MAX = 12; +int FinderPatternFinder::K_FACTOR = 2; + +float FinderPatternFinder::FPS_MS_VAL = 1.0f; +float FinderPatternFinder::FP_COUNT_MIN = 2.0f; +float FinderPatternFinder::FP_MS_MIN = 1.0f; +float FinderPatternFinder::FP_RIGHT_ANGLE = 0.342f; +float FinderPatternFinder::FP_SMALL_ANGLE1 = 0.8191f; +float FinderPatternFinder::FP_SMALL_ANGLE2 = 0.5736f; +float FinderPatternFinder::QR_MIN_FP_AREA_ERR = 3; +float FinderPatternFinder::QR_MIN_FP_MS_ERR = 1; +int FinderPatternFinder::QR_MIN_FP_ACCEPT = 4; + +std::vector> FinderPatternFinder::find(DecodeHints const& hints, + ErrorHandler& err_handler) { + bool tryHarder = true; + + size_t maxI = image_->getHeight(); + size_t maxJ = image_->getWidth(); + // Init pre check result + _horizontalCheckedResult.clear(); + _horizontalCheckedResult.resize(maxJ); + // As this is used often, we use an integer array instead of vector + int stateCount[5]; + + // Let's assume that the maximum version QR Code we support + // (Version 40, 177modules, and finder pattern start at: 0~7) takes up 1/4 + // the height of the image, and then account for the center being 3 + // modules in size. This gives the smallest number of pixels the center + // could be, so skip this often. When trying harder, look for all + // QR versions regardless of how dense they are. + int iSkip = (3 * maxI) / (4 * MAX_MODULES); + if (iSkip < MIN_SKIP || tryHarder) { + iSkip = MIN_SKIP; + } + + // This is slightly faster than using the Ref. 
Efficiency is important here + BitMatrix& matrix = *image_; + + // If we need to use getRowRecords or getRowCounterOffsetEnd, we should call + // initRowCounters first + matrix.initRowCounters(); + + // scan line algorithm + for (size_t i = iSkip - 1; i < maxI; i += iSkip) { + COUNTER_TYPE* irow_states = matrix.getRowRecords(i); + COUNTER_TYPE* irow_offsets = matrix.getRowRecordsOffset(i); + + size_t rj = matrix.getRowFirstIsWhite(i) ? 1 : 0; + COUNTER_TYPE row_counter_width = matrix.getRowCounterOffsetEnd(i); + // because the rj is black, rj+1 must be white, so we can skip it by +2 + for (; (rj + 4) < size_t(row_counter_width) && (rj + 4) < maxJ; rj += 2) { + stateCount[0] = irow_states[rj]; + stateCount[1] = irow_states[rj + 1]; + stateCount[2] = irow_states[rj + 2]; + stateCount[3] = irow_states[rj + 3]; + stateCount[4] = irow_states[rj + 4]; + + size_t j = irow_offsets[rj + 4] + stateCount[4]; + if (j > maxJ) { + rj = row_counter_width - 1; + continue; + } + if (foundPatternCross(stateCount)) { + if (j == maxJ) { + // check whether it is the "true" central + bool confirmed = handlePossibleCenter(stateCount, i, maxJ); + if (confirmed) { + iSkip = int(possibleCenters_.back()->getEstimatedModuleSize()); + if (iSkip < 1) iSkip = 1; + } + rj = row_counter_width - 1; + continue; + } else { + bool confirmed = handlePossibleCenter(stateCount, i, j); + if (confirmed) { + // Start examining every other line. Checking each line + // turned out to be too expensive and didn't improve + // performance. 
+ iSkip = 2; + if (!hasSkipped_) { + int rowSkip = findRowSkip(); + if (rowSkip > stateCount[2]) { + // Skip rows between row of lower confirmed + // center and top of presumed third confirmed + // center but back up a bit to get a full chance + // of detecting it, entire width of center of + // finder pattern Skip by rowSkip, but back off + // by stateCount[2] (size of last center of + // pattern we saw) to be conservative, and also + // back off by iSkip which is about to be + // re-added + i += rowSkip - stateCount[2] - iSkip; + rj = row_counter_width - 1; + j = maxJ - 1; + } + } + } else { + continue; + } + rj += 4; + } + } + } + } + // use connected cells algorithm + { + for (size_t i = iSkip - 1; i < maxI; i += iSkip) { + COUNTER_TYPE* irow_states = matrix.getRowRecords(i); + COUNTER_TYPE* irow_offsets = matrix.getRowRecordsOffset(i); + COUNTER_TYPE row_counter_width = matrix.getRowCounterOffsetEnd(i); + + for (size_t rj = matrix.getRowFirstIsWhite(i) ? 1 : 0; + (rj + 4) < size_t(row_counter_width); rj += 2) { + if (block_->GetUnicomBlockIndex(i, irow_offsets[rj]) == + block_->GetUnicomBlockIndex(i, irow_offsets[rj + 4]) && + block_->GetUnicomBlockIndex(i, irow_offsets[rj + 1]) == + block_->GetUnicomBlockIndex(i, irow_offsets[rj + 3]) && + block_->GetUnicomBlockIndex(i, irow_offsets[rj]) != + block_->GetUnicomBlockIndex(i, irow_offsets[rj + 2])) { + const int iBlackCir = block_->GetUnicomBlockSize(i, irow_offsets[rj]); + const int iWhiteCir = block_->GetUnicomBlockSize(i, irow_offsets[rj + 1]); + const int iBlackPnt = block_->GetUnicomBlockSize(i, irow_offsets[rj + 2]); + + if (-1 == iBlackCir || -1 == iWhiteCir) continue; + + const float fBlackCir = sqrt(iBlackCir / 24.0); + const float fWhiteCir = sqrt(iWhiteCir / 16.0); + const float fBlackPnt = sqrt(iBlackPnt / 9.0); + + // use center 1:3:1, because the border may be padded. 
+ // a plan for MS + const float fRealMS = sqrt((iWhiteCir + iBlackPnt) / 25.0); + + // b plan for MS + int iTotalCount = 0; + for (int j = 1; j < 4; ++j) iTotalCount += irow_states[rj + j]; + const float fEstRowMS = iTotalCount / 5.0; + + if (fabs(fBlackCir - fWhiteCir) <= QR_MIN_FP_AREA_ERR && + fabs(fWhiteCir - fBlackPnt) <= QR_MIN_FP_AREA_ERR && + fabs(fRealMS - fEstRowMS) < QR_MIN_FP_MS_ERR) { + int centerI = 0; + int centerJ = 0; + if (fRealMS < QR_MIN_FP_ACCEPT) { + centerI = i; + centerJ = irow_offsets[rj + 2] + irow_states[rj + 2] / 2; + } else { + int iMinX = 0, iMinY = 0, iMaxX = 0, iMaxY = 0; + block_->GetMinPoint(i, irow_offsets[rj + 1], iMinY, iMinX); + block_->GetMaxPoint(i, irow_offsets[rj + 3], iMaxY, iMaxX); + centerI = (iMaxY + iMinY) / 2.0; // y + centerJ = (iMaxX + iMinX) / 2.0; // x + } + tryToPushToCenters(centerI, centerJ, fRealMS); + int rowSkip = findRowSkip(); + if (rowSkip > irow_states[rj + 2]) { + // Skip rows between row of lower confirmed center + // and top of presumed third confirmed center but + // back up a bit to get a full chance of detecting + // it, entire width of center of finder pattern Skip + // by rowSkip, but back off by stateCount[2] (size + // of last center of pattern we saw) to be + // conservative, and also back off by iSkip which is + // about to be re-added + i += rowSkip - irow_states[rj + 2] - iSkip; + rj = row_counter_width - 1; + } + rj += 4; + } + } + } + } + } + + vector> patternInfos = getPatternInfosFileMode(hints, err_handler); + if (err_handler.ErrCode()) { + return std::vector>(); + } + // sort with score + sort(patternInfos.begin(), patternInfos.end(), + [](Ref a, Ref b) { + return a->getPossibleFix() > b->getPossibleFix(); + }); + + return patternInfos; +} + +bool FinderPatternFinder::tryToPushToCenters(float centerI, float centerJ, + float estimatedModuleSize, + CrossCheckState horizontalState, + CrossCheckState verticalState) { + for (size_t index = 0; index < possibleCenters_.size(); index++) { + 
Ref center = possibleCenters_[index]; + // Look for about the same center and module size: + if (center->aboutEquals(estimatedModuleSize, centerI, centerJ)) { + possibleCenters_[index] = + center->combineEstimate(centerI, centerJ, estimatedModuleSize); + possibleCenters_[index]->setHorizontalCheckState( + horizontalState == FinderPatternFinder::NORMAL ? center->getHorizontalCheckState() + : horizontalState); + possibleCenters_[index]->setVerticalCheckState( + verticalState == FinderPatternFinder::NORMAL ? center->getVerticalCheckState() + : verticalState); + return false; + } + } + Ref newPattern(new FinderPattern(centerJ, centerI, estimatedModuleSize)); + newPattern->setHorizontalCheckState(horizontalState); + newPattern->setVerticalCheckState(verticalState); + possibleCenters_.push_back(newPattern); + return true; +} + +bool FinderPatternFinder::isEqualResult(Ref src, Ref dst) { + if (src == NULL) { + return false; + } + + if (dst == NULL) { + return true; + } + + auto topLeft = src->getTopLeft(); + auto bottomLeft = src->getBottomLeft(); + auto topRight = src->getTopRight(); + + return topLeft->aboutEquals(1.0, dst->getTopLeft()->getY(), dst->getTopLeft()->getX()) && + bottomLeft->aboutEquals(1.0, dst->getBottomLeft()->getY(), + dst->getBottomLeft()->getX()) && + topRight->aboutEquals(1.0, dst->getTopRight()->getY(), dst->getTopRight()->getX()); +} + +bool FinderPatternFinder::IsPossibleFindPatterInfo(Ref a, Ref b, + Ref c) { + // check variance + float aMs = a->getEstimatedModuleSize(); + float bMs = b->getEstimatedModuleSize(); + float cMs = c->getEstimatedModuleSize(); + + float avg = (aMs + bMs + cMs) / 3.0; + float val = + sqrt((aMs - avg) * (aMs - avg) + (bMs - avg) * (bMs - avg) + (cMs - avg) * (cMs - avg)); + + if (val >= FPS_MS_VAL) return false; + + float longSize = 0.0; + + return checkIsoscelesRightTriangle(a, b, c, longSize); +} + +void FinderPatternFinder::PushToResult(Ref a, Ref b, + Ref c, + vector>& patternInfos) { + vector> finderPatterns; + 
finderPatterns.push_back(a); + finderPatterns.push_back(b); + finderPatterns.push_back(c); + vector> finderPattern = orderBestPatterns(finderPatterns); + + Ref patternInfo(new FinderPatternInfo(finderPattern)); + + for (size_t j = 0; j < patternInfos.size(); j++) { + if (isEqualResult(patternInfos[j], patternInfo)) { + return; + } + } + patternInfos.push_back(patternInfo); +} + +vector> FinderPatternFinder::getPatternInfosFileMode( + DecodeHints const& hints, ErrorHandler& err_handler) { + size_t startSize = possibleCenters_.size(); + + if (startSize < 3) { + // Couldn't find enough finder patterns + err_handler = ReaderErrorHandler("Could not find three finder patterns"); + return vector>(); + } + + std::vector> patternInfos; + + if (startSize == 3) { + PushToResult(possibleCenters_[0], possibleCenters_[1], possibleCenters_[2], patternInfos); + return patternInfos; + } + + vector> finderPatterns; + Ref resultBest; + + // select best + if (FP_IS_SELECT_BEST) { + finderPatterns = selectBestPatterns(err_handler); + if (err_handler.ErrCode() == 0) + PushToResult(finderPatterns[0], finderPatterns[1], finderPatterns[2], patternInfos); + } + + if (FP_IS_SELECT_FILE_BEST) { + finderPatterns = selectFileBestPatterns(err_handler); + if (err_handler.ErrCode() == 0) + PushToResult(finderPatterns[0], finderPatterns[1], finderPatterns[2], patternInfos); + } + + // sort and filter + sort(possibleCenters_.begin(), possibleCenters_.end(), ModuleSizeComparator()); + std::vector> standardCenters; + + for (size_t i = 0; i < possibleCenters_.size(); i++) { + if (possibleCenters_[i]->getEstimatedModuleSize() >= FP_MS_MIN && + possibleCenters_[i]->getCount() >= FP_COUNT_MIN) { + standardCenters.push_back(possibleCenters_[i]); + if (standardCenters.size() >= size_t(FP_INPUT_MAX_NUM)) break; + if (hints.getUseNNDetector() && standardCenters.size() >= size_t(FP_INPUT_CNN_MAX_NUM)) + break; + } + } + + if (standardCenters.size() < 3) { + err_handler = ReaderErrorHandler("Could not find 
three finder patterns"); + return vector>(); + } + + if (standardCenters.size() <= size_t(FP_INPUT_CNN_MAX_NUM)) { + for (size_t x = 0; x < standardCenters.size(); x++) { + for (size_t y = x + 1; y < standardCenters.size(); y++) { + for (size_t z = y + 1; z < standardCenters.size(); z++) { + bool check_result = IsPossibleFindPatterInfo( + standardCenters[x], standardCenters[y], standardCenters[z]); + if (check_result) { + PushToResult(standardCenters[x], standardCenters[y], standardCenters[z], + patternInfos); + } + } + } + } + return patternInfos; + } + + // Kmeans + const int maxepoches = 100; + const int minchanged = 0; + // calculate K + int k = log(float(standardCenters.size())) * K_FACTOR - 1; + if (k < 1) k = 1; + + vector> trainX; + for (size_t i = 0; i < standardCenters.size(); i++) { + vector tmp; + tmp.push_back(standardCenters[i]->getCount()); + tmp.push_back(standardCenters[i]->getEstimatedModuleSize()); + trainX.push_back(tmp); + } + + vector clusters_out = k_means(trainX, k, maxepoches, minchanged); + + for (size_t i = 0; i < clusters_out.size(); i++) { + int cluster_select = 0; + + if (clusters_out[i].samples.size() < 3) { + if (i < clusters_out.size() - 1 && clusters_out[i + 1].samples.size() < 3) { + for (size_t j = 0; j < clusters_out[i].samples.size(); j++) + clusters_out[i + 1].samples.push_back(clusters_out[i].samples[j]); + } + continue; + } + + vector> clusterPatterns; + for (size_t j = 0; j < clusters_out[i].samples.size(); j++) { + clusterPatterns.push_back(standardCenters[clusters_out[i].samples[j]]); + } + + sort(clusterPatterns.begin(), clusterPatterns.end(), BestComparator2()); + + for (size_t x = 0; + x < clusters_out[i].samples.size() && cluster_select <= FPS_CLUSTER_MAX && + patternInfos.size() <= size_t(FPS_RESULT_MAX); + x++) { + for (size_t y = x + 1; + y < clusters_out[i].samples.size() && cluster_select <= FPS_CLUSTER_MAX && + patternInfos.size() <= size_t(FPS_RESULT_MAX); + y++) { + for (size_t z = y + 1; + z < 
clusters_out[i].samples.size() && cluster_select <= FPS_CLUSTER_MAX && + patternInfos.size() <= size_t(FPS_RESULT_MAX); + z++) { + bool check_result = IsPossibleFindPatterInfo( + clusterPatterns[x], clusterPatterns[y], clusterPatterns[z]); + if (check_result) { + PushToResult(clusterPatterns[x], clusterPatterns[y], clusterPatterns[z], + patternInfos); + cluster_select++; + } + } + } + } + } + return patternInfos; +} + +// Given a count of black/white/black/white/black pixels just seen and an end +// position, figures the location of the center of this run. +float FinderPatternFinder::centerFromEnd(int* stateCount, int end) { + // calculate the center by pattern 1:3:1 is better than pattern 3 + // because the finder pattern is irregular in some case + return (float)(end - stateCount[4]) - (stateCount[3] + stateCount[2] + stateCount[1]) / 2.0f; +} + +// return if the proportions of the counts is close enough to 1/1/3/1/1 ratios +// used by finder patterns to be considered a match +bool FinderPatternFinder::foundPatternCross(int* stateCount) { + int totalModuleSize = 0; + + int stateCountINT[5]; + + int minModuleSizeINT = 3; + minModuleSizeINT <<= INTEGER_MATH_SHIFT; + + for (int i = 0; i < 5; i++) { + if (stateCount[i] <= 0) { + return false; + } + stateCountINT[i] = stateCount[i] << INTEGER_MATH_SHIFT; + totalModuleSize += stateCount[i]; + } + if (totalModuleSize < 7) { + return false; + } + + CURRENT_CHECK_STATE = FinderPatternFinder::NOT_PATTERN; + + totalModuleSize = totalModuleSize << INTEGER_MATH_SHIFT; + + // Newer version to check 1 time, use 3 points + int moduleSize = ((totalModuleSize - stateCountINT[0] - stateCountINT[4])) / 5; + + int maxVariance = moduleSize; + + if (moduleSize > minModuleSizeINT) maxVariance = moduleSize / 2; + + int startCountINT = stateCountINT[0]; + int endCountINT = stateCountINT[4]; + + bool leftFit = (abs(moduleSize - startCountINT) <= maxVariance); + bool rightFit = (abs(moduleSize - endCountINT) <= maxVariance); + + if 
(leftFit) { + if (rightFit) { + moduleSize = totalModuleSize / 7; + CURRENT_CHECK_STATE = FinderPatternFinder::NORMAL; + } else { + moduleSize = (totalModuleSize - stateCountINT[4]) / 6; + CURRENT_CHECK_STATE = FinderPatternFinder::RIHGT_SPILL; + } + } else { + if (rightFit) { + moduleSize = (totalModuleSize - stateCountINT[0]) / 6; + CURRENT_CHECK_STATE = FinderPatternFinder::LEFT_SPILL; + } else { + // return false; + CURRENT_CHECK_STATE = FinderPatternFinder::LEFT_RIGHT_SPILL; + } + } + + // 1:1:3:1:1 || n:1:3:1:1 || 1:1:3:1:n + if (abs(moduleSize - stateCountINT[1]) <= maxVariance && + abs(3 * moduleSize - stateCountINT[2]) <= 3 * maxVariance && + abs(moduleSize - stateCountINT[3]) <= maxVariance) { + return true; + } + return false; +} + +int FinderPatternFinder::getMinModuleSize() { + int minModuleSize = (3 * min(image_->getWidth(), image_->getHeight())) / (4 * MAX_MODULES); + + if (minModuleSize < MIN_SKIP) { + minModuleSize = MIN_SKIP; + } + + return minModuleSize; +} + +/** + * After a vertical and horizontal scan finds a potential finder pattern, this + * method "cross-cross-cross-checks" by scanning down diagonally through the + * center of the possible finder pattern to see if the same proportion is + * detected. + * + * @param startI row where a finder pattern was detected + * @param centerJ center of the section that appears to cross a finder pattern + * @param maxCount maximum reasonable number of modules that should be + * observed in any reading state, based on the results of the horizontal scan + * @param originalStateCountTotal The original state count total. 
+ * @return true if proportions are withing expected limits + */ +bool FinderPatternFinder::crossCheckDiagonal(int startI, int centerJ, int maxCount, + int originalStateCountTotal) { + int maxI = image_->getHeight(); + int maxJ = image_->getWidth(); + + if ((startI < 0) || (startI > maxI - 1) || (centerJ < 0) || (centerJ > maxJ - 1)) { + return false; + } + + int stateCount[5]; + stateCount[0] = 0; + stateCount[1] = 0; + stateCount[2] = 0; + stateCount[3] = 0; + stateCount[4] = 0; + + if (!image_->get(centerJ, startI)) { + if (startI + 1 < maxI && image_->get(centerJ, startI + 1)) + startI = startI + 1; + else if (0 < startI - 1 && image_->get(centerJ, startI - 1)) + startI = startI - 1; + else + return false; + } + + // This is slightly faster than using the Ref. Efficiency is important here + BitMatrix& matrix = *image_; + + // Start counting up, left from center finding black center mass + int i = 0; + // Fix possible crash 20140418 + // while (startI - i >= 0 && image.get(centerJ - i, startI - i)) { + while ((startI - i >= 0) && (centerJ - i >= 0) && matrix.get(centerJ - i, startI - i)) { + stateCount[2]++; + i++; + } + + if ((startI - i < 0) || (centerJ - i < 0)) { + return false; + } + + // Continue up, left finding white space + while ((startI - i >= 0) && (centerJ - i >= 0) && !matrix.get(centerJ - i, startI - i) && + stateCount[1] <= maxCount) { + stateCount[1]++; + i++; + } + + // If already too many modules in this state or ran off the edge: + if ((startI - i < 0) || (centerJ - i < 0) || stateCount[1] > maxCount) { + return false; + } + + CrossCheckState tmpCheckState = FinderPatternFinder::NORMAL; + + // Continue up, left finding black border + while ((startI - i >= 0) && (centerJ - i >= 0) && matrix.get(centerJ - i, startI - i) && + stateCount[0] <= maxCount) { + stateCount[0]++; + i++; + } + + if (stateCount[0] >= maxCount) { + tmpCheckState = FinderPatternFinder::LEFT_SPILL; + } + + // Now also count down, right from center + i = 1; + while ((startI 
+ i < maxI) && (centerJ + i < maxJ) && matrix.get(centerJ + i, startI + i)) { + stateCount[2]++; + i++; + } + + // Ran off the edge? + if ((startI + i >= maxI) || (centerJ + i >= maxJ)) { + return false; + } + + while ((startI + i < maxI) && (centerJ + i < maxJ) && !matrix.get(centerJ + i, startI + i) && + stateCount[3] < maxCount) { + stateCount[3]++; + i++; + } + + if ((startI + i >= maxI) || (centerJ + i >= maxJ) || stateCount[3] >= maxCount) { + return false; + } + + while ((startI + i < maxI) && (centerJ + i < maxJ) && matrix.get(centerJ + i, startI + i) && + stateCount[4] < maxCount) { + stateCount[4]++; + i++; + } + + if (stateCount[4] >= maxCount) { + tmpCheckState = tmpCheckState == FinderPatternFinder::LEFT_SPILL + ? FinderPatternFinder::LEFT_RIGHT_SPILL + : FinderPatternFinder::RIHGT_SPILL; + } + + bool diagonal_check = foundPatternCross(stateCount); + if (!diagonal_check) return false; + + if (CURRENT_CHECK_STATE == FinderPatternFinder::LEFT_SPILL && + tmpCheckState == FinderPatternFinder::RIHGT_SPILL) + return false; + + if (CURRENT_CHECK_STATE == FinderPatternFinder::RIHGT_SPILL && + tmpCheckState == FinderPatternFinder::LEFT_SPILL) + return false; + + int stateCountTotal = getStateCountTotal(stateCount, CURRENT_CHECK_STATE); + + if (abs(stateCountTotal - originalStateCountTotal) < 2 * originalStateCountTotal) { + return true; + } else { + return false; + } +} + +int FinderPatternFinder::getStateCountTotal(int* stateCount, const CrossCheckState& check_state) { + int stateCountTotal = stateCount[1] + stateCount[2] + stateCount[3]; + if (check_state == FinderPatternFinder::NORMAL) { + stateCountTotal = stateCountTotal + stateCount[0] + stateCount[4]; + } else if (check_state == FinderPatternFinder::LEFT_SPILL) { + stateCountTotal = stateCountTotal + stateCount[1] + stateCount[4]; + } else if (check_state == FinderPatternFinder::RIHGT_SPILL) { + stateCountTotal = stateCountTotal + stateCount[0] + stateCount[3]; + } else if (check_state == 
FinderPatternFinder::LEFT_RIGHT_SPILL) { + stateCountTotal = stateCountTotal + stateCount[1] + stateCount[3]; + } + return stateCountTotal; +} +// After a horizontal scan finds a potential finder pattern, this method +// "cross-checks" by scanning down vertically through the center of the possible +// finder pattern to see if the same proportion is detected. +float FinderPatternFinder::crossCheckVertical(size_t startI, size_t centerJ, int maxCount, + int originalStateCountTotal, + float& estimatedVerticalModuleSize) { + int maxI = image_->getHeight(); + + int stateCount[5]; + for (int i = 0; i < 5; i++) stateCount[i] = 0; + + if (!image_->get(centerJ, startI)) { + if ((int)startI + 1 < maxI && image_->get(centerJ, startI + 1)) + startI = startI + 1; + else if (0 < (int)startI - 1 && image_->get(centerJ, startI - 1)) + startI = startI - 1; + else + return nan(); + } + + // This is slightly faster than using the Ref. Efficiency is important here + BitMatrix& matrix = *image_; + + bool* imageRow0 = matrix.getRowBoolPtr(0); + bool* p = imageRow0; + int imgWidth = matrix.getWidth(); + + // Start counting up from center + int ii = startI; + + p = imageRow0 + ii * imgWidth + centerJ; + + while (ii >= 0 && *p) { + stateCount[2]++; + ii--; + p -= imgWidth; + } + if (ii < 0) { + return nan(); + } + while (ii >= 0 && !*p && stateCount[1] <= maxCount) { + stateCount[1]++; + ii--; + p -= imgWidth; + } + // If already too many modules in this state or ran off the edge: + if (ii < 0 || stateCount[1] > maxCount) { + return nan(); + } + + CrossCheckState tmpCheckState = FinderPatternFinder::NORMAL; + + while (ii >= 0 && *p /*&& stateCount[0] <= maxCount*/) { // n:1:3:1:1 + stateCount[0]++; + ii--; + p -= imgWidth; + } + + if (stateCount[0] >= maxCount) { + tmpCheckState = FinderPatternFinder::LEFT_SPILL; + } + + // Now also count down from center + ii = startI + 1; + + p = imageRow0 + ii * imgWidth + centerJ; + + while (ii < maxI && *p) { // 1:1:"3":1:1 + // while (ii < maxI && 
matrix.get(centerJ, ii)) { + stateCount[2]++; + ii++; + + p += imgWidth; + } + if (ii == maxI) { + return nan(); + } + while (ii < maxI && !*p && stateCount[3] < maxCount) { // 1:1:3:"1":1 + stateCount[3]++; + ii++; + + p += imgWidth; + } + if (ii == maxI || stateCount[3] >= maxCount) { + return nan(); + } + + if (tmpCheckState == FinderPatternFinder::LEFT_SPILL) { + while (ii < maxI && *p && stateCount[4] < maxCount) { // 1:1:3:1:"1" + stateCount[4]++; + ii++; + + p += imgWidth; + } + if (stateCount[4] >= maxCount) { + return nan(); + } + } else { // 1:1:3:1:"n" + while (ii < maxI && *p) { + stateCount[4]++; + ii++; + + p += imgWidth; + } + if (stateCount[4] >= maxCount) { + tmpCheckState = FinderPatternFinder::RIHGT_SPILL; + } + } + + bool vertical_check = foundPatternCross(stateCount); + if (!vertical_check) return nan(); + + if ((CURRENT_CHECK_STATE == FinderPatternFinder::LEFT_SPILL && + tmpCheckState == FinderPatternFinder::RIHGT_SPILL) || + (CURRENT_CHECK_STATE == FinderPatternFinder::RIHGT_SPILL && + tmpCheckState == FinderPatternFinder::LEFT_SPILL)) { + return nan(); + } + + int stateCountTotal = getStateCountTotal(stateCount, CURRENT_CHECK_STATE); + + // If we found a finder-pattern-like section, but its size is more than 40% + // different than the original, assume it's a false positive + if (5 * abs(stateCountTotal - originalStateCountTotal) >= 2 * originalStateCountTotal) { + return nan(); + } + + estimatedVerticalModuleSize = (float)stateCountTotal / 7.0f; + + return centerFromEnd(stateCount, ii); +} + +// Like #crossCheckVertical(), and in fact is basically identical, +// except it reads horizontally instead of vertically. This is used to +// cross-cross check a vertical cross check and locate the real center of the +// alignment pattern. 
+float FinderPatternFinder::crossCheckHorizontal(size_t startJ, size_t centerI, int maxCount, + int originalStateCountTotal, + float& estimatedHorizontalModuleSize) { + int maxJ = image_->getWidth(); + + int stateCount[5]; + for (int i = 0; i < 5; i++) stateCount[i] = 0; + + if (!image_->get(startJ, centerI)) { + if ((int)startJ + 1 < maxJ && image_->get(startJ + 1, centerI)) + startJ = startJ + 1; + else if (0 < (int)startJ - 1 && image_->get(startJ - 1, centerI)) + startJ = startJ - 1; + else + return nan(); + } + + // This is slightly faster than using the Ref. Efficiency is important here + BitMatrix& matrix = *image_; + int j = startJ; + + bool* centerIrow = NULL; + + centerIrow = matrix.getRowBoolPtr(centerI); + + // while (j >= 0 &&matrix.get(j, centerI)) { + while (j >= 0 && centerIrow[j]) { + stateCount[2]++; + j--; + } + if (j < 0) { + return nan(); + } + while (j >= 0 && !centerIrow[j] && stateCount[1] <= maxCount) { + stateCount[1]++; + j--; + } + if (j < 0 || stateCount[1] > maxCount) { + return nan(); + } + CrossCheckState tmpCheckState = FinderPatternFinder::NORMAL; + + while (j >= 0 && centerIrow[j] /* && stateCount[0] <= maxCount*/) { + stateCount[0]++; + j--; + } + if (stateCount[0] >= maxCount) { + tmpCheckState = FinderPatternFinder::LEFT_SPILL; + } + + j = startJ + 1; + while (j < maxJ && centerIrow[j]) { + stateCount[2]++; + j++; + } + if (j == maxJ) { + return nan(); + } + while (j < maxJ && !centerIrow[j] && stateCount[3] < maxCount) { + stateCount[3]++; + j++; + } + if (j == maxJ || stateCount[3] >= maxCount) { + return nan(); + } + + if (tmpCheckState == LEFT_SPILL) { + while (j < maxJ && centerIrow[j] && stateCount[4] <= maxCount) { + stateCount[4]++; + j++; + } + if (stateCount[4] >= maxCount) { + return nan(); + } + } else { + while (j < maxJ && centerIrow[j]) { + stateCount[4]++; + j++; + } + if (stateCount[4] >= maxCount) { + tmpCheckState = RIHGT_SPILL; + } + } + + while (j < maxJ && centerIrow[j] /*&& stateCount[4] < maxCount*/) { + 
stateCount[4]++; + j++; + } + + // If we found a finder-pattern-like section, but its size is significantly + // different than the original, assume it's a false positive + // int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2] + + // stateCount[3] + stateCount[4]; + bool horizontal_check = foundPatternCross(stateCount); + if (!horizontal_check) return nan(); + + /* + if(tmpCheckState!=CURRENT_CHECK_STATE) + return nan();*/ + + // Cannot be a LEFT-RIGHT center + if ((CURRENT_CHECK_STATE == FinderPatternFinder::LEFT_SPILL && + tmpCheckState == FinderPatternFinder::RIHGT_SPILL) || + (CURRENT_CHECK_STATE == FinderPatternFinder::RIHGT_SPILL && + tmpCheckState == FinderPatternFinder::LEFT_SPILL)) { + return nan(); + } + + int stateCountTotal = getStateCountTotal(stateCount, CURRENT_CHECK_STATE); + if (5 * abs(stateCountTotal - originalStateCountTotal) >= originalStateCountTotal) { + return nan(); + } + + estimatedHorizontalModuleSize = (float)stateCountTotal / 7.0f; + return centerFromEnd(stateCount, j); +} + +float FinderPatternFinder::hasHorizontalCheckedResult(size_t startJ, size_t centerI) { + for (size_t i = 0; i < _horizontalCheckedResult[startJ].size(); i++) { + if (_horizontalCheckedResult[startJ][i].centerI == centerI) { + return _horizontalCheckedResult[startJ][i].centerJ; + } + } + + return -1.0; +} + +int FinderPatternFinder::addHorizontalCheckedResult(size_t startJ, size_t centerI, float centerJ) { + HorizontalCheckedResult result; + result.centerI = centerI; + result.centerJ = centerJ; + + _horizontalCheckedResult[startJ].push_back(result); + + return 1; +} + +#define CENTER_CHECK_TIME 3 + +/** + *

This is called when a horizontal scan finds a possible alignment pattern. + * It will cross check with a vertical scan, and if successful, will, ah, + * cross-cross-check with another horizontal scan. This is needed primarily to + * locate the real horizontal center of the pattern in cases of extreme skew. + * And then we cross-cross-cross check with another diagonal scan.

+ * + *

If that succeeds the finder pattern location is added to a list that + * tracks the number of times each location has been nearly-matched as a finder + * pattern. Each additional find is more evidence that the location is in fact a + * finder pattern center + * + * @param stateCount reading state module counts from horizontal scan + * @param i row where finder pattern may be found + * @param j end of possible finder pattern in row + * @return true if a finder pattern candidate was found this time + */ +bool FinderPatternFinder::handlePossibleCenter(int* stateCount, size_t i, size_t j) { + CrossCheckState tmpHorizontalState = CURRENT_CHECK_STATE; + float centerJ = centerFromEnd(stateCount, j); + int stateCountTotal = stateCount[1] + stateCount[2] + stateCount[3]; + if (tmpHorizontalState == FinderPatternFinder::NORMAL) { + // 1:1:3:1:1 + stateCountTotal = stateCountTotal + stateCount[0] + stateCount[4]; + } else if (tmpHorizontalState == FinderPatternFinder::LEFT_SPILL) { + // n:1:3:1:1 + stateCountTotal = stateCountTotal + stateCount[1] + stateCount[4]; + } else if (tmpHorizontalState == FinderPatternFinder::RIHGT_SPILL) { + // 1:1:3:1:n + stateCountTotal = stateCountTotal + stateCount[0] + stateCount[3]; + } + float estimatedHorizontalModuleSize = (float)stateCountTotal / 7.0f; + + float estimatedVerticalModuleSize; + + // try different size according to the estimatedHorizontalModuleSize + float tolerateModuleSize = + estimatedHorizontalModuleSize > 4.0 ? 
estimatedHorizontalModuleSize / 2.0f : 1.0f; + float possbileCenterJs[7] = {centerJ, + centerJ - tolerateModuleSize, + centerJ + tolerateModuleSize, + centerJ - 2 * tolerateModuleSize, + centerJ + 2 * tolerateModuleSize, + centerJ - 3 * tolerateModuleSize, + centerJ + 3 * tolerateModuleSize}; + int image_height = image_->getHeight(); + int image_width = image_->getWidth(); + for (int k = 0; k < CENTER_CHECK_TIME; k++) { + float possibleCenterJ = possbileCenterJs[k]; + if (possibleCenterJ < 0 || possibleCenterJ >= image_width) continue; + float centerI = crossCheckVertical(i, (size_t)possibleCenterJ, stateCount[2], + stateCountTotal, estimatedVerticalModuleSize); + + if (!isnan(centerI) && centerI >= 0.0) { + CrossCheckState tmpVerticalState = CURRENT_CHECK_STATE; + + float moduleSizeDiff = abs(estimatedHorizontalModuleSize - estimatedVerticalModuleSize); + + if (moduleSizeDiff > estimatedHorizontalModuleSize || + moduleSizeDiff > estimatedVerticalModuleSize) + return false; + + tolerateModuleSize = + estimatedVerticalModuleSize > 4.0 ? 
estimatedVerticalModuleSize / 2.0f : 1.0f; + + float possbileCenterIs[7] = {centerI, + centerI - tolerateModuleSize, + centerI + tolerateModuleSize, + centerI - 2 * tolerateModuleSize, + centerI + 2 * tolerateModuleSize, + centerI - 3 * tolerateModuleSize, + centerI + 3 * tolerateModuleSize}; + + for (int l = 0; l < CENTER_CHECK_TIME; l++) { + float possibleCenterI = possbileCenterIs[l]; + if (possibleCenterI < 0 || possibleCenterI >= image_height) continue; + // Re-cross check + float reEstimatedHorizontalModuleSize; + float cJ = hasHorizontalCheckedResult(centerJ, possibleCenterI); + + if (!isnan(cJ) && cJ >= 0.0) { + centerJ = cJ; + } else { + cJ = centerJ; + + float ccj = + crossCheckHorizontal((size_t)cJ, (size_t)possibleCenterI, stateCount[2], + stateCountTotal, reEstimatedHorizontalModuleSize); + + if (!isnan(ccj)) { + centerJ = ccj; + addHorizontalCheckedResult(cJ, possibleCenterI, ccj); + } + } + if (!isnan(centerJ)) { + tryToPushToCenters( + centerI, centerJ, + (estimatedHorizontalModuleSize + estimatedVerticalModuleSize) / 2.0, + tmpHorizontalState, tmpVerticalState); + return true; + } + } + } + } + + return false; +} + +// return the number of rows we could safely skip during scanning, based on the +// first two finder patterns that have been located. In some cases their +// position will allow us to infer that the third pattern must lie below a +// certain point farther down the image. +int FinderPatternFinder::findRowSkip() { + int max = possibleCenters_.size(); + if (max <= 1) { + return 0; + } + + if (max <= compared_finder_counts) return 0; + + Ref firstConfirmedCenter, secondConfirmedCenter; + + for (int i = 0; i < max - 1; i++) { + firstConfirmedCenter = possibleCenters_[i]; + if (firstConfirmedCenter->getCount() >= CENTER_QUORUM) { + float firstModuleSize = firstConfirmedCenter->getEstimatedModuleSize(); + int j_start = (i < compared_finder_counts) ? 
compared_finder_counts : i + 1; + for (int j = j_start; j < max; j++) { + secondConfirmedCenter = possibleCenters_[j]; + if (secondConfirmedCenter->getCount() >= CENTER_QUORUM) { + float secondModuleSize = secondConfirmedCenter->getEstimatedModuleSize(); + float moduleSizeDiff = abs(firstModuleSize - secondModuleSize); + if (moduleSizeDiff < 1.0f) { + hasSkipped_ = true; + return (int)(abs(firstConfirmedCenter->getX() - + secondConfirmedCenter->getX()) - + abs(firstConfirmedCenter->getY() - + secondConfirmedCenter->getY())) / + 2; + } + } + } + } + } + + compared_finder_counts = max; + + return 0; +} + +// return the 3 finder patterns from our list of candidates. The "best" are +// those that have been detected at least #CENTER_QUORUM times, and whose module +// size differs from the average among those patterns the least. // +vector> FinderPatternFinder::selectBestPatterns(ErrorHandler& err_handler) { + size_t startSize = possibleCenters_.size(); + + if (startSize < 3) { + // Couldn't find enough finder patterns + err_handler = ReaderErrorHandler("Could not find three finder patterns"); + return vector>(); + } + + vector> result(3); + + if (startSize == 3) { + result[0] = possibleCenters_[0]; + result[1] = possibleCenters_[1]; + result[2] = possibleCenters_[2]; + return result; + } + + sort(possibleCenters_.begin(), possibleCenters_.end(), CountComparator()); + if ((possibleCenters_[2]->getCount() - possibleCenters_[3]->getCount()) > 1 && + possibleCenters_[2]->getCount() > 1) { + result[0] = possibleCenters_[0]; + result[1] = possibleCenters_[1]; + result[2] = possibleCenters_[2]; + return result; + } else if (possibleCenters_[3]->getCount() > 1) { + float totalModuleSize = 0.0f; + for (int i = 0; i < 4; i++) { + totalModuleSize += possibleCenters_[i]->getEstimatedModuleSize(); + } + float everageModuleSize = totalModuleSize / 4.0f; + float maxDiffModuleSize = 0.0f; + int maxID = 0; + for (int i = 0; i < 4; i++) { + float diff = 
abs(possibleCenters_[i]->getEstimatedModuleSize() - everageModuleSize); + if (diff > maxDiffModuleSize) { + maxDiffModuleSize = diff; + maxID = i; + } + } + switch (maxID) { + case 0: + result[0] = possibleCenters_[1]; + result[1] = possibleCenters_[2]; + result[2] = possibleCenters_[3]; + break; + case 1: + result[0] = possibleCenters_[0]; + result[1] = possibleCenters_[2]; + result[2] = possibleCenters_[3]; + break; + case 2: + result[0] = possibleCenters_[0]; + result[1] = possibleCenters_[1]; + result[2] = possibleCenters_[3]; + break; + default: + result[0] = possibleCenters_[0]; + result[1] = possibleCenters_[1]; + result[2] = possibleCenters_[2]; + break; + } + + return result; + } else if (possibleCenters_[1]->getCount() > 1 && possibleCenters_[2]->getCount() == 1) { + vector> possibleThirdCenter; + float possibleModuleSize = (possibleCenters_[0]->getEstimatedModuleSize() + + possibleCenters_[1]->getEstimatedModuleSize()) / + 2.0f; + for (size_t i = 2; i < startSize; i++) { + if (abs(possibleCenters_[i]->getEstimatedModuleSize() - possibleModuleSize) < + 0.5 * possibleModuleSize) + possibleThirdCenter.push_back(possibleCenters_[i]); + } + float longestSide = 0.0f; + size_t longestId = 0; + for (size_t i = 0; i < possibleThirdCenter.size(); i++) { + float tmpLongSide = 0.0f; + if (checkIsoscelesRightTriangle(possibleCenters_[0], possibleCenters_[1], + possibleThirdCenter[i], tmpLongSide)) { + if (tmpLongSide >= longestSide) { + longestSide = tmpLongSide; + longestId = i; + } + } + } + result[0] = possibleCenters_[0]; + result[1] = possibleCenters_[1]; + + // Error with decoding + if (longestId >= possibleThirdCenter.size()) { + err_handler = ReaderErrorHandler("Not find any available possibleThirdCenter"); + return vector>(); + } else { + result[2] = possibleThirdCenter[longestId]; + } + + return result; + } + + // Filter outlier possibilities whose module size is too different + if (startSize > 3) { + // But we can only afford to do so if we have at least 4 
possibilities + // to choose from + float totalModuleSize = 0.0f; + float square = 0.0f; + for (size_t i = 0; i < startSize; i++) { + float size = possibleCenters_[i]->getEstimatedModuleSize(); + totalModuleSize += size; + square += size * size; + } + float average = totalModuleSize / (float)startSize; + float stdDev = (float)sqrt(square / startSize - average * average); + + sort(possibleCenters_.begin(), possibleCenters_.end(), + FurthestFromAverageComparator(average)); + + // float limit = max(0.2f * average, stdDev); + float limit = max(0.5f * average, stdDev); + + for (size_t i = 0; i < possibleCenters_.size() && possibleCenters_.size() > 3; i++) { + if (abs(possibleCenters_[i]->getEstimatedModuleSize() - average) > limit) { + possibleCenters_.erase(possibleCenters_.begin() + i); + i--; + } + } + } + + int tryHardPossibleCenterSize = 15; + int possibleCenterSize = 12; + + if (possibleCenters_.size() > size_t(tryHardPossibleCenterSize)) { + sort(possibleCenters_.begin(), possibleCenters_.end(), CountComparator()); + possibleCenters_.erase(possibleCenters_.begin() + tryHardPossibleCenterSize, + possibleCenters_.end()); + } else if (possibleCenters_.size() > size_t(possibleCenterSize)) { + sort(possibleCenters_.begin(), possibleCenters_.end(), CountComparator()); + possibleCenters_.erase(possibleCenters_.begin() + possibleCenterSize, + possibleCenters_.end()); + } + + if (possibleCenters_.size() >= 6) { + sort(possibleCenters_.begin(), possibleCenters_.end(), XComparator()); + possibleCenters_.erase(possibleCenters_.begin() + 4, possibleCenters_.end() - 2); + sort(possibleCenters_.begin(), possibleCenters_.begin() + 4, YComparator()); + possibleCenters_.erase(possibleCenters_.begin() + 1, possibleCenters_.begin() + 3); + sort(possibleCenters_.end() - 2, possibleCenters_.end(), YComparator()); + possibleCenters_.erase(possibleCenters_.end() - 1, possibleCenters_.end()); + } else if (possibleCenters_.size() > 3) { + // Throw away all but those first size candidate 
points we found. + float totalModuleSize = 0.0f; + for (size_t i = 0; i < possibleCenters_.size(); i++) { + float size = possibleCenters_[i]->getEstimatedModuleSize(); + totalModuleSize += size; + } + float average = totalModuleSize / (float)possibleCenters_.size(); + sort(possibleCenters_.begin(), possibleCenters_.end(), CenterComparator(average)); + possibleCenters_.erase(possibleCenters_.begin() + 3, possibleCenters_.end()); + } + + result[0] = possibleCenters_[0]; + result[1] = possibleCenters_[1]; + result[2] = possibleCenters_[2]; + + return result; +} + +vector> FinderPatternFinder::selectFileBestPatterns(ErrorHandler& err_handler) { + size_t startSize = possibleCenters_.size(); + + if (startSize < 3) { + // Couldn't find enough finder patterns + err_handler = ReaderErrorHandler("Could not find three finder patterns"); + return vector>(); + } + + vector> result(3); + + if (startSize == 3) { + result[0] = possibleCenters_[0]; + result[1] = possibleCenters_[1]; + result[2] = possibleCenters_[2]; + return result; + } + + sort(possibleCenters_.begin(), possibleCenters_.end(), BestComparator()); + + result[0] = possibleCenters_[0]; + result[1] = possibleCenters_[1]; + result[2] = possibleCenters_[2]; + + for (size_t i = 0; i < possibleCenters_.size() - 2; ++i) { + float tmpLongSide = 0; + + int iCountDiff = 0; + float fModuleSizeDiff = 0; + for (size_t j = 0; j < 3; ++j) { + iCountDiff += abs(possibleCenters_[i + j]->getCount() - + possibleCenters_[i + ((j + 1) % 3)]->getCount()); + fModuleSizeDiff += fabs(possibleCenters_[i + j]->getEstimatedModuleSize() - + possibleCenters_[i + ((j + 1) % 3)]->getEstimatedModuleSize()); + } + + if (iCountDiff > 2) continue; + if (fModuleSizeDiff > 5) continue; + + if (checkIsoscelesRightTriangle(possibleCenters_[i], possibleCenters_[i + 1], + possibleCenters_[i + 2], tmpLongSide)) { + result[0] = possibleCenters_[i]; + result[1] = possibleCenters_[i + 1]; + result[2] = possibleCenters_[i + 2]; + + break; + } + } + + return 
result; +} + +// Orders an array of three patterns in an order [A,B,C] such that +// AB> FinderPatternFinder::orderBestPatterns( + vector> patterns) { + // Find distances between pattern centers + float abDistance = distance(patterns[0], patterns[1]); + float bcDistance = distance(patterns[1], patterns[2]); + float acDistance = distance(patterns[0], patterns[2]); + + Ref topLeft; + Ref topRight; + Ref bottomLeft; + // Assume one closest to other two is top left; + // topRight and bottomLeft will just be guesses below at first + if (bcDistance >= abDistance && bcDistance >= acDistance) { + topLeft = patterns[0]; + topRight = patterns[1]; + bottomLeft = patterns[2]; + } else if (acDistance >= bcDistance && acDistance >= abDistance) { + topLeft = patterns[1]; + topRight = patterns[0]; + bottomLeft = patterns[2]; + } else { + topLeft = patterns[2]; + topRight = patterns[0]; + bottomLeft = patterns[1]; + } + + // Use cross product to figure out which of other1/2 is the bottom left + // pattern. 
The vector "top_left -> bottom_left" x "top_left -> top_right" + // should yield a vector with positive z component + if ((bottomLeft->getY() - topLeft->getY()) * (topRight->getX() - topLeft->getX()) < + (bottomLeft->getX() - topLeft->getX()) * (topRight->getY() - topLeft->getY())) { + Ref temp = topRight; + topRight = bottomLeft; + bottomLeft = temp; + } + + vector> results(3); + results[0] = bottomLeft; + results[1] = topLeft; + results[2] = topRight; + + return results; +} + +bool FinderPatternFinder::checkIsoscelesRightTriangle(Ref centerA, + Ref centerB, + Ref centerC, float& longSide) { + float shortSide1, shortSide2; + FinderPatternInfo::calculateSides(centerA, centerB, centerC, longSide, shortSide1, shortSide2); + + auto minAmongThree = [](float a, float b, float c) { return min(min(a, b), c); }; + auto maxAmongThree = [](float a, float b, float c) { return max(max(a, b), c); }; + + float shortSideSqrt1 = sqrt(shortSide1); + float shortSideSqrt2 = sqrt(shortSide2); + float longSideSqrt = sqrt(longSide); + auto minSide = minAmongThree(shortSideSqrt1, shortSideSqrt2, longSideSqrt); + auto maxModuleSize = + maxAmongThree(centerA->getEstimatedModuleSize(), centerB->getEstimatedModuleSize(), + centerC->getEstimatedModuleSize()); + // if edge length smaller than 14 * module size + if (minSide <= maxModuleSize * 14) return false; + + float CosLong = (shortSide1 + shortSide2 - longSide) / (2 * shortSideSqrt1 * shortSideSqrt2); + float CosShort1 = (longSide + shortSide1 - shortSide2) / (2 * longSideSqrt * shortSideSqrt1); + float CosShort2 = (longSide + shortSide2 - shortSide1) / (2 * longSideSqrt * shortSideSqrt2); + + if (abs(CosLong) > FP_RIGHT_ANGLE || + (CosShort1 < FP_SMALL_ANGLE2 || CosShort1 > FP_SMALL_ANGLE1) || + (CosShort2 < FP_SMALL_ANGLE2 || CosShort2 > FP_SMALL_ANGLE1)) { + return false; + } + + return true; +} + +// return distance between two points +float FinderPatternFinder::distance(Ref p1, Ref p2) { + float dx = p1->getX() - p2->getX(); + float 
dy = p1->getY() - p2->getY(); + return (float)sqrt(dx * dx + dy * dy); +} + +FinderPatternFinder::FinderPatternFinder(Ref image, Ref block) + : finder_time(0), + compared_finder_counts(0), + image_(image), + possibleCenters_(), + hasSkipped_(false), + block_(block) { + CURRENT_CHECK_STATE = FinderPatternFinder::NORMAL; +} + +Ref FinderPatternFinder::getImage() { return image_; } + +vector>& FinderPatternFinder::getPossibleCenters() { return possibleCenters_; } + +} // namespace qrcode +} // namespace zxing \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern_finder.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern_finder.hpp new file mode 100644 index 00000000..442198bf --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern_finder.hpp @@ -0,0 +1,136 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_QRCODE_DETECTOR_FINDER_PATTERN_FINDER_HPP_ +#define __ZXING_QRCODE_DETECTOR_FINDER_PATTERN_FINDER_HPP_ + +#include "../../common/bitmatrix.hpp" +#include "../../common/counted.hpp" +#include "../../common/unicomblock.hpp" +#include "../../errorhandler.hpp" +#include "finder_pattern.hpp" +#include "finder_pattern_info.hpp" +using zxing::ErrorHandler; +using zxing::ReaderErrorHandler; + +namespace zxing { + +class DecodeHints; + +namespace qrcode { + +class FinderPatternFinder { +public: + enum CrossCheckState { + NORMAL = 0, + LEFT_SPILL = 1, + RIHGT_SPILL = 2, + LEFT_RIGHT_SPILL = 3, + NOT_PATTERN = 4, + }; + +private: + static int CENTER_QUORUM; + static int MIN_SKIP; + static int MAX_MODULES; + static int INTEGER_MATH_SHIFT; + static int FP_INPUT_CNN_MAX_NUM; + static int FP_IS_SELECT_BEST; + static int FP_IS_SELECT_FILE_BEST; + static int FP_INPUT_MAX_NUM; + static int FP_FILTER_SIZE; + static int FPS_CLUSTER_MAX; + static int FPS_RESULT_MAX; + static int K_FACTOR; + + static float FPS_MS_VAL; + static float FP_COUNT_MIN; + static float FP_MS_MIN; + static float FP_RIGHT_ANGLE; + static float FP_SMALL_ANGLE1; + static float FP_SMALL_ANGLE2; + static float QR_MIN_FP_AREA_ERR; + static float QR_MIN_FP_MS_ERR; + static int QR_MIN_FP_ACCEPT; + + int finder_time; + CrossCheckState CURRENT_CHECK_STATE; + int compared_finder_counts; + + struct HorizontalCheckedResult { + size_t centerI; + float centerJ; + }; + + vector > _horizontalCheckedResult; + + // INI CONFIG + +protected: + Ref image_; + std::vector > possibleCenters_; + + bool hasSkipped_; + Ref block_; + + /** stateCount must be int[5] */ + float centerFromEnd(int* stateCount, int end); + // check if satisfies finder pattern + bool foundPatternCross(int* stateCount); + + // try to insert to possibleCenters_ + int getStateCountTotal(int* stateCount, const CrossCheckState& check_state); + bool tryToPushToCenters(float posX, float posY, float estimatedModuleSize, + CrossCheckState 
horizontalState = FinderPatternFinder::NORMAL, + CrossCheckState verticalState = FinderPatternFinder::NORMAL); + bool crossCheckDiagonal(int startI, int centerJ, int maxCount, int originalStateCountTotal); + float crossCheckVertical(size_t startI, size_t centerJ, int maxCount, + int originalStateCountTota, float& estimatedVerticalModuleSize); + float crossCheckHorizontal(size_t startJ, size_t centerI, int maxCount, + int originalStateCountTotal, float& estimatedHorizontalModuleSize); + + float hasHorizontalCheckedResult(size_t startJ, size_t centerI); + int addHorizontalCheckedResult(size_t startJ, size_t centerI, float centerJ); + int getMinModuleSize(); + + bool isEqualResult(Ref src, Ref dst); + + /** stateCount must be int[5] */ + bool handlePossibleCenter(int* stateCount, size_t i, size_t j); + int findRowSkip(); + + std::vector > selectBestPatterns(ErrorHandler& err_handler); + std::vector > selectFileBestPatterns(ErrorHandler& err_handler); + std::vector > orderBestPatterns(std::vector > patterns); + + vector > getPatternInfosFileMode(DecodeHints const& hints, + ErrorHandler& err_handler); + + bool IsPossibleFindPatterInfo(Ref a, Ref b, Ref c); + void PushToResult(Ref a, Ref b, Ref c, + vector >& patternInfos); + + Ref getImage(); + std::vector >& getPossibleCenters(); + +public: + void InitConfig(); + float distance(Ref p1, Ref p2); + FinderPatternFinder(Ref image, Ref block); + + std::vector > find(DecodeHints const& hints, ErrorHandler& err_handler); + + bool checkIsoscelesRightTriangle(Ref centerA, Ref centerB, + Ref centerC, float& longSide); +}; +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DETECTOR_FINDER_PATTERN_FINDER_HPP_ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern_info.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern_info.cpp new file mode 100644 index 00000000..2e121f97 --- /dev/null +++ 
b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern_info.cpp @@ -0,0 +1,96 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "finder_pattern_info.hpp" + +namespace zxing { +namespace qrcode { +FinderPatternInfo::FinderPatternInfo(std::vector > patternCenters) + : bottomLeft_(patternCenters[0]), + topLeft_(patternCenters[1]), + topRight_(patternCenters[2]), + possibleFix_(0) { + estimateFinderPatternInfo(); +} + +Ref FinderPatternInfo::getBottomLeft() { return bottomLeft_; } + +Ref FinderPatternInfo::getTopLeft() { return topLeft_; } + +Ref FinderPatternInfo::getTopRight() { return topRight_; } + + +float FinderPatternInfo::getPossibleFix() { return possibleFix_; } + +float FinderPatternInfo::getAnglePossibleFix() { return anglePossibleFix_; } + +// bottomLeft_ => centerA +void FinderPatternInfo::calculateSides(Ref centerA, Ref centerB, + Ref centerC, float &longSide, + float &shortSide1, float &shortSide2) { + float a_m_b_x = centerA->getX() - centerB->getX(); + float a_m_b_y = centerA->getY() - centerB->getY(); + float ab_s = a_m_b_x * a_m_b_x + a_m_b_y * a_m_b_y; + float a_m_c_x = centerA->getX() - centerC->getX(); + float a_m_c_y = centerA->getY() - centerC->getY(); + float ac_s = a_m_c_x * a_m_c_x + a_m_c_y * a_m_c_y; + float b_m_c_x = centerB->getX() - centerC->getX(); + float b_m_c_y = centerB->getY() - centerC->getY(); + float bc_s = b_m_c_x * b_m_c_x + b_m_c_y * b_m_c_y; + + if (ab_s > bc_s && ab_s > ac_s) { + longSide = ab_s; + 
shortSide1 = ac_s; + shortSide2 = bc_s; + + } else if (bc_s > ab_s && bc_s > ac_s) { + longSide = bc_s; + shortSide1 = ab_s; + shortSide2 = ac_s; + } else { + longSide = ac_s; + shortSide1 = ab_s; + shortSide2 = bc_s; + } +} +void FinderPatternInfo::estimateFinderPatternInfo() { + float longSide, shortSide1, shortSide2; + calculateSides(bottomLeft_, topLeft_, topRight_, longSide, shortSide1, shortSide2); + + float CosLong = + (shortSide1 + shortSide2 - longSide) / (2 * sqrt(shortSide1) * sqrt(shortSide2)); + float CosShort1 = + (longSide + shortSide1 - shortSide2) / (2 * sqrt(longSide) * sqrt(shortSide1)); + float CosShort2 = + (longSide + shortSide2 - shortSide1) / (2 * sqrt(longSide) * sqrt(shortSide2)); + + float fAngleLong = acos(CosLong) * 180 / acos(-1.0); + float fAngleShort1 = acos(CosShort1) * 180 / acos(-1.0); + float fAngleShort2 = acos(CosShort2) * 180 / acos(-1.0); + if (fAngleShort1 < fAngleShort2) swap(fAngleShort1, fAngleShort2); + + float fLongDiff = fabs(fAngleLong - 90); + float fLongScore = 100.0 - fLongDiff; + + float fShortDiff = std::max(fabs(fAngleShort1 - 45), fabs(fAngleShort2 - 45)); + float fShortScore = 100.0 - 2 * fShortDiff; + + float fFinalScore = std::min(fShortScore, fLongScore); + + anglePossibleFix_ = fFinalScore / 100.0; + + int totalCount = (bottomLeft_->getCount() + topLeft_->getCount() + topRight_->getCount()); + + float fCountScore = (max(3, min(totalCount, 10)) - 3) / (10.0 - 3.0); + + possibleFix_ = anglePossibleFix_ * 0.5 + fCountScore * 0.5; +} +} // namespace qrcode +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern_info.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern_info.hpp new file mode 100644 index 00000000..223524e2 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/finder_pattern_info.hpp @@ -0,0 +1,46 @@ +// This file is part of 
OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_QRCODE_DETECTOR_FINDER_PATTERN_INFO_HPP_ +#define __ZXING_QRCODE_DETECTOR_FINDER_PATTERN_INFO_HPP_ + +#include "../../common/array.hpp" +#include "../../common/counted.hpp" +#include "finder_pattern.hpp" + +namespace zxing { +namespace qrcode { + +class FinderPatternInfo : public Counted { +private: + Ref bottomLeft_; + Ref topLeft_; + Ref topRight_; + float possibleFix_; + float anglePossibleFix_; + +public: + explicit FinderPatternInfo(std::vector > patternCenters); + + Ref getBottomLeft(); + Ref getTopLeft(); + Ref getTopRight(); + void estimateFinderPatternInfo(); + float getPossibleFix(); + float getAnglePossibleFix(); + // to void code duplicated + static void calculateSides(Ref centerA, Ref centerB, + Ref centerC, float &longSide, float &shortSide1, + float &shortSide2); +}; +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DETECTOR_FINDER_PATTERN_INFO_HPP_ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/pattern_result.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/pattern_result.cpp new file mode 100644 index 00000000..b44349ee --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/pattern_result.cpp @@ -0,0 +1,28 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../../precomp.hpp" +#include "pattern_result.hpp" + + +using zxing::Ref; +using zxing::ResultPoint; +using zxing::qrcode::FinderPattern; +using zxing::qrcode::FinderPatternInfo; +using zxing::qrcode::PatternResult; + +PatternResult::PatternResult(Ref info) { + finderPatternInfo = info; + possibleAlignmentPatterns.clear(); +} + +void PatternResult::setConfirmedAlignmentPattern(int index) { + if (index >= int(possibleAlignmentPatterns.size())) return; + confirmedAlignmentPattern = possibleAlignmentPatterns[index]; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/pattern_result.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/pattern_result.hpp new file mode 100644 index 00000000..93a4f2ba --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/detector/pattern_result.hpp @@ -0,0 +1,48 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_QRCODE_DETECTOR_PATTERN_RESULT_HPP_ +#define __ZXING_QRCODE_DETECTOR_PATTERN_RESULT_HPP_ + +#include "../../common/array.hpp" +#include "../../common/bitmatrix.hpp" +#include "../../common/counted.hpp" +#include "../../common/detector_result.hpp" +#include "../../resultpoint.hpp" +#include "alignment_pattern.hpp" +#include "finder_pattern.hpp" +#include "finder_pattern_info.hpp" +namespace zxing { +namespace qrcode { +class PatternResult : public Counted { +public: + Ref finderPatternInfo; + vector > possibleAlignmentPatterns; + Ref confirmedAlignmentPattern; + int possibleDimension; + // vector possibleDimensions; + unsigned int possibleVersion; + float possibleFix; + float possibleModuleSize; + + explicit PatternResult(Ref info); + void setConfirmedAlignmentPattern(int index); + int getPossibleAlignmentCount() { return possibleAlignmentPatterns.size(); } + // int getPossibleDimensionCount(); +public: + unsigned int getPossibleVersion() { return possibleVersion; } + float getPossibleFix() { return possibleFix; } + float getPossibleModuleSize() { return possibleModuleSize; } + int getDimension() { return possibleDimension; } +}; +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_DETECTOR_PATTERN_RESULT_HPP_ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/error_correction_level.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/error_correction_level.cpp new file mode 100644 index 00000000..1b2c63de --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/error_correction_level.cpp @@ -0,0 +1,44 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. 
+// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../precomp.hpp" +#include "error_correction_level.hpp" +using zxing::ErrorHandler; + +namespace zxing { +namespace qrcode { + +ErrorCorrectionLevel::ErrorCorrectionLevel(int inOrdinal, int bits, char const* name) + : ordinal_(inOrdinal), bits_(bits), name_(name) {} + +int ErrorCorrectionLevel::ordinal() const { return ordinal_; } + +int ErrorCorrectionLevel::bits() const { return bits_; } + +string const& ErrorCorrectionLevel::name() const { return name_; } + +ErrorCorrectionLevel::operator string const &() const { return name_; } + +ErrorCorrectionLevel& ErrorCorrectionLevel::forBits(int bits, ErrorHandler& err_handler) { + if (bits < 0 || bits >= N_LEVELS) { + err_handler = zxing::ReaderErrorHandler("Ellegal error correction level bits"); + return *FOR_BITS[0]; + } + return *FOR_BITS[bits]; +} + +ErrorCorrectionLevel ErrorCorrectionLevel::L(0, 0x01, "L"); +ErrorCorrectionLevel ErrorCorrectionLevel::M(1, 0x00, "M"); +ErrorCorrectionLevel ErrorCorrectionLevel::Q(2, 0x03, "Q"); +ErrorCorrectionLevel ErrorCorrectionLevel::H(3, 0x02, "H"); +ErrorCorrectionLevel* ErrorCorrectionLevel::FOR_BITS[] = {&M, &L, &H, &Q}; +int ErrorCorrectionLevel::N_LEVELS = 4; + +} // namespace qrcode +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/error_correction_level.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/error_correction_level.hpp new file mode 100644 index 00000000..67344a83 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/error_correction_level.hpp @@ -0,0 +1,44 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_QRCODE_ERROR_CORRECTION_LEVEL_HPP__ +#define __ZXING_QRCODE_ERROR_CORRECTION_LEVEL_HPP__ + +#include "../errorhandler.hpp" + +namespace zxing { +namespace qrcode { + +class ErrorCorrectionLevel { +private: + int ordinal_; + int bits_; + std::string name_; + ErrorCorrectionLevel(int inOrdinal, int bits, char const* name); + static ErrorCorrectionLevel* FOR_BITS[]; + static int N_LEVELS; + +public: + static ErrorCorrectionLevel L; + static ErrorCorrectionLevel M; + static ErrorCorrectionLevel Q; + static ErrorCorrectionLevel H; + + int ordinal() const; + int bits() const; + std::string const& name() const; + operator std::string const &() const; + + static ErrorCorrectionLevel& forBits(int bits, ErrorHandler& err_handler); +}; +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_ERROR_CORRECTION_LEVEL_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/format_information.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/format_information.cpp new file mode 100644 index 00000000..5395f791 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/format_information.cpp @@ -0,0 +1,114 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. 
+// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../precomp.hpp" +#include "format_information.hpp" + +#include + +using zxing::ErrorHandler; + +namespace zxing { +namespace qrcode { + +int FormatInformation::FORMAT_INFO_MASK_QR = 0x5412; +int FormatInformation::FORMAT_INFO_DECODE_LOOKUP[][2] = { + {0x5412, 0x00}, {0x5125, 0x01}, {0x5E7C, 0x02}, {0x5B4B, 0x03}, {0x45F9, 0x04}, {0x40CE, 0x05}, + {0x4F97, 0x06}, {0x4AA0, 0x07}, {0x77C4, 0x08}, {0x72F3, 0x09}, {0x7DAA, 0x0A}, {0x789D, 0x0B}, + {0x662F, 0x0C}, {0x6318, 0x0D}, {0x6C41, 0x0E}, {0x6976, 0x0F}, {0x1689, 0x10}, {0x13BE, 0x11}, + {0x1CE7, 0x12}, {0x19D0, 0x13}, {0x0762, 0x14}, {0x0255, 0x15}, {0x0D0C, 0x16}, {0x083B, 0x17}, + {0x355F, 0x18}, {0x3068, 0x19}, {0x3F31, 0x1A}, {0x3A06, 0x1B}, {0x24B4, 0x1C}, {0x2183, 0x1D}, + {0x2EDA, 0x1E}, {0x2BED, 0x1F}, +}; +int FormatInformation::N_FORMAT_INFO_DECODE_LOOKUPS = 32; + +int FormatInformation::BITS_SET_IN_HALF_BYTE[] = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4}; + +FormatInformation::FormatInformation(int formatInfo, float possiableFix, ErrorHandler& err_handler) + : errorCorrectionLevel_(ErrorCorrectionLevel::forBits((formatInfo >> 3) & 0x03, err_handler)), + dataMask_((char)(formatInfo & 0x07)) { + possiableFix_ = possiableFix; + if (err_handler.ErrCode()) return; +} + +ErrorCorrectionLevel& FormatInformation::getErrorCorrectionLevel() { return errorCorrectionLevel_; } + +char FormatInformation::getDataMask() { return dataMask_; } + +float FormatInformation::getPossiableFix() { return possiableFix_; } + +int FormatInformation::numBitsDiffering(int a, int b) { + a ^= b; + return BITS_SET_IN_HALF_BYTE[a & 0x0F] + BITS_SET_IN_HALF_BYTE[(a >> 4 & 0x0F)] + + BITS_SET_IN_HALF_BYTE[(a >> 8 & 0x0F)] + BITS_SET_IN_HALF_BYTE[(a >> 12 & 0x0F)] + + BITS_SET_IN_HALF_BYTE[(a >> 16 & 0x0F)] + 
BITS_SET_IN_HALF_BYTE[(a >> 20 & 0x0F)] + + BITS_SET_IN_HALF_BYTE[(a >> 24 & 0x0F)] + BITS_SET_IN_HALF_BYTE[(a >> 28 & 0x0F)]; +} + +Ref FormatInformation::decodeFormatInformation(int maskedFormatInfo1, + int maskedFormatInfo2) { + Ref result(doDecodeFormatInformation(maskedFormatInfo1, maskedFormatInfo2)); + if (result != 0) { + return result; + } + // Should return null, but, some QR codes apparently + // do not mask this info. Try again by actually masking the pattern + // first + return doDecodeFormatInformation(maskedFormatInfo1 ^ FORMAT_INFO_MASK_QR, + maskedFormatInfo2 ^ FORMAT_INFO_MASK_QR); +} +Ref FormatInformation::doDecodeFormatInformation(int maskedFormatInfo1, + int maskedFormatInfo2) { + ErrorHandler err_handler; + int distance = numBitsDiffering(maskedFormatInfo1, maskedFormatInfo2); + float possiableFix_ = (16.0 - (distance > 16 ? 16 : distance)) / 16.0; + + // Find the int in FORMAT_INFO_DECODE_LOOKUP with fewest bits differing + int bestDifference = std::numeric_limits::max(); + int bestFormatInfo = 0; + for (int i = 0; i < N_FORMAT_INFO_DECODE_LOOKUPS; i++) { + int* decodeInfo = FORMAT_INFO_DECODE_LOOKUP[i]; + int targetInfo = decodeInfo[0]; + if (targetInfo == maskedFormatInfo1 || targetInfo == maskedFormatInfo2) { + // Found an exact match + Ref result( + new FormatInformation(decodeInfo[1], possiableFix_, err_handler)); + if (err_handler.ErrCode()) return Ref(); + return result; + } + int bitsDifference = numBitsDiffering(maskedFormatInfo1, targetInfo); + if (bitsDifference < bestDifference) { + bestFormatInfo = decodeInfo[1]; + bestDifference = bitsDifference; + } + if (maskedFormatInfo1 != maskedFormatInfo2) { + // also try the other option + bitsDifference = numBitsDiffering(maskedFormatInfo2, targetInfo); + if (bitsDifference < bestDifference) { + bestFormatInfo = decodeInfo[1]; + bestDifference = bitsDifference; + } + } + } + if (bestDifference <= 3) { + Ref result( + new FormatInformation(bestFormatInfo, possiableFix_, err_handler)); + 
if (err_handler.ErrCode()) return Ref(); + return result; + } + Ref result; + return result; +} + +bool operator==(const FormatInformation& a, const FormatInformation& b) { + return &(a.errorCorrectionLevel_) == &(b.errorCorrectionLevel_) && a.dataMask_ == b.dataMask_; +} + +} // namespace qrcode +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/format_information.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/format_information.hpp new file mode 100644 index 00000000..249c35b7 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/format_information.hpp @@ -0,0 +1,48 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_QRCODE_FORMAT_INFORMATION_HPP__ +#define __ZXING_QRCODE_FORMAT_INFORMATION_HPP__ + +#include "../common/counted.hpp" +#include "../errorhandler.hpp" +#include "error_correction_level.hpp" + +namespace zxing { +namespace qrcode { + +class FormatInformation : public Counted { +private: + static int FORMAT_INFO_MASK_QR; + static int FORMAT_INFO_DECODE_LOOKUP[][2]; + static int N_FORMAT_INFO_DECODE_LOOKUPS; + static int BITS_SET_IN_HALF_BYTE[]; + + ErrorCorrectionLevel &errorCorrectionLevel_; + char dataMask_; + float possiableFix_; + + FormatInformation(int formatInfo, float possiableFix, ErrorHandler &err_handler); + +public: + static int numBitsDiffering(int a, int b); + static Ref decodeFormatInformation(int maskedFormatInfo1, + int maskedFormatInfo2); + static Ref doDecodeFormatInformation(int maskedFormatInfo1, + int maskedFormatInfo2); + ErrorCorrectionLevel &getErrorCorrectionLevel(); + char getDataMask(); + float getPossiableFix(); + friend bool operator==(const FormatInformation &a, const FormatInformation &b); +}; +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_FORMAT_INFORMATION_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/qrcode_reader.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/qrcode_reader.cpp new file mode 100644 index 00000000..b6c4c4f7 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/qrcode_reader.cpp @@ -0,0 +1,500 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. 
+// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../precomp.hpp" +#include "qrcode_reader.hpp" +#include +#include "../common/bitarray.hpp" +#include "detector/detector.hpp" + + +using zxing::ErrorHandler; + +namespace zxing { +namespace qrcode { + +QRCodeReader::QRCodeReader() : decoder_() { + readerState_ = QRCodeReader::READER_START; + detectedDimension_ = -1; + lastDecodeTime_ = 0; + lastDecodeID_ = 0; + decodeID_ = 0; + lastPossibleAPCount_ = 0; + possibleAPCount_ = 0; + lastSamePossibleAPCountTimes_ = 0; + samePossibleAPCountTimes_ = 0; + lastRecommendedImageSizeType_ = 0; + recommendedImageSizeType_ = 0; + smoothMaxMultiple_ = 40; +} + +vector> QRCodeReader::decode(Ref image) { return decode(image, DecodeHints()); } + +vector> QRCodeReader::decode(Ref image, DecodeHints hints) { + // Binarize image using the Histogram Binarized method and be binarized + ErrorHandler err_handler; + vector> result_list; + Ref imageBitMatrix = image->getBlackMatrix(err_handler); + if (err_handler.ErrCode() || imageBitMatrix == NULL) return result_list; + + vector> rst = decodeMore(image, imageBitMatrix, hints, err_handler); + if (err_handler.ErrCode() || rst.empty()) { + // black white mirro!!! 
+ Ref invertedMatrix = image->getInvertedMatrix(err_handler); + if (err_handler.ErrCode() || invertedMatrix == NULL) return result_list; + vector> tmp_rst = decodeMore(image, invertedMatrix, hints, err_handler); + if (err_handler.ErrCode() || tmp_rst.empty()) return tmp_rst; + } + + return rst; +} + +vector> QRCodeReader::decodeMore(Ref image, Ref imageBitMatrix, + DecodeHints hints, ErrorHandler &err_handler) { + nowHints_ = hints; + std::string ept; + vector> result_list; + if (imageBitMatrix == NULL) return result_list; + image->m_poUnicomBlock->Init(); + image->m_poUnicomBlock->Reset(imageBitMatrix); + + for (int tryTimes = 0; tryTimes < 1; tryTimes++) { + Ref detector(new Detector(imageBitMatrix, image->m_poUnicomBlock)); + err_handler.Reset(); + + detector->detect(hints, err_handler); + if (err_handler.ErrCode()) { + err_handler = zxing::ReaderErrorHandler("error detect"); + setReaderState(detector->getState()); + ept = err_handler.ErrMsg(); + continue; + } + + setReaderState(detector->getState()); + + int possiblePatternCount = detector->getPossiblePatternCount(); + + if (possiblePatternCount <= 0) { + continue; + } + for (int i = 0; i < possiblePatternCount; i++) { + // filter and perserve the highest score. 
+ Ref patternInfo = detector->getFinderPatternInfo(i); + setPatternFix(patternInfo->getPossibleFix()); + if (patternInfo->getAnglePossibleFix() < 0.6 && i) continue; + bool patternFoundFlag = false; + int possibleAlignmentCount = 0; + possibleAlignmentCount = detector->getPossibleAlignmentCount(i); + if (possibleAlignmentCount < 0) continue; + + detectedDimension_ = detector->getDimension(i); + possibleModuleSize_ = detector->getPossibleModuleSize(i); + setPossibleAPCountByVersion(detector->getPossibleVersion(i)); + + vector needTryVariousDeimensions(possibleAlignmentCount, false); + for (int j = 0; j < possibleAlignmentCount; j++) { + if (patternFoundFlag){break;} + ArrayRef > points; + err_handler.Reset(); + Ref detectorResult = + detector->getResultViaAlignment(i, j, detectedDimension_, err_handler); + if (err_handler.ErrCode()) { + ept = err_handler.ErrCode(); + setDecoderFix(decoder_.getPossibleFix(), points); + setReaderState(decoder_.getState()); + + if ((patternInfo->getPossibleFix() > 0.9 && decoder_.getPossibleFix() < 0.1)) { + needTryVariousDeimensions[j] = true; + } + continue; + } + + points = detectorResult->getPoints(); + Ref decoderResult( + decoder_.decode(detectorResult->getBits(), err_handler)); + if (err_handler.ErrCode()) { + ept = err_handler.ErrCode(); + setDecoderFix(decoder_.getPossibleFix(), points); + setReaderState(decoder_.getState()); + + if ((patternInfo->getPossibleFix() > 0.9 && decoder_.getPossibleFix() < 0.1)) { + needTryVariousDeimensions[j] = true; + } + continue; + } + + // If the code was mirrored: swap the bottom-left and the + // top-right points. 
+ if (decoderResult->getOtherClassName() == "QRCodeDecoderMetaData") { + decoderResult->getOther()->applyMirroredCorrection(points); + } + + setDecoderFix(decoder_.getPossibleFix(), points); + setReaderState(decoder_.getState()); + + Ref result( + new Result(decoderResult->getText(), decoderResult->getRawBytes(), points, + decoderResult->getCharset(), decoderResult->getQRCodeVersion(), + decoderResult->getEcLevel(), decoderResult->getCharsetMode())); + setSuccFix(points); + result_list.push_back(result); + patternFoundFlag = true; + if (nowHints_.getUseNNDetector()) { + return result_list; + } + } + // try different dimentions + for (int j = 0; j < possibleAlignmentCount; j++) { + if (patternFoundFlag){break;} + err_handler.Reset(); + ArrayRef > points; + if (needTryVariousDeimensions[j]) { + vector possibleDimensions = getPossibleDimentions(detectedDimension_); + for (size_t k = 1; k < possibleDimensions.size(); k++) { + err_handler.Reset(); + int dimension = possibleDimensions[k]; + + Ref detectorResult = + detector->getResultViaAlignment(i, j, dimension, err_handler); + if (err_handler.ErrCode() || detectorResult == NULL) { + ept = err_handler.ErrMsg(); + setDecoderFix(decoder_.getPossibleFix(), points); + setReaderState(decoder_.getState()); + continue; + } + + points = detectorResult->getPoints(); + Ref decoderResult( + decoder_.decode(detectorResult->getBits(), err_handler)); + if (err_handler.ErrCode() || decoderResult == NULL) { + ept = err_handler.ErrMsg(); + setDecoderFix(decoder_.getPossibleFix(), points); + setReaderState(decoder_.getState()); + continue; + } + + if (decoderResult->getOtherClassName() == "QRCodeDecoderMetaData") { + decoderResult->getOther()->applyMirroredCorrection(points); + } + + setDecoderFix(decoder_.getPossibleFix(), points); + setReaderState(decoder_.getState()); + + detectedDimension_ = possibleDimensions[k]; + Ref result(new Result( + decoderResult->getText(), decoderResult->getRawBytes(), points, + decoderResult->getCharset(), 
decoderResult->getQRCodeVersion(), + decoderResult->getEcLevel(), decoderResult->getCharsetMode())); + + setSuccFix(points); + result_list.push_back(result); + patternFoundFlag = true; + if (nowHints_.getUseNNDetector()) { + return result_list; + } + } + } + } + } + } + return result_list; +} + +vector QRCodeReader::getPossibleDimentions(int detectDimension) { + vector possibleDimentions; + possibleDimentions.clear(); + + if (detectDimension < 0) { + return possibleDimentions; + } + + possibleDimentions.push_back(detectDimension); + + if (detectDimension <= 169 && detectDimension >= 73) { + possibleDimentions.push_back(detectDimension + 4); + possibleDimentions.push_back(detectDimension - 4); + possibleDimentions.push_back(detectDimension - 8); + possibleDimentions.push_back(detectDimension + 8); + } else if (detectDimension <= 69 && detectDimension >= 45) { + possibleDimentions.push_back(detectDimension + 4); + possibleDimentions.push_back(detectDimension - 4); + } + + if (detectDimension == 19) { + possibleDimentions.push_back(21); + } + + return possibleDimentions; +} + +void QRCodeReader::setPossibleAPCountByVersion(unsigned int version) { + // cout<<"setPossibleAPCountByVersion"< input, Ref output, + int window) { + BitMatrix &imatrix = *input; + BitMatrix &omatrix = *output; + window >>= 1; + int count = 0; + int width = input->getWidth(); + int height = input->getHeight(); + int bitsize = imatrix.getRowBitsSize(); + + bool *jrowtoset = new bool[bitsize]; + + bool *jrow = NULL; + + jrow = NULL; + + unsigned int size = window * window; + + for (int j = (window + 1); j < (height - 1 - window); ++j) { + int y1 = j - window - 1; + int y2 = j + window; + + int offset1 = y1 * width; + int offset2 = y2 * width; + + jrow = imatrix.getRowBoolPtr(j); + + memcpy(jrowtoset, jrow, bitsize * sizeof(bool)); + + for (int i = (window + 1); i < (width - 1 - window); ++i) { + int x1 = i - window - 1; + int x2 = i + window; + unsigned int sum = integral[offset2 + x2] - 
integral[offset2 + x1] + + integral[offset1 + x2] - integral[offset1 + x1]; + bool b = jrow[i]; + bool result; + // the middle 1/3 contains informations of corner, these + // informations is useful for finding the finder pattern + int sum3 = 3 * sum; + if ((unsigned int)sum3 <= size) { + result = false; + } else if ((unsigned int)sum3 >= size * 2) { + result = true; + } else { + result = b; + } + + if (result) { + jrowtoset[i] = true; + } + count += (result ^ b) == 1 ? 1U : 0U; + } + omatrix.setRowBool(j, jrowtoset); + } + + delete[] jrowtoset; + return count; +} + +void QRCodeReader::initIntegralOld(unsigned int *integral, Ref input) { + BitMatrix &matrix = *input; + int width = input->getWidth(); + int height = input->getHeight(); + + bool *therow = NULL; + + therow = matrix.getRowBoolPtr(0); + + integral[0] = therow[0]; + + int *s = new int[width]; + + memset(s, 0, width * sizeof(int)); + + integral[0] = therow[0]; + for (int j = 1; j < width; j++) { + integral[j] = integral[j - 1] + therow[j]; + s[j] += therow[j]; + } + + int offset = width; + unsigned int prevSum = 0; + + for (int i = 1; i < height; i++) { + offset = i * width; + therow = matrix.getRowBoolPtr(i); + + integral[offset] = integral[offset - width] + therow[0]; + offset++; + + for (int j = 1; j < width; j++) { + s[j] += therow[j]; + integral[offset] = prevSum + s[j]; + prevSum = integral[offset]; + offset++; + } + } + + delete[] s; + + return; +} + +void QRCodeReader::initIntegral(unsigned int *integral, Ref input) { + BitMatrix &matrix = *input; + int width = input->getWidth(); + int height = input->getHeight(); + + bool *therow = NULL; + + therow = matrix.getRowBoolPtr(0); + + // first row only + int rs = 0; + for (int j = 0; j < width; j++) { + rs += therow[j]; + integral[j] = rs; + } + + // remaining cells are sum above and to the left + int offset = 0; + + for (int i = 1; i < height; ++i) { + therow = matrix.getRowBoolPtr(i); + + rs = 0; + + offset += width; + + for (int j = 0; j < width; ++j) 
{ + rs += therow[j]; + integral[offset + j] = rs + integral[offset - width + j]; + } + } + + return; +} + +int QRCodeReader::getRecommendedImageSizeTypeInteral() { + if (time(0) - lastDecodeTime_ > 30) recommendedImageSizeType_ = 0; + return recommendedImageSizeType_; +} + +unsigned int QRCodeReader::getDecodeID() { return decodeID_; } + +void QRCodeReader::setDecodeID(unsigned int id) { + lastDecodeTime_ = time(0); + + decodeID_ = id; + if (decodeID_ != lastDecodeID_) { + lastDecodeID_ = decodeID_; + lastPossibleAPCount_ = possibleAPCount_; + lastSamePossibleAPCountTimes_ = samePossibleAPCountTimes_; + lastRecommendedImageSizeType_ = getRecommendedImageSizeTypeInteral(); + possibleAPCount_ = 0; + recommendedImageSizeType_ = 0; + } +} + +QRCodeReader::~QRCodeReader() {} +Decoder &QRCodeReader::getDecoder() { return decoder_; } + +unsigned int QRCodeReader::getPossibleAPType() { + int version = (detectedDimension_ - 21) / 4 + 1; + setPossibleAPCountByVersion(version); + return possibleAPCount_; +} +int QRCodeReader::getPossibleFixType() { return possibleQrcodeInfo_.possibleFix > 0.0 ? 
1 : 0; } + +void QRCodeReader::setPatternFix(float possibleFix) { + possibleQrcodeInfo_.patternPossibleFix = possibleFix; +} + +void QRCodeReader::setDecoderFix(float possibleFix, ArrayRef > border) { + float realFix = possibleFix; + if (possibleQrcodeInfo_.possibleFix < realFix) { + possibleQrcodeInfo_.possibleFix = realFix; + possibleQrcodeInfo_.qrcodeBorder.clear(); + possibleQrcodeInfo_.possibleModuleSize = possibleModuleSize_; + if (border) { + for (int i = 0; i < 4; ++i) { + possibleQrcodeInfo_.qrcodeBorder.push_back(border[i]); + } + } + } +} +void QRCodeReader::setSuccFix(ArrayRef > border) { + possibleQrcodeInfo_.qrcodeBorder.clear(); + possibleQrcodeInfo_.possibleModuleSize = possibleModuleSize_; + if (border) { + for (int i = 0; i < 4; ++i) { + possibleQrcodeInfo_.qrcodeBorder.push_back(border[i]); + } + } +} + +void QRCodeReader::setReaderState(Detector::DetectorState state) { + switch (state) { + case Detector::START: + this->readerState_ = QRCodeReader::DETECT_START; + break; + case Detector::FINDFINDERPATTERN: + this->readerState_ = QRCodeReader::DETECT_FINDFINDERPATTERN; + break; + case Detector::FINDALIGNPATTERN: + this->readerState_ = QRCodeReader::DETECT_FINDALIGNPATTERN; + break; + } + return; +} +void QRCodeReader::setReaderState(Decoder::DecoderState state) { + switch (state) { + case Decoder::NOTSTART: + this->readerState_ = QRCodeReader::DETECT_FAILD; + break; + case Decoder::START: + if (this->readerState_ < QRCodeReader::DECODE_START) { + this->readerState_ = QRCodeReader::DECODE_START; + } + break; + case Decoder::READVERSION: + if (this->readerState_ < QRCodeReader::DECODE_READVERSION) { + this->readerState_ = QRCodeReader::DECODE_READVERSION; + } + break; + case Decoder::READERRORCORRECTIONLEVEL: + if (this->readerState_ < QRCodeReader::DECODE_READERRORCORRECTIONLEVEL) { + this->readerState_ = QRCodeReader::DECODE_READERRORCORRECTIONLEVEL; + } + break; + case Decoder::READCODEWORDSORRECTIONLEVEL: + if (this->readerState_ < 
QRCodeReader::DECODE_READCODEWORDSORRECTIONLEVEL) { + this->readerState_ = QRCodeReader::DECODE_READCODEWORDSORRECTIONLEVEL; + } + break; + case Decoder::FINISH: + if (this->readerState_ < QRCodeReader::DECODE_FINISH) { + this->readerState_ = QRCodeReader::DECODE_FINISH; + } + break; + } + return; +} +} // namespace qrcode +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/qrcode_reader.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/qrcode_reader.hpp new file mode 100644 index 00000000..613286c2 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/qrcode_reader.hpp @@ -0,0 +1,131 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_QRCODE_QRCODE_READER_HPP__ +#define __ZXING_QRCODE_QRCODE_READER_HPP__ + +#include "../decodehints.hpp" +#include "../errorhandler.hpp" +#include "../reader.hpp" +#include "decoder/decoder.hpp" +#include "decoder/qrcode_decoder_metadata.hpp" +#include "detector/detector.hpp" + +namespace zxing { +namespace qrcode { + +struct QBAR_QRCODE_DETECT_INFO { + int possibleFixIndex; + unsigned int possibleAPType; + + // QRCodeReader Info + float possibleFix; + float patternPossibleFix; + int pyramidLev; + float possibleModuleSize; + std::vector > qrcodeBorder; + + QBAR_QRCODE_DETECT_INFO() { clear(); } + + void clear() { + possibleFixIndex = -1; + possibleAPType = 0; + possibleModuleSize = 0; + + possibleFix = 0; + patternPossibleFix = 0; + pyramidLev = 0; + qrcodeBorder.clear(); + } +}; + +class QRCodeReader : public Reader { +public: + enum ReaderState { + READER_START = -1, + DETECT_START = 0, + DETECT_FINDFINDERPATTERN = 1, + DETECT_FINDALIGNPATTERN = 2, + DETECT_FAILD = 3, + DECODE_START = 4, + DECODE_READVERSION = 5, + DECODE_READERRORCORRECTIONLEVEL = 6, + DECODE_READCODEWORDSORRECTIONLEVEL = 7, + DECODE_FINISH = 8 + }; + +private: + Decoder decoder_; + int detectedDimension_; + ReaderState readerState_; + DecodeHints nowHints_; + +protected: + Decoder& getDecoder(); + +public: + QRCodeReader(); + virtual ~QRCodeReader(); + string name() override { return "qrcode"; } + + vector> decode(Ref image) override; + vector> decode(Ref image, DecodeHints hints) override; + + vector> decodeMore(Ref image, Ref imageBitMatrix, + DecodeHints hints, ErrorHandler& err_handler); + +private: + QBAR_QRCODE_DETECT_INFO possibleQrcodeInfo_; + +protected: + void setPossibleAPCountByVersion(unsigned int version); + int getRecommendedImageSizeTypeInteral(); + static void initIntegralOld(unsigned int* integral, Ref input); + static void initIntegral(unsigned int* integral, Ref input); + static int smooth(unsigned int* integral, Ref input, Ref output, + int window); + 
unsigned int lastDecodeTime_; + unsigned int lastDecodeID_; + unsigned int decodeID_; + int lastPossibleAPCount_; + int possibleAPCount_; + int possibleModuleSize_; + unsigned int lastSamePossibleAPCountTimes_; + unsigned int samePossibleAPCountTimes_; + unsigned int lastRecommendedImageSizeType_; + unsigned int recommendedImageSizeType_; + unsigned int smoothMaxMultiple_; + +public: + virtual unsigned int getDecodeID() override; + virtual void setDecodeID(unsigned int id) override; + virtual float getPossibleFix() override; + virtual unsigned int getPossibleAPType(); + virtual int getPossibleFixType(); + + void setReaderState(Detector::DetectorState state); + void setReaderState(Decoder::DecoderState state); + + void setPatternFix(float possibleFix); + void setDecoderFix(float possibleFix, ArrayRef > border); + void setSuccFix(ArrayRef > border); + + ReaderState getReaderState() { return this->readerState_; } + float calQrcodeArea(Ref detectorResult); + float calTriangleArea(Ref centerA, Ref centerB, + Ref centerC); + + vector getPossibleDimentions(int detectDimension); +}; + +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_QRCODE_READER_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/version.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/version.cpp new file mode 100644 index 00000000..1f08a6b6 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/version.cpp @@ -0,0 +1,500 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. 
+// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../../precomp.hpp" +#include "version.hpp" +#include "format_information.hpp" + +#include +using std::numeric_limits; +using zxing::ErrorHandler; + +namespace zxing { +namespace qrcode { + +ECB::ECB(int count, int dataCodewords) : count_(count), dataCodewords_(dataCodewords) {} + +int ECB::getCount() { return count_; } + +int ECB::getDataCodewords() { return dataCodewords_; } + +ECBlocks::ECBlocks(int ecCodewords, ECB *ecBlocks) + : ecCodewords_(ecCodewords), ecBlocks_(1, ecBlocks) {} + +ECBlocks::ECBlocks(int ecCodewords, ECB *ecBlocks1, ECB *ecBlocks2) + : ecCodewords_(ecCodewords), ecBlocks_(1, ecBlocks1) { + ecBlocks_.push_back(ecBlocks2); +} + +int ECBlocks::getECCodewords() { return ecCodewords_; } + +std::vector &ECBlocks::getECBlocks() { return ecBlocks_; } + +ECBlocks::~ECBlocks() { + for (size_t i = 0; i < ecBlocks_.size(); i++) { + delete ecBlocks_[i]; + } +} + +unsigned int Version::VERSION_DECODE_INFO[] = { + 0x07C94, 0x085BC, 0x09A99, 0x0A4D3, 0x0BBF6, 0x0C762, 0x0D847, 0x0E60D, 0x0F928, + 0x10B78, 0x1145D, 0x12A17, 0x13532, 0x149A6, 0x15683, 0x168C9, 0x177EC, 0x18EC4, + 0x191E1, 0x1AFAB, 0x1B08E, 0x1CC1A, 0x1D33F, 0x1ED75, 0x1F250, 0x209D5, 0x216F0, + 0x228BA, 0x2379F, 0x24B0B, 0x2542E, 0x26A64, 0x27541, 0x28C69}; +int Version::N_VERSION_DECODE_INFOS = 34; + +vector > Version::VERSIONS; +static int N_VERSIONS = Version::buildVersions(); + +int Version::getVersionNumber() { return versionNumber_; } + +vector &Version::getAlignmentPatternCenters() { return alignmentPatternCenters_; } + +int Version::getTotalCodewords() { return totalCodewords_; } + +int Version::getDimensionForVersion(ErrorHandler &err_handler) { + if (versionNumber_ < 1 || versionNumber_ > N_VERSIONS) { + err_handler = zxing::ReaderErrorHandler("versionNumber must be between 1 and 40"); + return -1; + } + return 17 + 4 * versionNumber_; +} + +ECBlocks &Version::getECBlocksForLevel(ErrorCorrectionLevel 
&ecLevel) { + return *ecBlocks_[ecLevel.ordinal()]; +} + +static vector *intArray(size_t n...) { + va_list ap; + va_start(ap, n); + vector *result = new vector(n); + for (size_t i = 0; i < n; i++) { + (*result)[i] = va_arg(ap, int); + } + va_end(ap); + return result; +} + +Version *Version::getProvisionalVersionForDimension(int dimension, ErrorHandler &err_handler) { + if (dimension % 4 != 1) { + err_handler = zxing::FormatErrorHandler("dimension % 4 != 1"); + return NULL; + } + + // return Version::getVersionForNumber((dimension - 17) >> 2); + Version *version = Version::getVersionForNumber((dimension - 17) >> 2, err_handler); + if (err_handler.ErrCode()) { + err_handler = zxing::FormatErrorHandler("err format"); + return NULL; + } + return version; +} + +Version *Version::getVersionForNumber(int versionNumber, ErrorHandler &err_handler) { + if (versionNumber < 1 || versionNumber > N_VERSIONS) { + err_handler = zxing::ReaderErrorHandler("versionNumber must be between 1 and 40"); + return NULL; + } + return VERSIONS[versionNumber - 1]; +} + +Version::Version(int versionNumber, vector *alignmentPatternCenters, ECBlocks *ecBlocks1, + ECBlocks *ecBlocks2, ECBlocks *ecBlocks3, ECBlocks *ecBlocks4) + : versionNumber_(versionNumber), + alignmentPatternCenters_(*alignmentPatternCenters), + ecBlocks_(4), + totalCodewords_(0) { + ecBlocks_[0] = ecBlocks1; + ecBlocks_[1] = ecBlocks2; + ecBlocks_[2] = ecBlocks3; + ecBlocks_[3] = ecBlocks4; + + int total = 0; + int ecCodewords = ecBlocks1->getECCodewords(); + vector &ecbArray = ecBlocks1->getECBlocks(); + for (size_t i = 0; i < ecbArray.size(); i++) { + ECB *ecBlock = ecbArray[i]; + total += ecBlock->getCount() * (ecBlock->getDataCodewords() + ecCodewords); + } + totalCodewords_ = total; +} + +Version::~Version() { + delete &alignmentPatternCenters_; + for (size_t i = 0; i < ecBlocks_.size(); i++) { + delete ecBlocks_[i]; + } +} + +Version *Version::decodeVersionInformation(unsigned int versionBits) { + int bestDifference = 
numeric_limits::max(); + size_t bestVersion = 0; + ErrorHandler err_handler; + for (int i = 0; i < N_VERSION_DECODE_INFOS; i++) { + unsigned targetVersion = VERSION_DECODE_INFO[i]; + // Do the version info bits match exactly? done. + if (targetVersion == versionBits) { + // return getVersionForNumber(i + 7 ); + Version *version = getVersionForNumber(i + 7, err_handler); + if (err_handler.ErrCode()) return 0; + return version; + } + // Otherwise see if this is the closest to a real version info bit + // string we have seen so far + int bitsDifference = FormatInformation::numBitsDiffering(versionBits, targetVersion); + if (bitsDifference < bestDifference) { + bestVersion = i + 7; + bestDifference = bitsDifference; + } + } + // We can tolerate up to 3 bits of error since no two version info codewords + // will differ in less than 4 bits. + if (bestDifference <= 3) { + // return getVersionForNumber(bestVersion); + Version *version = getVersionForNumber(bestVersion, err_handler); + if (err_handler.ErrCode()) return 0; + return version; + } + // If we didn't find a close enough match, fail + return 0; +} + +Ref Version::buildFixedPatternValue(ErrorHandler &err_handler) { + int dimension = getDimensionForVersion(err_handler); + if (err_handler.ErrCode()) return Ref(); + + Ref fixedInfo(new BitMatrix(dimension, err_handler)); + if (err_handler.ErrCode()) return Ref(); + + // first timming patterns + for (int i = 0; i < dimension; i += 2) fixedInfo->set(i, 6); + for (int i = 0; i < dimension; i += 2) fixedInfo->set(6, i); + + // FP top left + fixedInfo->setRegion(0, 0, 8, 8, err_handler); + fixedInfo->flipRegion(0, 0, 8, 8, err_handler); + fixedInfo->flipRegion(0, 0, 7, 7, err_handler); + fixedInfo->flipRegion(1, 1, 5, 5, err_handler); + fixedInfo->flipRegion(2, 2, 3, 3, err_handler); + + // FP top right + fixedInfo->setRegion(dimension - 8, 0, 8, 8, err_handler); + fixedInfo->flipRegion(dimension - 8, 0, 8, 8, err_handler); + fixedInfo->flipRegion(dimension - 7, 0, 7, 7, 
err_handler); + fixedInfo->flipRegion(dimension - 6, 1, 5, 5, err_handler); + fixedInfo->flipRegion(dimension - 5, 2, 3, 3, err_handler); + + // FP bottom left + fixedInfo->setRegion(0, dimension - 8, 8, 8, err_handler); + fixedInfo->flipRegion(0, dimension - 8, 8, 8, err_handler); + fixedInfo->flipRegion(0, dimension - 7, 7, 7, err_handler); + fixedInfo->flipRegion(1, dimension - 6, 5, 5, err_handler); + fixedInfo->flipRegion(2, dimension - 5, 3, 3, err_handler); + if (err_handler.ErrCode()) return Ref(); + + // Alignment patterns + size_t max = alignmentPatternCenters_.size(); + for (size_t x = 0; x < max; x++) { + int i = alignmentPatternCenters_[x] - 2; + for (size_t y = 0; y < max; y++) { + if ((x == 0 && (y == 0 || y == max - 1)) || (x == max - 1 && y == 0)) { + // No alignment patterns near the three finder patterns + continue; + } + fixedInfo->setRegion(alignmentPatternCenters_[y] - 2, i, 5, 5, err_handler); + // fixedInfo->flipRegion(alignmentPatternCenters_[y] - 2, i, 5, 5); + fixedInfo->flipRegion(alignmentPatternCenters_[y] - 1, i + 1, 3, 3, err_handler); + fixedInfo->flipRegion(alignmentPatternCenters_[y], i + 2, 1, 1, err_handler); + if (err_handler.ErrCode()) return Ref(); + } + } + return fixedInfo; +} + +Ref Version::buildFixedPatternTemplate(ErrorHandler &err_handler) { + int dimension = getDimensionForVersion(err_handler); + Ref functionPattern(new BitMatrix(dimension, err_handler)); + if (err_handler.ErrCode()) return Ref(); + + // Top left finder pattern + separator + format + functionPattern->setRegion(0, 0, 8, 8, err_handler); + // Top right finder pattern + separator + format + functionPattern->setRegion(dimension - 8, 0, 8, 8, err_handler); + // Bottom left finder pattern + separator + format + functionPattern->setRegion(0, dimension - 8, 8, 8, err_handler); + if (err_handler.ErrCode()) return Ref(); + + // Alignment patterns + size_t max = alignmentPatternCenters_.size(); + for (size_t x = 0; x < max; x++) { + int i = 
alignmentPatternCenters_[x] - 2; + for (size_t y = 0; y < max; y++) { + if ((x == 0 && (y == 0 || y == max - 1)) || (x == max - 1 && y == 0)) { + // No alignment patterns near the three finder patterns + continue; + } + functionPattern->setRegion(alignmentPatternCenters_[y] - 2, i, 5, 5, err_handler); + } + } + + // Vertical timing pattern + functionPattern->setRegion(6, 8, 1, dimension - 16, err_handler); + // Horizontal timing pattern + functionPattern->setRegion(8, 6, dimension - 16, 1, err_handler); + if (err_handler.ErrCode()) return Ref(); + + return functionPattern; +} + +Ref Version::buildFunctionPattern(ErrorHandler &err_handler) { + int dimension = getDimensionForVersion(err_handler); + Ref functionPattern(new BitMatrix(dimension, err_handler)); + if (err_handler.ErrCode()) return Ref(); + + // Top left finder pattern + separator + format + functionPattern->setRegion(0, 0, 9, 9, err_handler); + // Top right finder pattern + separator + format + functionPattern->setRegion(dimension - 8, 0, 8, 9, err_handler); + // Bottom left finder pattern + separator + format + functionPattern->setRegion(0, dimension - 8, 9, 8, err_handler); + + // Alignment patterns + size_t max = alignmentPatternCenters_.size(); + for (size_t x = 0; x < max; x++) { + int i = alignmentPatternCenters_[x] - 2; + for (size_t y = 0; y < max; y++) { + if ((x == 0 && (y == 0 || y == max - 1)) || (x == max - 1 && y == 0)) { + // No alignment patterns near the three finder patterns + continue; + } + functionPattern->setRegion(alignmentPatternCenters_[y] - 2, i, 5, 5, err_handler); + } + } + + // Vertical timing pattern + functionPattern->setRegion(6, 9, 1, dimension - 17, err_handler); + // Horizontal timing pattern + functionPattern->setRegion(9, 6, dimension - 17, 1, err_handler); + if (err_handler.ErrCode()) return Ref(); + + if (versionNumber_ > 6) { + // Version info, top right + functionPattern->setRegion(dimension - 11, 0, 3, 6, err_handler); + // Version info, bottom left + 
functionPattern->setRegion(0, dimension - 11, 6, 3, err_handler); + if (err_handler.ErrCode()) return Ref(); + } + + return functionPattern; +} + +int Version::buildVersions() { + VERSIONS.push_back(Ref(new Version( + 1, intArray(0), new ECBlocks(7, new ECB(1, 19)), new ECBlocks(10, new ECB(1, 16)), + new ECBlocks(13, new ECB(1, 13)), new ECBlocks(17, new ECB(1, 9))))); + VERSIONS.push_back(Ref(new Version( + 2, intArray(2, 6, 18), new ECBlocks(10, new ECB(1, 34)), new ECBlocks(16, new ECB(1, 28)), + new ECBlocks(22, new ECB(1, 22)), new ECBlocks(28, new ECB(1, 16))))); + VERSIONS.push_back(Ref(new Version( + 3, intArray(2, 6, 22), new ECBlocks(15, new ECB(1, 55)), new ECBlocks(26, new ECB(1, 44)), + new ECBlocks(18, new ECB(2, 17)), new ECBlocks(22, new ECB(2, 13))))); + VERSIONS.push_back(Ref(new Version( + 4, intArray(2, 6, 26), new ECBlocks(20, new ECB(1, 80)), new ECBlocks(18, new ECB(2, 32)), + new ECBlocks(26, new ECB(2, 24)), new ECBlocks(16, new ECB(4, 9))))); + VERSIONS.push_back(Ref(new Version( + 5, intArray(2, 6, 30), new ECBlocks(26, new ECB(1, 108)), new ECBlocks(24, new ECB(2, 43)), + new ECBlocks(18, new ECB(2, 15), new ECB(2, 16)), + new ECBlocks(22, new ECB(2, 11), new ECB(2, 12))))); + VERSIONS.push_back(Ref(new Version( + 6, intArray(2, 6, 34), new ECBlocks(18, new ECB(2, 68)), new ECBlocks(16, new ECB(4, 27)), + new ECBlocks(24, new ECB(4, 19)), new ECBlocks(28, new ECB(4, 15))))); + VERSIONS.push_back(Ref(new Version( + 7, intArray(3, 6, 22, 38), new ECBlocks(20, new ECB(2, 78)), + new ECBlocks(18, new ECB(4, 31)), new ECBlocks(18, new ECB(2, 14), new ECB(4, 15)), + new ECBlocks(26, new ECB(4, 13), new ECB(1, 14))))); + VERSIONS.push_back( + Ref(new Version(8, intArray(3, 6, 24, 42), new ECBlocks(24, new ECB(2, 97)), + new ECBlocks(22, new ECB(2, 38), new ECB(2, 39)), + new ECBlocks(22, new ECB(4, 18), new ECB(2, 19)), + new ECBlocks(26, new ECB(4, 14), new ECB(2, 15))))); + VERSIONS.push_back( + Ref(new Version(9, intArray(3, 6, 26, 46), new 
ECBlocks(30, new ECB(2, 116)), + new ECBlocks(22, new ECB(3, 36), new ECB(2, 37)), + new ECBlocks(20, new ECB(4, 16), new ECB(4, 17)), + new ECBlocks(24, new ECB(4, 12), new ECB(4, 13))))); + VERSIONS.push_back(Ref(new Version(10, intArray(3, 6, 28, 50), + new ECBlocks(18, new ECB(2, 68), new ECB(2, 69)), + new ECBlocks(26, new ECB(4, 43), new ECB(1, 44)), + new ECBlocks(24, new ECB(6, 19), new ECB(2, 20)), + new ECBlocks(28, new ECB(6, 15), new ECB(2, 16))))); + VERSIONS.push_back( + Ref(new Version(11, intArray(3, 6, 30, 54), new ECBlocks(20, new ECB(4, 81)), + new ECBlocks(30, new ECB(1, 50), new ECB(4, 51)), + new ECBlocks(28, new ECB(4, 22), new ECB(4, 23)), + new ECBlocks(24, new ECB(3, 12), new ECB(8, 13))))); + VERSIONS.push_back(Ref(new Version(12, intArray(3, 6, 32, 58), + new ECBlocks(24, new ECB(2, 92), new ECB(2, 93)), + new ECBlocks(22, new ECB(6, 36), new ECB(2, 37)), + new ECBlocks(26, new ECB(4, 20), new ECB(6, 21)), + new ECBlocks(28, new ECB(7, 14), new ECB(4, 15))))); + VERSIONS.push_back( + Ref(new Version(13, intArray(3, 6, 34, 62), new ECBlocks(26, new ECB(4, 107)), + new ECBlocks(22, new ECB(8, 37), new ECB(1, 38)), + new ECBlocks(24, new ECB(8, 20), new ECB(4, 21)), + new ECBlocks(22, new ECB(12, 11), new ECB(4, 12))))); + VERSIONS.push_back(Ref(new Version( + 14, intArray(4, 6, 26, 46, 66), new ECBlocks(30, new ECB(3, 115), new ECB(1, 116)), + new ECBlocks(24, new ECB(4, 40), new ECB(5, 41)), + new ECBlocks(20, new ECB(11, 16), new ECB(5, 17)), + new ECBlocks(24, new ECB(11, 12), new ECB(5, 13))))); + VERSIONS.push_back(Ref(new Version( + 15, intArray(4, 6, 26, 48, 70), new ECBlocks(22, new ECB(5, 87), new ECB(1, 88)), + new ECBlocks(24, new ECB(5, 41), new ECB(5, 42)), + new ECBlocks(30, new ECB(5, 24), new ECB(7, 25)), + new ECBlocks(24, new ECB(11, 12), new ECB(7, 13))))); + VERSIONS.push_back(Ref(new Version( + 16, intArray(4, 6, 26, 50, 74), new ECBlocks(24, new ECB(5, 98), new ECB(1, 99)), + new ECBlocks(28, new ECB(7, 45), new 
ECB(3, 46)), + new ECBlocks(24, new ECB(15, 19), new ECB(2, 20)), + new ECBlocks(30, new ECB(3, 15), new ECB(13, 16))))); + VERSIONS.push_back(Ref(new Version( + 17, intArray(4, 6, 30, 54, 78), new ECBlocks(28, new ECB(1, 107), new ECB(5, 108)), + new ECBlocks(28, new ECB(10, 46), new ECB(1, 47)), + new ECBlocks(28, new ECB(1, 22), new ECB(15, 23)), + new ECBlocks(28, new ECB(2, 14), new ECB(17, 15))))); + VERSIONS.push_back(Ref(new Version( + 18, intArray(4, 6, 30, 56, 82), new ECBlocks(30, new ECB(5, 120), new ECB(1, 121)), + new ECBlocks(26, new ECB(9, 43), new ECB(4, 44)), + new ECBlocks(28, new ECB(17, 22), new ECB(1, 23)), + new ECBlocks(28, new ECB(2, 14), new ECB(19, 15))))); + VERSIONS.push_back(Ref(new Version( + 19, intArray(4, 6, 30, 58, 86), new ECBlocks(28, new ECB(3, 113), new ECB(4, 114)), + new ECBlocks(26, new ECB(3, 44), new ECB(11, 45)), + new ECBlocks(26, new ECB(17, 21), new ECB(4, 22)), + new ECBlocks(26, new ECB(9, 13), new ECB(16, 14))))); + VERSIONS.push_back(Ref(new Version( + 20, intArray(4, 6, 34, 62, 90), new ECBlocks(28, new ECB(3, 107), new ECB(5, 108)), + new ECBlocks(26, new ECB(3, 41), new ECB(13, 42)), + new ECBlocks(30, new ECB(15, 24), new ECB(5, 25)), + new ECBlocks(28, new ECB(15, 15), new ECB(10, 16))))); + VERSIONS.push_back(Ref(new Version( + 21, intArray(5, 6, 28, 50, 72, 94), new ECBlocks(28, new ECB(4, 116), new ECB(4, 117)), + new ECBlocks(26, new ECB(17, 42)), new ECBlocks(28, new ECB(17, 22), new ECB(6, 23)), + new ECBlocks(30, new ECB(19, 16), new ECB(6, 17))))); + VERSIONS.push_back(Ref(new Version( + 22, intArray(5, 6, 26, 50, 74, 98), new ECBlocks(28, new ECB(2, 111), new ECB(7, 112)), + new ECBlocks(28, new ECB(17, 46)), new ECBlocks(30, new ECB(7, 24), new ECB(16, 25)), + new ECBlocks(24, new ECB(34, 13))))); + VERSIONS.push_back(Ref(new Version( + 23, intArray(5, 6, 30, 54, 78, 102), new ECBlocks(30, new ECB(4, 121), new ECB(5, 122)), + new ECBlocks(28, new ECB(4, 47), new ECB(14, 48)), + new ECBlocks(30, new 
ECB(11, 24), new ECB(14, 25)), + new ECBlocks(30, new ECB(16, 15), new ECB(14, 16))))); + VERSIONS.push_back(Ref(new Version( + 24, intArray(5, 6, 28, 54, 80, 106), new ECBlocks(30, new ECB(6, 117), new ECB(4, 118)), + new ECBlocks(28, new ECB(6, 45), new ECB(14, 46)), + new ECBlocks(30, new ECB(11, 24), new ECB(16, 25)), + new ECBlocks(30, new ECB(30, 16), new ECB(2, 17))))); + VERSIONS.push_back(Ref(new Version( + 25, intArray(5, 6, 32, 58, 84, 110), new ECBlocks(26, new ECB(8, 106), new ECB(4, 107)), + new ECBlocks(28, new ECB(8, 47), new ECB(13, 48)), + new ECBlocks(30, new ECB(7, 24), new ECB(22, 25)), + new ECBlocks(30, new ECB(22, 15), new ECB(13, 16))))); + VERSIONS.push_back(Ref(new Version( + 26, intArray(5, 6, 30, 58, 86, 114), new ECBlocks(28, new ECB(10, 114), new ECB(2, 115)), + new ECBlocks(28, new ECB(19, 46), new ECB(4, 47)), + new ECBlocks(28, new ECB(28, 22), new ECB(6, 23)), + new ECBlocks(30, new ECB(33, 16), new ECB(4, 17))))); + VERSIONS.push_back(Ref(new Version( + 27, intArray(5, 6, 34, 62, 90, 118), new ECBlocks(30, new ECB(8, 122), new ECB(4, 123)), + new ECBlocks(28, new ECB(22, 45), new ECB(3, 46)), + new ECBlocks(30, new ECB(8, 23), new ECB(26, 24)), + new ECBlocks(30, new ECB(12, 15), new ECB(28, 16))))); + VERSIONS.push_back( + Ref(new Version(28, intArray(6, 6, 26, 50, 74, 98, 122), + new ECBlocks(30, new ECB(3, 117), new ECB(10, 118)), + new ECBlocks(28, new ECB(3, 45), new ECB(23, 46)), + new ECBlocks(30, new ECB(4, 24), new ECB(31, 25)), + new ECBlocks(30, new ECB(11, 15), new ECB(31, 16))))); + VERSIONS.push_back( + Ref(new Version(29, intArray(6, 6, 30, 54, 78, 102, 126), + new ECBlocks(30, new ECB(7, 116), new ECB(7, 117)), + new ECBlocks(28, new ECB(21, 45), new ECB(7, 46)), + new ECBlocks(30, new ECB(1, 23), new ECB(37, 24)), + new ECBlocks(30, new ECB(19, 15), new ECB(26, 16))))); + VERSIONS.push_back( + Ref(new Version(30, intArray(6, 6, 26, 52, 78, 104, 130), + new ECBlocks(30, new ECB(5, 115), new ECB(10, 116)), + new 
ECBlocks(28, new ECB(19, 47), new ECB(10, 48)), + new ECBlocks(30, new ECB(15, 24), new ECB(25, 25)), + new ECBlocks(30, new ECB(23, 15), new ECB(25, 16))))); + VERSIONS.push_back( + Ref(new Version(31, intArray(6, 6, 30, 56, 82, 108, 134), + new ECBlocks(30, new ECB(13, 115), new ECB(3, 116)), + new ECBlocks(28, new ECB(2, 46), new ECB(29, 47)), + new ECBlocks(30, new ECB(42, 24), new ECB(1, 25)), + new ECBlocks(30, new ECB(23, 15), new ECB(28, 16))))); + VERSIONS.push_back(Ref( + new Version(32, intArray(6, 6, 34, 60, 86, 112, 138), new ECBlocks(30, new ECB(17, 115)), + new ECBlocks(28, new ECB(10, 46), new ECB(23, 47)), + new ECBlocks(30, new ECB(10, 24), new ECB(35, 25)), + new ECBlocks(30, new ECB(19, 15), new ECB(35, 16))))); + VERSIONS.push_back( + Ref(new Version(33, intArray(6, 6, 30, 58, 86, 114, 142), + new ECBlocks(30, new ECB(17, 115), new ECB(1, 116)), + new ECBlocks(28, new ECB(14, 46), new ECB(21, 47)), + new ECBlocks(30, new ECB(29, 24), new ECB(19, 25)), + new ECBlocks(30, new ECB(11, 15), new ECB(46, 16))))); + VERSIONS.push_back( + Ref(new Version(34, intArray(6, 6, 34, 62, 90, 118, 146), + new ECBlocks(30, new ECB(13, 115), new ECB(6, 116)), + new ECBlocks(28, new ECB(14, 46), new ECB(23, 47)), + new ECBlocks(30, new ECB(44, 24), new ECB(7, 25)), + new ECBlocks(30, new ECB(59, 16), new ECB(1, 17))))); + VERSIONS.push_back( + Ref(new Version(35, intArray(7, 6, 30, 54, 78, 102, 126, 150), + new ECBlocks(30, new ECB(12, 121), new ECB(7, 122)), + new ECBlocks(28, new ECB(12, 47), new ECB(26, 48)), + new ECBlocks(30, new ECB(39, 24), new ECB(14, 25)), + new ECBlocks(30, new ECB(22, 15), new ECB(41, 16))))); + VERSIONS.push_back( + Ref(new Version(36, intArray(7, 6, 24, 50, 76, 102, 128, 154), + new ECBlocks(30, new ECB(6, 121), new ECB(14, 122)), + new ECBlocks(28, new ECB(6, 47), new ECB(34, 48)), + new ECBlocks(30, new ECB(46, 24), new ECB(10, 25)), + new ECBlocks(30, new ECB(2, 15), new ECB(64, 16))))); + VERSIONS.push_back( + Ref(new Version(37, 
intArray(7, 6, 28, 54, 80, 106, 132, 158), + new ECBlocks(30, new ECB(17, 122), new ECB(4, 123)), + new ECBlocks(28, new ECB(29, 46), new ECB(14, 47)), + new ECBlocks(30, new ECB(49, 24), new ECB(10, 25)), + new ECBlocks(30, new ECB(24, 15), new ECB(46, 16))))); + VERSIONS.push_back( + Ref(new Version(38, intArray(7, 6, 32, 58, 84, 110, 136, 162), + new ECBlocks(30, new ECB(4, 122), new ECB(18, 123)), + new ECBlocks(28, new ECB(13, 46), new ECB(32, 47)), + new ECBlocks(30, new ECB(48, 24), new ECB(14, 25)), + new ECBlocks(30, new ECB(42, 15), new ECB(32, 16))))); + VERSIONS.push_back( + Ref(new Version(39, intArray(7, 6, 26, 54, 82, 110, 138, 166), + new ECBlocks(30, new ECB(20, 117), new ECB(4, 118)), + new ECBlocks(28, new ECB(40, 47), new ECB(7, 48)), + new ECBlocks(30, new ECB(43, 24), new ECB(22, 25)), + new ECBlocks(30, new ECB(10, 15), new ECB(67, 16))))); + VERSIONS.push_back( + Ref(new Version(40, intArray(7, 6, 30, 58, 86, 114, 142, 170), + new ECBlocks(30, new ECB(19, 118), new ECB(6, 119)), + new ECBlocks(28, new ECB(18, 47), new ECB(31, 48)), + new ECBlocks(30, new ECB(34, 24), new ECB(34, 25)), + new ECBlocks(30, new ECB(20, 15), new ECB(61, 16))))); + return VERSIONS.size(); +} + +} // namespace qrcode +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/version.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/version.hpp new file mode 100644 index 00000000..68bb7674 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/qrcode/version.hpp @@ -0,0 +1,86 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_QRCODE_VERSION_HPP__ +#define __ZXING_QRCODE_VERSION_HPP__ + +#include "../common/bitmatrix.hpp" +#include "../common/counted.hpp" +#include "../errorhandler.hpp" +#include "error_correction_level.hpp" + +namespace zxing { +namespace qrcode { + +// Encapsualtes the parameters for one error-correction block in one symbol +// version. This includes the number of data codewords, and the number of times +// a block with these parameters is used consecutively in the QR code version's +// format. +class ECB { +private: + int count_; + int dataCodewords_; + +public: + ECB(int count, int dataCodewords); + int getCount(); + int getDataCodewords(); +}; + +// Encapsulates a set of error-correction blocks in one symbol version. Most +// versions will use blocks of differing sizes within one version, so, this +// encapsulates the parameters for each set of blocks. It also holds the number +// of error-correction codewords per block since it will be the same across all +// blocks within one version.

+class ECBlocks { +private: + int ecCodewords_; + std::vector ecBlocks_; + +public: + ECBlocks(int ecCodewords, ECB *ecBlocks); + ECBlocks(int ecCodewords, ECB *ecBlocks1, ECB *ecBlocks2); + int getECCodewords(); + std::vector &getECBlocks(); + ~ECBlocks(); +}; + +class Version : public Counted { +private: + int versionNumber_; + std::vector &alignmentPatternCenters_; + std::vector ecBlocks_; + int totalCodewords_; + Version(int versionNumber, std::vector *alignmentPatternCenters, ECBlocks *ecBlocks1, + ECBlocks *ecBlocks2, ECBlocks *ecBlocks3, ECBlocks *ecBlocks4); + +public: + static unsigned int VERSION_DECODE_INFO[]; + static int N_VERSION_DECODE_INFOS; + static std::vector > VERSIONS; + + ~Version(); + int getVersionNumber(); + std::vector &getAlignmentPatternCenters(); + int getTotalCodewords(); + int getDimensionForVersion(ErrorHandler &err_handler); + ECBlocks &getECBlocksForLevel(ErrorCorrectionLevel &ecLevel); + static Version *getProvisionalVersionForDimension(int dimension, ErrorHandler &err_handler); + static Version *getVersionForNumber(int versionNumber, ErrorHandler &err_handler); + static Version *decodeVersionInformation(unsigned int versionBits); + Ref buildFunctionPattern(ErrorHandler &err_handler); + Ref buildFixedPatternValue(ErrorHandler &err_handler); + Ref buildFixedPatternTemplate(ErrorHandler &err_handler); + static int buildVersions(); +}; +} // namespace qrcode +} // namespace zxing + +#endif // __ZXING_QRCODE_VERSION_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/reader.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/reader.cpp new file mode 100644 index 00000000..7fa6b65f --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/reader.cpp @@ -0,0 +1,28 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). +#include "../precomp.hpp" +#include "reader.hpp" + +namespace zxing { + +Reader::~Reader() {} + +vector> Reader::decode(Ref image) { return decode(image, DecodeHints()); } + +unsigned int Reader::getDecodeID() { return 0; } + +void Reader::setDecodeID(unsigned int) {} + +float Reader::getPossibleFix() { return 0.0; } + + +string Reader::name() { return "unknow"; } + +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/reader.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/reader.hpp new file mode 100644 index 00000000..e958013c --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/reader.hpp @@ -0,0 +1,39 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_READER_HPP__ +#define __ZXING_READER_HPP__ + +#include "binarybitmap.hpp" +#include "decodehints.hpp" +#include "errorhandler.hpp" +#include "result.hpp" + +namespace zxing { + +class Reader : public Counted { +protected: + Reader() {} + +public: + virtual vector> decode(Ref image); + virtual vector> decode(Ref image, DecodeHints hints) = 0; + + virtual ~Reader(); + virtual string name(); + virtual unsigned int getDecodeID(); + virtual void setDecodeID(unsigned int id); + + virtual float getPossibleFix(); +}; + +} // namespace zxing + +#endif // __ZXING_READER_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/result.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/result.cpp new file mode 100644 index 00000000..e33c2c51 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/result.cpp @@ -0,0 +1,71 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../precomp.hpp" +#include "result.hpp" + +using zxing::ArrayRef; +using zxing::Ref; +using zxing::Result; +using zxing::ResultPoint; +using zxing::String; + +Result::Result(Ref text, ArrayRef rawBytes, ArrayRef > resultPoints) + : text_(text), rawBytes_(rawBytes), resultPoints_(resultPoints) { + charset_ = "UTF-8"; + + qrcodeVersion_ = -1; + pyramidLv_ = -1; + binaryMethod_ = -1; + ecLevel_ = '0'; +} + +Result::Result(Ref text, ArrayRef rawBytes, ArrayRef > resultPoints, + std::string charset) + : text_(text), rawBytes_(rawBytes), resultPoints_(resultPoints), charset_(charset) { + qrcodeVersion_ = -1; + pyramidLv_ = -1; + binaryMethod_ = -1; + ecLevel_ = '0'; +} + +Result::Result(Ref text, ArrayRef rawBytes, ArrayRef > resultPoints, + std::string charset, int QRCodeVersion, string ecLevel, string charsetMode) + : text_(text), + rawBytes_(rawBytes), + resultPoints_(resultPoints), + charset_(charset), + qrcodeVersion_(QRCodeVersion), + ecLevel_(ecLevel), + charsetMode_(charsetMode) { + pyramidLv_ = -1; + binaryMethod_ = -1; +} + +Result::~Result() {} + +Ref Result::getText() { return text_; } + +ArrayRef Result::getRawBytes() { return rawBytes_; } + +ArrayRef > const& Result::getResultPoints() const { return resultPoints_; } + +ArrayRef >& Result::getResultPoints() { return resultPoints_; } + +void Result::enlargeResultPoints(int scale) { + for (int i = 0; i < resultPoints_->size(); i++) { + resultPoints_[i] = Ref(new ResultPoint( + resultPoints_[i]->getX() * (float)scale, resultPoints_[i]->getY() * (float)scale)); + } + return; +} + +std::string Result::getCharset() const { return charset_; } + +std::string zxing::Result::getChartsetMode() const { return charsetMode_; } diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/result.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/result.hpp new file mode 100644 index 00000000..6bf053c0 --- /dev/null +++ 
b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/result.hpp @@ -0,0 +1,78 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_RESULT_HPP__ +#define __ZXING_RESULT_HPP__ + +#include +#include "common/array.hpp" +#include "common/counted.hpp" +#include "common/str.hpp" +#include "resultpoint.hpp" + +#include + +namespace zxing { + +class Result : public Counted { +private: + Ref text_; + ArrayRef rawBytes_; + ArrayRef > resultPoints_; + std::string charset_; + int qrcodeVersion_; + int pyramidLv_; + int binaryMethod_; + string ecLevel_; + string charsetMode_; + string scale_list_; + float decode_scale_; + uint32_t detect_time_; + uint32_t sr_time_; + +public: + Result(Ref text, ArrayRef rawBytes, ArrayRef > resultPoints); + + Result(Ref text, ArrayRef rawBytes, ArrayRef > resultPoints, + std::string charset); + + Result(Ref text, ArrayRef rawBytes, ArrayRef > resultPoints, + std::string charset, int QRCodeVersion, string ecLevel, string charsetMode); + + ~Result(); + + Ref getText(); + ArrayRef getRawBytes(); + ArrayRef > const& getResultPoints() const; + ArrayRef >& getResultPoints(); + std::string getCharset() const; + std::string getChartsetMode() const; + void enlargeResultPoints(int scale); + + int getQRCodeVersion() const { return qrcodeVersion_; }; + void setQRCodeVersion(int QRCodeVersion) { qrcodeVersion_ = QRCodeVersion; }; + int getPyramidLv() const { return pyramidLv_; }; + void setPyramidLv(int pyramidLv) { pyramidLv_ = pyramidLv; }; + int getBinaryMethod() const { return 
binaryMethod_; }; + void setBinaryMethod(int binaryMethod) { binaryMethod_ = binaryMethod; }; + string getEcLevel() const { return ecLevel_; } + void setEcLevel(char ecLevel) { ecLevel_ = ecLevel; } + std::string getScaleList() { return scale_list_; }; + void setScaleList(const std::string& scale_list) { scale_list_ = scale_list; }; + float getDecodeScale() { return decode_scale_; }; + void setDecodeScale(float decode_scale) { decode_scale_ = decode_scale; }; + uint32_t getDetectTime() { return detect_time_; }; + void setDetectTime(uint32_t detect_time) { detect_time_ = detect_time; }; + uint32_t getSrTime() { return sr_time_; }; + void setSrTime(uint32_t sr_time) { sr_time_ = sr_time; }; +}; + +} // namespace zxing +#endif // __ZXING_RESULT_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/resultpoint.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/resultpoint.cpp new file mode 100644 index 00000000..0fab518d --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/resultpoint.cpp @@ -0,0 +1,101 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+#include "../precomp.hpp" +#include "resultpoint.hpp" +#include "common/mathutils.hpp" + +using zxing::common::MathUtils; + +namespace zxing { + +ResultPoint::ResultPoint() : posX_(0), posY_(0) {} + +ResultPoint::ResultPoint(float x, float y) : posX_(x), posY_(y) {} + +ResultPoint::ResultPoint(int x, int y) : posX_(float(x)), posY_(float(y)) {} + +ResultPoint::~ResultPoint() {} + +float ResultPoint::getX() const { return posX_; } + +float ResultPoint::getY() const { return posY_; } + +void ResultPoint::SetX(float fX) { posX_ = fX; } + +void ResultPoint::SetY(float fY) { posY_ = fY; } + +bool ResultPoint::equals(Ref other) { + return (fabs(posX_ - other->getX()) <= 1e-6) && (fabs(posY_ - other->getY()) <= 1e-6); +} + +/** + *

Orders an array of three ResultPoints in an order [A,B,C] such that AB < + * AC and BC < AC and the angle between BC and BA is less than 180 degrees. + */ +void ResultPoint::orderBestPatterns(std::vector > &patterns) { + // Find distances between pattern centers + float zeroOneDistance = distance(patterns[0]->getX(), patterns[1]->getX(), patterns[0]->getY(), + patterns[1]->getY()); + float oneTwoDistance = distance(patterns[1]->getX(), patterns[2]->getX(), patterns[1]->getY(), + patterns[2]->getY()); + float zeroTwoDistance = distance(patterns[0]->getX(), patterns[2]->getX(), patterns[0]->getY(), + patterns[2]->getY()); + + Ref pointA, pointB, pointC; + // Assume one closest to other two is B; A and C will just be guesses at + // first + if (oneTwoDistance >= zeroOneDistance && oneTwoDistance >= zeroTwoDistance) { + pointB = patterns[0]; + pointA = patterns[1]; + pointC = patterns[2]; + } else if (zeroTwoDistance >= oneTwoDistance && zeroTwoDistance >= zeroOneDistance) { + pointB = patterns[1]; + pointA = patterns[0]; + pointC = patterns[2]; + } else { + pointB = patterns[2]; + pointA = patterns[0]; + pointC = patterns[1]; + } + + // Use cross product to figure out whether A and C are correct or flipped. + // This asks whether BC x BA has a positive z component, which is the + // arrangement we want for A, B, C. If it's negative, then we've got it + // flipped around and should swap A and C. 
+ if (crossProductZ(pointA, pointB, pointC) < 0.0f) { + Ref temp = pointA; + pointA = pointC; + pointC = temp; + } + + patterns[0] = pointA; + patterns[1] = pointB; + patterns[2] = pointC; +} + +float ResultPoint::distance(Ref pattern1, Ref pattern2) { + return MathUtils::distance(pattern1->posX_, pattern1->posY_, pattern2->posX_, pattern2->posY_); +} + +float ResultPoint::distance(float x1, float x2, float y1, float y2) { + float xDiff = x1 - x2; + float yDiff = y1 - y2; + return (float)sqrt((double)(xDiff * xDiff + yDiff * yDiff)); +} + +float ResultPoint::crossProductZ(Ref pointA, Ref pointB, + Ref pointC) { + float bX = pointB->getX(); + float bY = pointB->getY(); + return ((pointC->getX() - bX) * (pointA->getY() - bY)) - + ((pointC->getY() - bY) * (pointA->getX() - bX)); +} +} // namespace zxing diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/resultpoint.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/resultpoint.hpp new file mode 100644 index 00000000..cb5d05d0 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/resultpoint.hpp @@ -0,0 +1,47 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). 
+ +#ifndef __ZXING_RESULTPOINT_HPP__ +#define __ZXING_RESULTPOINT_HPP__ + +#include "common/counted.hpp" + +namespace zxing { + +class ResultPoint : public Counted { +protected: + float posX_; + float posY_; + +public: + ResultPoint(); + ResultPoint(float x, float y); + ResultPoint(int x, int y); + virtual ~ResultPoint(); + + virtual float getX() const; + virtual float getY() const; + virtual void SetX(float fX); + virtual void SetY(float fY); + + bool equals(Ref other); + + static void orderBestPatterns(std::vector > &patterns); + static float distance(Ref point1, Ref point2); + static float distance(float x1, float x2, float y1, float y2); + +private: + static float crossProductZ(Ref pointA, Ref pointB, + Ref pointC); +}; + +} // namespace zxing + +#endif // __ZXING_RESULTPOINT_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/zxing.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/zxing.hpp new file mode 100644 index 00000000..76efae22 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/src/zxing/zxing.hpp @@ -0,0 +1,69 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. +// +// Modified from ZXing. Copyright ZXing authors. +// Licensed under the Apache License, Version 2.0 (the "License"). + +#ifndef __ZXING_ZXING_HPP__ +#define __ZXING_ZXING_HPP__ + + +#define COUNTER_TYPE short + + +#define ZXING_ARRAY_LEN(v) ((int)(sizeof(v) / sizeof(v[0]))) +#define ZX_LOG_DIGITS(digits) \ + ((digits == 8) \ + ? 3 \ + : ((digits == 16) \ + ? 4 \ + : ((digits == 32) ? 5 : ((digits == 64) ? 6 : ((digits == 128) ? 
7 : (-1)))))) + +#ifndef USE_QRCODE_ONLY +#define USE_ONED_WRITER 1 +#endif + +#if defined(__ANDROID_API__) || defined(_MSC_VER) + +#ifndef NO_ICONV +#define NO_ICONV +#endif + +#endif + + + +#ifndef NO_ICONV_INSIDE +#define NO_ICONV_INSIDE +#endif + +#define ZXING_MAX_WIDTH 2048 +#define ZXING_MAX_HEIGHT 2048 + +namespace zxing { +typedef char byte; +typedef unsigned char boolean; +// typedef unsigned short ushort; +} // namespace zxing + +#include +#include + +namespace zxing { +inline bool isnan(float v) { return std::isnan(v); } +inline bool isnan(double v) { return std::isnan(v); } +inline float nan() { return std::numeric_limits::quiet_NaN(); } +} // namespace zxing + +#ifndef ZXING_TIME +#define ZXING_TIME(string) (void)0 +#endif +#ifndef ZXING_TIME_MARK +#define ZXING_TIME_MARK(string) (void)0 +#endif + +#endif // __ZXING_ZXING_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/test/test_main.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/test/test_main.cpp new file mode 100644 index 00000000..02b3a26f --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/test/test_main.cpp @@ -0,0 +1,29 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+ +#include "test_precomp.hpp" + +#if defined(HAVE_HPX) +#include +#endif + +static +void initTests() +{ +#ifdef HAVE_OPENCV_DNN + const char* extraTestDataPath = +#ifdef WINRT + NULL; +#else + getenv("OPENCV_DNN_TEST_DATA_PATH"); +#endif + if (extraTestDataPath) + cvtest::addDataSearchPath(extraTestDataPath); +#endif // HAVE_OPENCV_DNN +} + +CV_TEST_MAIN("cv", initTests()) diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/test/test_precomp.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/test/test_precomp.hpp new file mode 100644 index 00000000..0f5ee5ba --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/test/test_precomp.hpp @@ -0,0 +1,14 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. + +#ifndef __OPENCV_TEST_PRECOMP_HPP__ +#define __OPENCV_TEST_PRECOMP_HPP__ + +#include "opencv2/ts.hpp" +#include "opencv2/wechat_qrcode.hpp" + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/test/test_qrcode.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/test/test_qrcode.cpp new file mode 100644 index 00000000..d59932b8 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/wechat_qrcode/test/test_qrcode.cpp @@ -0,0 +1,459 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Tencent is pleased to support the open source community by making WeChat QRCode available. +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. 
+ +#include "test_precomp.hpp" +#include "opencv2/objdetect.hpp" + +namespace opencv_test { +namespace { +std::string qrcode_images_name[] = { + "version_1_down.jpg", /*"version_1_left.jpg", "version_1_right.jpg", "version_1_up.jpg",*/ + "version_1_top.jpg", + /*"version_2_down.jpg",*/ "version_2_left.jpg", /*"version_2_right.jpg",*/ + "version_2_up.jpg", + "version_2_top.jpg", + "version_3_down.jpg", + "version_3_left.jpg", + /*"version_3_right.jpg",*/ "version_3_up.jpg", + "version_3_top.jpg", + "version_4_down.jpg", + "version_4_left.jpg", + /*"version_4_right.jpg",*/ "version_4_up.jpg", + "version_4_top.jpg", + "version_5_down.jpg", + "version_5_left.jpg", + /*"version_5_right.jpg",*/ "version_5_up.jpg", + "version_5_top.jpg", + "russian.jpg", + "kanji.jpg", /*"link_github_ocv.jpg",*/ + "link_ocv.jpg", + "link_wiki_cv.jpg"}; + +std::string qrcode_images_close[] = {/*"close_1.png",*/ "close_2.png", "close_3.png", "close_4.png", + "close_5.png"}; +std::string qrcode_images_monitor[] = {"monitor_1.png", "monitor_2.png", "monitor_3.png", + "monitor_4.png", "monitor_5.png"}; +std::string qrcode_images_curved[] = {"curved_1.jpg", /*"curved_2.jpg", "curved_3.jpg", + "curved_4.jpg",*/ + "curved_5.jpg", "curved_6.jpg", + /*"curved_7.jpg", "curved_8.jpg"*/}; +std::string qrcode_images_multiple[] = {/*"2_qrcodes.png",*/ "3_close_qrcodes.png", /*"3_qrcodes.png", + "4_qrcodes.png", "5_qrcodes.png", "6_qrcodes.png",*/ + "7_qrcodes.png"/*, "8_close_qrcodes.png"*/}; + +typedef testing::TestWithParam Objdetect_QRCode; +TEST_P(Objdetect_QRCode, regression) { + const std::string name_current_image = GetParam(); + const std::string root = "qrcode/"; + + std::string image_path = findDataFile(root + name_current_image); + Mat src = imread(image_path, IMREAD_GRAYSCALE); + ASSERT_FALSE(src.empty()) << "Can't read image: " << image_path; + + vector points; + // can not find the model file + // so we temporarily comment it out + // auto detector = wechat_qrcode::WeChatQRCode( + // 
findDataFile("detect.prototxt", false), findDataFile("detect.caffemodel", false), + // findDataFile("sr.prototxt", false), findDataFile("sr.caffemodel", false)); + auto detector = wechat_qrcode::WeChatQRCode(); + auto decoded_info = detector.detectAndDecode(src, points); + + const std::string dataset_config = findDataFile(root + "dataset_config.json"); + FileStorage file_config(dataset_config, FileStorage::READ); + ASSERT_TRUE(file_config.isOpened()) << "Can't read validation data: " << dataset_config; + { + FileNode images_list = file_config["test_images"]; + size_t images_count = static_cast(images_list.size()); + ASSERT_GT(images_count, 0u) + << "Can't find validation data entries in 'test_images': " << dataset_config; + + for (size_t index = 0; index < images_count; index++) { + FileNode config = images_list[(int)index]; + std::string name_test_image = config["image_name"]; + if (name_test_image == name_current_image) { + std::string original_info = config["info"]; + string decoded_str; + if (decoded_info.size()) { + decoded_str = decoded_info[0]; + } + EXPECT_EQ(decoded_str, original_info); + return; // done + } + } + std::cerr << "Not found results for '" << name_current_image + << "' image in config file:" << dataset_config << std::endl + << "Re-run tests with enabled UPDATE_QRCODE_TEST_DATA macro to update test data." 
+ << std::endl; + } +} + +typedef testing::TestWithParam Objdetect_QRCode_Close; +TEST_P(Objdetect_QRCode_Close, regression) { + const std::string name_current_image = GetParam(); + const std::string root = "qrcode/close/"; + + std::string image_path = findDataFile(root + name_current_image); + Mat src = imread(image_path, IMREAD_GRAYSCALE); + ASSERT_FALSE(src.empty()) << "Can't read image: " << image_path; + + vector points; + // can not find the model file + // so we temporarily comment it out + // auto detector = wechat_qrcode::WeChatQRCode( + // findDataFile("detect.prototxt", false), findDataFile("detect.caffemodel", false), + // findDataFile("sr.prototxt", false), findDataFile("sr.caffemodel", false)); + auto detector = wechat_qrcode::WeChatQRCode(); + auto decoded_info = detector.detectAndDecode(src, points); + + const std::string dataset_config = findDataFile(root + "dataset_config.json"); + FileStorage file_config(dataset_config, FileStorage::READ); + ASSERT_TRUE(file_config.isOpened()) << "Can't read validation data: " << dataset_config; + { + FileNode images_list = file_config["close_images"]; + size_t images_count = static_cast(images_list.size()); + ASSERT_GT(images_count, 0u) + << "Can't find validation data entries in 'close_images': " << dataset_config; + + for (size_t index = 0; index < images_count; index++) { + FileNode config = images_list[(int)index]; + std::string name_test_image = config["image_name"]; + if (name_test_image == name_current_image) { + std::string original_info = config["info"]; + string decoded_str; + if (decoded_info.size()) { + decoded_str = decoded_info[0]; + } + EXPECT_EQ(decoded_str, original_info); + return; // done + } + } + std::cerr << "Not found results for '" << name_current_image + << "' image in config file:" << dataset_config << std::endl + << "Re-run tests with enabled UPDATE_QRCODE_TEST_DATA macro to update test data." 
+ << std::endl; + } +} + +typedef testing::TestWithParam Objdetect_QRCode_Monitor; +TEST_P(Objdetect_QRCode_Monitor, regression) { + const std::string name_current_image = GetParam(); + const std::string root = "qrcode/monitor/"; + + std::string image_path = findDataFile(root + name_current_image); + Mat src = imread(image_path, IMREAD_GRAYSCALE); + ASSERT_FALSE(src.empty()) << "Can't read image: " << image_path; + + vector points; + // can not find the model file + // so we temporarily comment it out + // auto detector = wechat_qrcode::WeChatQRCode( + // findDataFile("detect.prototxt", false), findDataFile("detect.caffemodel", false), + // findDataFile("sr.prototxt", false), findDataFile("sr.caffemodel", false)); + auto detector = wechat_qrcode::WeChatQRCode(); + auto decoded_info = detector.detectAndDecode(src, points); + + const std::string dataset_config = findDataFile(root + "dataset_config.json"); + FileStorage file_config(dataset_config, FileStorage::READ); + ASSERT_TRUE(file_config.isOpened()) << "Can't read validation data: " << dataset_config; + { + FileNode images_list = file_config["monitor_images"]; + size_t images_count = static_cast(images_list.size()); + ASSERT_GT(images_count, 0u) + << "Can't find validation data entries in 'monitor_images': " << dataset_config; + + for (size_t index = 0; index < images_count; index++) { + FileNode config = images_list[(int)index]; + std::string name_test_image = config["image_name"]; + if (name_test_image == name_current_image) { + std::string original_info = config["info"]; + string decoded_str; + if (decoded_info.size()) { + decoded_str = decoded_info[0]; + } + EXPECT_EQ(decoded_str, original_info); + return; // done + } + } + std::cerr << "Not found results for '" << name_current_image + << "' image in config file:" << dataset_config << std::endl + << "Re-run tests with enabled UPDATE_QRCODE_TEST_DATA macro to update test data." 
+ << std::endl; + } +} + +typedef testing::TestWithParam Objdetect_QRCode_Curved; +TEST_P(Objdetect_QRCode_Curved, regression) { + const std::string name_current_image = GetParam(); + const std::string root = "qrcode/curved/"; + + std::string image_path = findDataFile(root + name_current_image); + Mat src = imread(image_path, IMREAD_GRAYSCALE); + ASSERT_FALSE(src.empty()) << "Can't read image: " << image_path; + + vector points; + // can not find the model file + // so we temporarily comment it out + // auto detector = wechat_qrcode::WeChatQRCode( + // findDataFile("detect.prototxt", false), findDataFile("detect.caffemodel", false), + // findDataFile("sr.prototxt", false), findDataFile("sr.caffemodel", false)); + auto detector = wechat_qrcode::WeChatQRCode(); + auto decoded_info = detector.detectAndDecode(src, points); + + const std::string dataset_config = findDataFile(root + "dataset_config.json"); + FileStorage file_config(dataset_config, FileStorage::READ); + ASSERT_TRUE(file_config.isOpened()) << "Can't read validation data: " << dataset_config; + { + FileNode images_list = file_config["test_images"]; + size_t images_count = static_cast(images_list.size()); + ASSERT_GT(images_count, 0u) + << "Can't find validation data entries in 'test_images': " << dataset_config; + + for (size_t index = 0; index < images_count; index++) { + FileNode config = images_list[(int)index]; + std::string name_test_image = config["image_name"]; + if (name_test_image == name_current_image) { + std::string original_info = config["info"]; + string decoded_str; + if (decoded_info.size()) { + decoded_str = decoded_info[0]; + } + EXPECT_EQ(decoded_str, original_info); + return; // done + } + } + std::cerr << "Not found results for '" << name_current_image + << "' image in config file:" << dataset_config << std::endl + << "Re-run tests with enabled UPDATE_QRCODE_TEST_DATA macro to update test data." 
+ << std::endl; + } +} + +typedef testing::TestWithParam Objdetect_QRCode_Multi; +TEST_P(Objdetect_QRCode_Multi, regression) { + const std::string name_current_image = GetParam(); + const std::string root = "qrcode/multiple/"; + string path_detect_prototxt, path_detect_caffemodel, path_sr_prototxt, path_sr_caffemodel; + string model_version = "_2021-01"; + path_detect_prototxt = findDataFile("dnn/wechat"+model_version+"/detect.prototxt", false); + path_detect_caffemodel = findDataFile("dnn/wechat"+model_version+"/detect.caffemodel", false); + path_sr_prototxt = findDataFile("dnn/wechat"+model_version+"/sr.prototxt", false); + path_sr_caffemodel = findDataFile("dnn/wechat"+model_version+"/sr.caffemodel", false); + + std::string image_path = findDataFile(root + name_current_image); + Mat src = imread(image_path); + ASSERT_FALSE(src.empty()) << "Can't read image: " << image_path; + + vector points; + auto detector = wechat_qrcode::WeChatQRCode(path_detect_prototxt, path_detect_caffemodel, path_sr_prototxt, + path_sr_caffemodel); + vector decoded_info = detector.detectAndDecode(src, points); + + const std::string dataset_config = findDataFile(root + "dataset_config.json"); + FileStorage file_config(dataset_config, FileStorage::READ); + ASSERT_TRUE(file_config.isOpened()) << "Can't read validation data: " << dataset_config; + { + FileNode images_list = file_config["multiple_images"]; + size_t images_count = static_cast(images_list.size()); + ASSERT_GT(images_count, 0u) + << "Can't find validation data entries in 'test_images': " << dataset_config; + for (size_t index = 0; index < images_count; index++) { + FileNode config = images_list[(int)index]; + std::string name_test_image = config["image_name"]; + if (name_test_image == name_current_image) { + size_t count_eq_info = 0; + for (int i = 0; i < int(decoded_info.size()); i++) { + for (int j = 0; j < int(config["info"].size()); j++) { + std::string original_info = config["info"][j]; + if (original_info == 
decoded_info[i]) { + count_eq_info++; + break; + } + } + } + EXPECT_EQ(config["info"].size(), count_eq_info); + return; // done + } + } + std::cerr << "Not found results for '" << name_current_image + << "' image in config file:" << dataset_config << std::endl + << "Re-run tests with enabled UPDATE_QRCODE_TEST_DATA macro to update test data." + << std::endl; + } +} + +TEST(Objdetect_QRCode_points_position, rotate45) { + string path_detect_prototxt, path_detect_caffemodel, path_sr_prototxt, path_sr_caffemodel; + string model_version = "_2021-01"; + path_detect_prototxt = findDataFile("dnn/wechat"+model_version+"/detect.prototxt", false); + path_detect_caffemodel = findDataFile("dnn/wechat"+model_version+"/detect.caffemodel", false); + path_sr_prototxt = findDataFile("dnn/wechat"+model_version+"/sr.prototxt", false); + path_sr_caffemodel = findDataFile("dnn/wechat"+model_version+"/sr.caffemodel", false); + + auto detector = wechat_qrcode::WeChatQRCode(path_detect_prototxt, path_detect_caffemodel, path_sr_prototxt, + path_sr_caffemodel); + + const cv::String expect_msg = "OpenCV"; + QRCodeEncoder::Params params; + params.version = 5; // 37x37 + Ptr qrcode_enc = cv::QRCodeEncoder::create(params); + Mat qrImage; + qrcode_enc->encode(expect_msg, qrImage); + Mat image(800, 800, CV_8UC1); + const int pixInBlob = 4; + Size qrSize = Size((21+(params.version-1)*4)*pixInBlob,(21+(params.version-1)*4)*pixInBlob); + Rect2f rec(static_cast((image.cols - qrSize.width)/2), + static_cast((image.rows - qrSize.height)/2), + static_cast(qrSize.width), + static_cast(qrSize.height)); + vector goldCorners = {rec.x, rec.y, + rec.x+rec.width, rec.y, + rec.x+rec.width, rec.y+rec.height, + rec.x, rec.y+rec.height}; + Mat roiImage = image(rec); + cv::resize(qrImage, roiImage, qrSize, 1., 1., INTER_NEAREST); + + vector points1; + auto decoded_info1 = detector.detectAndDecode(image, points1); + ASSERT_EQ(1ull, decoded_info1.size()); + ASSERT_EQ(expect_msg, decoded_info1[0]); + EXPECT_NEAR(0, 
cvtest::norm(Mat(goldCorners), points1[0].reshape(1, 8), NORM_INF), 8.); + + const double angle = 45; + Point2f pc(image.cols/2.f, image.rows/2.f); + Mat rot = getRotationMatrix2D(pc, angle, 1.); + warpAffine(image, image, rot, image.size()); + vector rotateGoldCorners; + for (int i = 0; i < static_cast(goldCorners.size()); i+= 2) { + rotateGoldCorners.push_back(static_cast(rot.at(0, 0) * goldCorners[i] + + rot.at(0, 1) * goldCorners[i+1] + rot.at(0, 2))); + rotateGoldCorners.push_back(static_cast(rot.at(1, 0) * goldCorners[i] + + rot.at(1, 1) * goldCorners[i+1] + rot.at(1, 2))); + } + vector points2; + auto decoded_info2 = detector.detectAndDecode(image, points2); + ASSERT_EQ(1ull, decoded_info2.size()); + ASSERT_EQ(expect_msg, decoded_info2[0]); + EXPECT_NEAR(0, cvtest::norm(Mat(rotateGoldCorners), points2[0].reshape(1, 8), NORM_INF), 11.); +} + +INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode, testing::ValuesIn(qrcode_images_name)); +INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Close, testing::ValuesIn(qrcode_images_close)); +INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Monitor, testing::ValuesIn(qrcode_images_monitor)); +INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Curved, testing::ValuesIn(qrcode_images_curved)); +INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Multi, testing::ValuesIn(qrcode_images_multiple)); + +TEST(Objdetect_QRCode_Big, regression) { + string path_detect_prototxt, path_detect_caffemodel, path_sr_prototxt, path_sr_caffemodel; + string model_version = "_2021-01"; + path_detect_prototxt = findDataFile("dnn/wechat"+model_version+"/detect.prototxt", false); + path_detect_caffemodel = findDataFile("dnn/wechat"+model_version+"/detect.caffemodel", false); + path_sr_prototxt = findDataFile("dnn/wechat"+model_version+"/sr.prototxt", false); + path_sr_caffemodel = findDataFile("dnn/wechat"+model_version+"/sr.caffemodel", false); + + auto detector = wechat_qrcode::WeChatQRCode(path_detect_prototxt, path_detect_caffemodel, path_sr_prototxt, + 
path_sr_caffemodel); + + const cv::String expect_msg = "OpenCV"; + QRCodeEncoder::Params params; + params.version = 4; // 33x33 + Ptr qrcode_enc = cv::QRCodeEncoder::create(params); + Mat qrImage; + qrcode_enc->encode(expect_msg, qrImage); + Mat largeImage(4032, 3024, CV_8UC1); + const int pixInBlob = 4; + Size qrSize = Size((21+(params.version-1)*4)*pixInBlob,(21+(params.version-1)*4)*pixInBlob); + Mat roiImage = largeImage(Rect((largeImage.cols - qrSize.width)/2, (largeImage.rows - qrSize.height)/2, + qrSize.width, qrSize.height)); + cv::resize(qrImage, roiImage, qrSize, 1., 1., INTER_NEAREST); + + vector points; + detector.setScaleFactor(0.25f); + auto decoded_info = detector.detectAndDecode(largeImage, points); + ASSERT_EQ(1ull, decoded_info.size()); + ASSERT_EQ(expect_msg, decoded_info[0]); +} + +TEST(Objdetect_QRCode_Tiny, regression) { + string path_detect_prototxt, path_detect_caffemodel, path_sr_prototxt, path_sr_caffemodel; + string model_version = "_2021-01"; + path_detect_prototxt = findDataFile("dnn/wechat"+model_version+"/detect.prototxt", false); + path_detect_caffemodel = findDataFile("dnn/wechat"+model_version+"/detect.caffemodel", false); + path_sr_prototxt = findDataFile("dnn/wechat"+model_version+"/sr.prototxt", false); + path_sr_caffemodel = findDataFile("dnn/wechat"+model_version+"/sr.caffemodel", false); + + auto detector = wechat_qrcode::WeChatQRCode(path_detect_prototxt, path_detect_caffemodel, path_sr_prototxt, + path_sr_caffemodel); + + const cv::String expect_msg = "OpenCV"; + QRCodeEncoder::Params params; + params.version = 4; // 33x33 + Ptr qrcode_enc = cv::QRCodeEncoder::create(params); + Mat qrImage; + qrcode_enc->encode(expect_msg, qrImage); + Mat tinyImage(80, 80, CV_8UC1); + const int pixInBlob = 2; + Size qrSize = Size((21+(params.version-1)*4)*pixInBlob,(21+(params.version-1)*4)*pixInBlob); + Mat roiImage = tinyImage(Rect((tinyImage.cols - qrSize.width)/2, (tinyImage.rows - qrSize.height)/2, + qrSize.width, qrSize.height)); + 
cv::resize(qrImage, roiImage, qrSize, 1., 1., INTER_NEAREST); + + vector points; + auto decoded_info = detector.detectAndDecode(tinyImage, points); + ASSERT_EQ(1ull, decoded_info.size()); + ASSERT_EQ(expect_msg, decoded_info[0]); +} + + +typedef testing::TestWithParam Objdetect_QRCode_Easy_Multi; +TEST_P(Objdetect_QRCode_Easy_Multi, regression) { + string path_detect_prototxt, path_detect_caffemodel, path_sr_prototxt, path_sr_caffemodel; + string model_path = GetParam(); + + if (!model_path.empty()) { + path_detect_prototxt = findDataFile(model_path + "/detect.prototxt", false); + path_detect_caffemodel = findDataFile(model_path + "/detect.caffemodel", false); + path_sr_prototxt = findDataFile(model_path + "/sr.prototxt", false); + path_sr_caffemodel = findDataFile(model_path + "/sr.caffemodel", false); + } + + auto detector = wechat_qrcode::WeChatQRCode(path_detect_prototxt, path_detect_caffemodel, path_sr_prototxt, + path_sr_caffemodel); + + const cv::String expect_msg1 = "OpenCV1", expect_msg2 = "OpenCV2"; + QRCodeEncoder::Params params; + params.version = 4; // 33x33 + Ptr qrcode_enc = cv::QRCodeEncoder::create(params); + Mat qrImage1, qrImage2; + qrcode_enc->encode(expect_msg1, qrImage1); + qrcode_enc->encode(expect_msg2, qrImage2); + const int pixInBlob = 2; + const int offset = 14; + const int qr_size = (params.version - 1) * 4 + 21; + Mat tinyImage = Mat::zeros(qr_size*pixInBlob+offset, (qr_size*pixInBlob+offset)*2, CV_8UC1); + Size qrSize = Size(qrImage1.cols, qrImage1.rows); + + Mat roiImage = tinyImage(Rect((tinyImage.cols/2 - qrSize.width)/2, (tinyImage.rows - qrSize.height)/2, + qrSize.width, qrSize.height)); + cv::resize(qrImage1, roiImage, qrSize, 1., 1., INTER_NEAREST); + + roiImage = tinyImage(Rect((tinyImage.cols/2 - qrSize.width)/2+tinyImage.cols/2, (tinyImage.rows - qrSize.height)/2, + qrSize.width, qrSize.height)); + cv::resize(qrImage2, roiImage, qrSize, 1., 1., INTER_NEAREST); + + vector points; + auto decoded_info = 
detector.detectAndDecode(tinyImage, points); + ASSERT_EQ(2ull, decoded_info.size()); + ASSERT_TRUE((expect_msg1 == decoded_info[0] && expect_msg2 == decoded_info[1]) || + (expect_msg1 == decoded_info[1] && expect_msg2 == decoded_info[0])); +} + +std::string qrcode_model_path[] = {"", "dnn/wechat_2021-01"}; +INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Easy_Multi, testing::ValuesIn(qrcode_model_path)); + +} // namespace +} // namespace opencv_test diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/CMakeLists.txt b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/CMakeLists.txt new file mode 100644 index 00000000..f30d2ed8 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/CMakeLists.txt @@ -0,0 +1,38 @@ +set(the_description "Contributed/Experimental Algorithms for Salient 2D Features Detection") + +if(HAVE_CUDA) + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef) +endif() +ocv_define_module(xfeatures2d opencv_core opencv_imgproc opencv_features2d opencv_calib3d OPTIONAL opencv_shape opencv_ml opencv_cudaarithm WRAP python java objc) + +if(NOT OPENCV_SKIP_FEATURES2D_DOWNLOADING) + include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/download_vgg.cmake) + include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/download_boostdesc.cmake) + set(DOWNLOAD_DIR "${OpenCV_BINARY_DIR}/downloads/xfeatures2d") + download_boost_descriptors("${DOWNLOAD_DIR}" boost_status) + download_vgg_descriptors("${DOWNLOAD_DIR}" vgg_status) + if(boost_status) + ocv_append_source_file_compile_definitions(${CMAKE_CURRENT_SOURCE_DIR}/src/boostdesc.cpp "OPENCV_XFEATURES2D_HAS_BOOST_DATA=1") + else() + message(WARNING "features2d: Boost descriptor implementation is not available due to missing data (download failed: https://github.com/opencv/opencv_contrib/issues/1301)") + endif() + if(vgg_status) + ocv_append_source_file_compile_definitions(${CMAKE_CURRENT_SOURCE_DIR}/src/vgg.cpp "OPENCV_XFEATURES2D_HAS_VGG_DATA=1") + else() + message(WARNING "features2d: VGG descriptor implementation 
is not available due to missing data (download failed: https://github.com/opencv/opencv_contrib/issues/1301)") + endif() + + if(boost_status OR vgg_status) + ocv_module_include_directories("${DOWNLOAD_DIR}") + endif() +endif() + +if(TARGET opencv_test_${name}) + ocv_target_include_directories(opencv_test_${name} "${OpenCV_SOURCE_DIR}/modules") # use common files from features2d tests + if(boost_status) + ocv_target_compile_definitions(opencv_test_${name} PRIVATE "OPENCV_XFEATURES2D_HAS_BOOST_DATA=1") + endif() + if(vgg_status) + ocv_target_compile_definitions(opencv_test_${name} PRIVATE "OPENCV_XFEATURES2D_HAS_VGG_DATA=1") + endif() +endif() diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/README.md b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/README.md new file mode 100644 index 00000000..29214fd6 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/README.md @@ -0,0 +1,8 @@ +Extra 2D Features Framework +=========================== + +1. Experimental 2D feature algorithms +2. Non-free 2D feature algorithms + +Extra 2D Features Framework containing experimental and non-free 2D feature detector/descriptor algorithms: + SURF, BRIEF, Censure, Freak, LUCID, Daisy, BEBLID, TEBLID, Self-similar. 
diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/cmake/download_boostdesc.cmake b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/cmake/download_boostdesc.cmake new file mode 100644 index 00000000..87bedf98 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/cmake/download_boostdesc.cmake @@ -0,0 +1,36 @@ +function(download_boost_descriptors dst_dir status_var) + set(OPENCV_3RDPARTY_COMMIT "34e4206aef44d50e6bbcd0ab06354b52e7466d26") + + set(ids BGM BGM_BI BGM_HD BINBOOST_064 BINBOOST_128 BINBOOST_256 LBGM) + set(name_BGM boostdesc_bgm.i) + set(name_BGM_BI boostdesc_bgm_bi.i) + set(name_BGM_HD boostdesc_bgm_hd.i) + set(name_BINBOOST_064 boostdesc_binboost_064.i) + set(name_BINBOOST_128 boostdesc_binboost_128.i) + set(name_BINBOOST_256 boostdesc_binboost_256.i) + set(name_LBGM boostdesc_lbgm.i) + set(hash_BGM "0ea90e7a8f3f7876d450e4149c97c74f") + set(hash_BGM_BI "232c966b13651bd0e46a1497b0852191") + set(hash_BGM_HD "324426a24fa56ad9c5b8e3e0b3e5303e") + set(hash_BINBOOST_064 "202e1b3e9fec871b04da31f7f016679f") + set(hash_BINBOOST_128 "98ea99d399965c03d555cef3ea502a0b") + set(hash_BINBOOST_256 "e6dcfa9f647779eb1ce446a8d759b6ea") + set(hash_LBGM "0ae0675534aa318d9668f2a179c2a052") + + set(${status_var} TRUE PARENT_SCOPE) + foreach(id ${ids}) + ocv_download(FILENAME ${name_${id}} + HASH ${hash_${id}} + URL + "${OPENCV_BOOSTDESC_URL}" + "$ENV{OPENCV_BOOSTDESC_URL}" + "https://raw.githubusercontent.com/opencv/opencv_3rdparty/${OPENCV_3RDPARTY_COMMIT}/" + DESTINATION_DIR ${dst_dir} + ID "xfeatures2d/boostdesc" + RELATIVE_URL + STATUS res) + if(NOT res) + set(${status_var} FALSE PARENT_SCOPE) + endif() + endforeach() +endfunction() diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/cmake/download_vgg.cmake b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/cmake/download_vgg.cmake new file mode 100644 index 00000000..67e1f8f0 --- /dev/null +++ 
b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/cmake/download_vgg.cmake @@ -0,0 +1,30 @@ +function(download_vgg_descriptors dst_dir status_var) + set(OPENCV_3RDPARTY_COMMIT "fccf7cd6a4b12079f73bbfb21745f9babcd4eb1d") + + set(ids VGG_48 VGG_64 VGG_80 VGG_120) + set(name_VGG_48 "vgg_generated_48.i") + set(name_VGG_64 "vgg_generated_64.i") + set(name_VGG_80 "vgg_generated_80.i") + set(name_VGG_120 "vgg_generated_120.i") + set(hash_VGG_48 "e8d0dcd54d1bcfdc29203d011a797179") + set(hash_VGG_64 "7126a5d9a8884ebca5aea5d63d677225") + set(hash_VGG_80 "7cd47228edec52b6d82f46511af325c5") + set(hash_VGG_120 "151805e03568c9f490a5e3a872777b75") + + set(${status_var} TRUE PARENT_SCOPE) + foreach(id ${ids}) + ocv_download(FILENAME ${name_${id}} + HASH ${hash_${id}} + URL + "${OPENCV_VGGDESC_URL}" + "$ENV{OPENCV_VGGDESC_URL}" + "https://raw.githubusercontent.com/opencv/opencv_3rdparty/${OPENCV_3RDPARTY_COMMIT}/" + DESTINATION_DIR "${dst_dir}" + ID "xfeatures2d/vgg" + RELATIVE_URL + STATUS res) + if(NOT res) + set(${status_var} FALSE PARENT_SCOPE) + endif() + endforeach() +endfunction() diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/doc/xfeatures2d.bib b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/doc/xfeatures2d.bib new file mode 100644 index 00000000..7d3f146c --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/doc/xfeatures2d.bib @@ -0,0 +1,175 @@ +@incollection{Agrawal08, + title = {Censure: Center surround extremas for realtime feature detection and matching}, + author = {Agrawal, Motilal and Konolige, Kurt and Blas, Morten Rufus}, + booktitle = {Computer Vision--ECCV 2008}, + pages = {102--115}, + year = {2008}, + publisher = {Springer} +} + +@inproceedings{AOV12, + title = {Freak: Fast retina keypoint}, + author = {Alahi, Alexandre and Ortiz, Raphael and Vandergheynst, Pierre}, + booktitle = {Computer Vision and Pattern Recognition (CVPR), 2012 IEEE Conference on}, + pages = {510--517}, + year = {2012}, + organization = 
{IEEE} +} + +@article{Bay06, + title = {Surf: Speeded up robust features}, + author = {Bay, Herbert and Tuytelaars, Tinne and Van Gool, Luc}, + journal = {Computer Vision--ECCV 2006}, + pages = {404--417}, + year = {2006}, + publisher = {Springer Berlin Heidelberg} +} + +@inproceedings{BeecksUS10, + author = {Christian Beecks and Merih Seran Uysal and Thomas Seidl}, + title = {Signature Quadratic Form Distance}, + booktitle = {CIVR}, + pages = {438--445}, + publisher = {ACM}, + year = {2010} +} + +@inproceedings{Bian2017gms, + title = {GMS: Grid-based Motion Statistics for Fast, Ultra-robust Feature Correspondence}, + author = {JiaWang Bian and Wen-Yan Lin and Yasuyuki Matsushita and Sai-Kit Yeung and Tan Dat Nguyen and Ming-Ming Cheng}, + booktitle = {IEEE Conference on Computer Vision and Pattern Recognition}, + year = {2017} +} + +@incollection{calon2010, + title = {Brief: Binary robust independent elementary features}, + author = {Calonder, Michael and Lepetit, Vincent and Strecha, Christoph and Fua, Pascal}, + booktitle = {Computer Vision--ECCV 2010}, + pages = {778--792}, + year = {2010}, + publisher = {Springer} +} + +@article{KrulisLS16, + author = {Martin Krulis and Jakub Lokoc and Tomas Skopal}, + title = {Efficient extraction of clustering-based feature signatures using {GPU} architectures}, + journal = {Multimedia Tools Appl.}, + volume = {75}, + number = {13}, + pages = {8071--8103}, + year = {2016} +} + +@article{Lowry2018LOGOSLG, + title = {LOGOS: Local Geometric Support for High-Outlier Spatial Verification}, + author = {Stephanie Lowry and Henrik Andreasson}, + journal = {2018 IEEE International Conference on Robotics and Automation (ICRA)}, + year = {2018}, + pages = {7262-7269}, + doi = {10.1109/ICRA.2018.8460988}, +} + +@article{Mikolajczyk2004, + title = {Scale \& affine invariant interest point detectors}, + author = {Mikolajczyk, Krystian and Schmid, Cordelia}, + journal = {International journal of computer vision}, + volume = {60}, + number 
= {1}, + pages = {63--86}, + year = {2004}, + publisher = {Springer} +} + +@ARTICLE{Najman2014, + author={Y. {Xu} and P. {Monasse} and T. {Géraud} and L. {Najman}}, + journal={IEEE Transactions on Image Processing}, + title={Tree-Based Morse Regions: A Topological Approach to Local Feature Detection}, + year={2014}, + volume={23}, + number={12}, + pages={5612-5625}, + abstract={This paper introduces a topological approach to local invariant feature detection motivated by Morse theory. We use the critical points of the graph of the intensity image, revealing directly the topology information as initial interest points. Critical points are selected from what we call a tree-based shape-space. In particular, they are selected from both the connected components of the upper level sets of the image (the Max-tree) and those of the lower level sets (the Min-tree). They correspond to specific nodes on those two trees: 1) to the leaves (extrema) and 2) to the nodes having bifurcation (saddle points). We then associate to each critical point the largest region that contains it and is topologically equivalent in its tree. We call such largest regions the tree-based Morse regions (TBMRs). The TBMR can be seen as a variant of maximally stable extremal region (MSER), which are contrasted regions. Contrarily to MSER, TBMR relies only on topological information and thus fully inherit the invariance properties of the space of shapes (e.g., invariance to affine contrast changes and covariance to continuous transformations). In particular, TBMR extracts the regions independently of the contrast, which makes it truly contrast invariant. Furthermore, it is quasi-parameter free. TBMR extraction is fast, having the same complexity as MSER. Experimentally, TBMR achieves a repeatability on par with state-of-the-art methods, but obtains a significantly higher number of features. 
Both the accuracy and robustness of TBMR are demonstrated by applications to image registration and 3D reconstruction.}, + keywords={feature extraction;image reconstruction;image registration;trees (mathematics);tree-based Morse regions;topological approach;local invariant feature detection;Morse theory;intensity image;initial interest points;critical points;tree-based shape-space;upper level image sets;Max-tree;lower level sets;Min-tree;saddle points;bifurcation;maximally stable extremal region variant;MSER;topological information;TBMR extraction;3D reconstruction;image registration;Feature extraction;Detectors;Shape;Time complexity;Level set;Three-dimensional displays;Image registration;Min/Max tree;local features;affine region detectors;image registration;3D reconstruction;Min/Max tree;local features;affine region detectors;image registration;3D reconstruction}, + doi={10.1109/TIP.2014.2364127}, + ISSN={1941-0042}, + month={Dec},} + +@article{Simonyan14, + author = {Simonyan, K. and Vedaldi, A. and Zisserman, A.}, + title = {Learning Local Feature Descriptors Using Convex Optimisation}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + year = {2014} +} + +@article{Tola10, + author = {E. Tola and V. Lepetit and P. Fua}, + title = {DAISY: An Efficient Dense Descriptor Applied to Wide Baseline Stereo}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, + year = {2010}, + month = {May}, + pages = {815--830}, + volume = {32}, + number = {5} +} + +@inproceedings{Tombari14, + title = {Interest Points via Maximal Self-Dissimilarities}, + author = {Tombari, Federico and Di Stefano, Luigi}, + booktitle = {Asian Conference on Computer Vision -- ACCV 2014}, + year = {2014} +} + +@inproceedings{Trzcinski13a, + author = {T. Trzcinski, M. Christoudias, V. Lepetit and P. 
Fua}, + title = {Boosting Binary Keypoint Descriptors}, + booktitle = {Computer Vision and Pattern Recognition}, + year = {2013} +} + +@article{Trzcinski13b, + author = {T. Trzcinski, M. Christoudias and V. Lepetit}, + title = {Learning Image Descriptors with Boosting}, + journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI)}, + year = {2013} +} + +@incollection{LUCID, + title = {Locally uniform comparison image descriptor}, + author = {Ziegler, Andrew, Eric Christiansen, David Kriegman, and Serge J. Belongie} + booktitle = {Advances in Neural Information Processing Systems} + pages = {1--9} + year = {2012} + publisher = {NIPS} +} + +@article{Suarez2020BEBLID, + title = {{BEBLID: Boosted Efficient Binary Local Image Descriptor}}, + journal = {Pattern Recognition Letters}, + volume = {133}, + pages = {366--372}, + year = {2020}, + issn = {0167-8655}, + doi = {https://doi.org/10.1016/j.patrec.2020.04.005}, + url = {https://raw.githubusercontent.com/iago-suarez/BEBLID/master/BEBLID_Boosted_Efficient_Binary_Local_Image_Descriptor.pdf}, + author = {Iago Su\'arez and Ghesn Sfeir and Jos\'e M. Buenaposada and Luis Baumela}, +} + +@article{Suarez2021TEBLID, + title = {Revisiting Binary Local Image Description for Resource Limited Devices}, + journal = {IEEE Robotics and Automation Letters}, + volume = {6}, + pages = {8317--8324}, + year = {2021}, + number = {4}, + doi = {https://doi.org/10.1109/LRA.2021.3107024}, + url = {https://arxiv.org/pdf/2108.08380.pdf}, + author = {Iago Su\'arez and Jos\'e M. 
Buenaposada and Luis Baumela}, +} + +@inproceedings{winder2007learning, + title= {Learning Local Image Descriptors}, + author= {Winder, Simon AJ and Brown, Matthew}, + booktitle= {Computer Vision and Pattern Recognition}, + pages={1--8}, + year={2007}, +} \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp new file mode 100644 index 00000000..cf0980dd --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/include/opencv2/xfeatures2d.hpp @@ -0,0 +1,1279 @@ +/* +By downloading, copying, installing or using the software you agree to this +license. If you do not agree to this license, do not download, install, +copy or use the software. + + License Agreement + For Open Source Computer Vision Library + (3-clause BSD License) + +Copyright (C) 2013, OpenCV Foundation, all rights reserved. +Third party copyrights are property of their respective owners. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the names of the copyright holders nor the names of the contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +This software is provided by the copyright holders and contributors "as is" and +any express or implied warranties, including, but not limited to, the implied +warranties of merchantability and fitness for a particular purpose are +disclaimed. 
In no event shall copyright holders or contributors be liable for +any direct, indirect, incidental, special, exemplary, or consequential damages +(including, but not limited to, procurement of substitute goods or services; +loss of use, data, or profits; or business interruption) however caused +and on any theory of liability, whether in contract, strict liability, +or tort (including negligence or otherwise) arising in any way out of +the use of this software, even if advised of the possibility of such damage. +*/ + +#ifndef __OPENCV_XFEATURES2D_HPP__ +#define __OPENCV_XFEATURES2D_HPP__ + +#include "opencv2/features2d.hpp" +#include "opencv2/xfeatures2d/nonfree.hpp" + +/** @defgroup xfeatures2d Extra 2D Features Framework +@{ + @defgroup xfeatures2d_experiment Experimental 2D Features Algorithms + +This section describes experimental algorithms for 2d feature detection. + + @defgroup xfeatures2d_nonfree Non-free 2D Features Algorithms + +This section describes two popular algorithms for 2d feature detection, SIFT and SURF, that are +known to be patented. You need to set the OPENCV_ENABLE_NONFREE option in cmake to use those. Use them at your own risk. + + @defgroup xfeatures2d_match Experimental 2D Features Matching Algorithm + +This section describes the following matching strategies: + - GMS: Grid-based Motion Statistics, @cite Bian2017gms + - LOGOS: Local geometric support for high-outlier spatial verification, @cite Lowry2018LOGOSLG + +@} +*/ + +namespace cv +{ +namespace xfeatures2d +{ + +//! @addtogroup xfeatures2d_experiment +//! @{ + +/** @brief Class implementing the FREAK (*Fast Retina Keypoint*) keypoint descriptor, described in @cite AOV12 . + +The algorithm propose a novel keypoint descriptor inspired by the human visual system and more +precisely the retina, coined Fast Retina Key- point (FREAK). A cascade of binary strings is +computed by efficiently comparing image intensities over a retinal sampling pattern. 
FREAKs are in +general faster to compute with lower memory load and also more robust than SIFT, SURF or BRISK. +They are competitive alternatives to existing keypoints in particular for embedded applications. + +@note + - An example on how to use the FREAK descriptor can be found at + opencv_source_code/samples/cpp/freak_demo.cpp + */ +class CV_EXPORTS_W FREAK : public Feature2D +{ +public: + + static const int NB_SCALES = 64; + static const int NB_PAIRS = 512; + static const int NB_ORIENPAIRS = 45; + + /** + @param orientationNormalized Enable orientation normalization. + @param scaleNormalized Enable scale normalization. + @param patternScale Scaling of the description pattern. + @param nOctaves Number of octaves covered by the detected keypoints. + @param selectedPairs (Optional) user defined selected pairs indexes, + */ + CV_WRAP static Ptr create(bool orientationNormalized = true, + bool scaleNormalized = true, + float patternScale = 22.0f, + int nOctaves = 4, + const std::vector& selectedPairs = std::vector()); + + CV_WRAP virtual void setOrientationNormalized(bool orientationNormalized) = 0; + CV_WRAP virtual bool getOrientationNormalized() const = 0; + + CV_WRAP virtual void setScaleNormalized(bool scaleNormalized) = 0; + CV_WRAP virtual bool getScaleNormalized() const = 0; + + CV_WRAP virtual void setPatternScale(double patternScale) = 0; + CV_WRAP virtual double getPatternScale() const = 0; + + CV_WRAP virtual void setNOctaves(int nOctaves) = 0; + CV_WRAP virtual int getNOctaves() const = 0; + + CV_WRAP String getDefaultName() const CV_OVERRIDE; +}; + + +/** @brief The class implements the keypoint detector introduced by @cite Agrawal08, synonym of StarDetector. : + */ +class CV_EXPORTS_W StarDetector : public Feature2D +{ +public: + //! 
the full constructor + CV_WRAP static Ptr create(int maxSize=45, int responseThreshold=30, + int lineThresholdProjected=10, + int lineThresholdBinarized=8, + int suppressNonmaxSize=5); + + CV_WRAP virtual void setMaxSize(int _maxSize) = 0; + CV_WRAP virtual int getMaxSize() const = 0; + + CV_WRAP virtual void setResponseThreshold(int _responseThreshold) = 0; + CV_WRAP virtual int getResponseThreshold() const = 0; + + CV_WRAP virtual void setLineThresholdProjected(int _lineThresholdProjected) = 0; + CV_WRAP virtual int getLineThresholdProjected() const = 0; + + CV_WRAP virtual void setLineThresholdBinarized(int _lineThresholdBinarized) = 0; + CV_WRAP virtual int getLineThresholdBinarized() const = 0; + + CV_WRAP virtual void setSuppressNonmaxSize(int _suppressNonmaxSize) = 0; + CV_WRAP virtual int getSuppressNonmaxSize() const = 0; + + CV_WRAP String getDefaultName() const CV_OVERRIDE; +}; + +/* + * BRIEF Descriptor + */ + +/** @brief Class for computing BRIEF descriptors described in @cite calon2010 . + +@param bytes legth of the descriptor in bytes, valid values are: 16, 32 (default) or 64 . +@param use_orientation sample patterns using keypoints orientation, disabled by default. + + */ +class CV_EXPORTS_W BriefDescriptorExtractor : public Feature2D +{ +public: + CV_WRAP static Ptr create( int bytes = 32, bool use_orientation = false ); + + CV_WRAP virtual void setDescriptorSize(int bytes) = 0; + CV_WRAP virtual int getDescriptorSize() const = 0; + + CV_WRAP virtual void setUseOrientation(bool use_orientation) = 0; + CV_WRAP virtual bool getUseOrientation() const = 0; + + CV_WRAP String getDefaultName() const CV_OVERRIDE; +}; + +/** @brief Class implementing the locally uniform comparison image descriptor, described in @cite LUCID + +An image descriptor that can be computed very fast, while being +about as robust as, for example, SURF or BRIEF. + +@note It requires a color image as input. 
+ */ +class CV_EXPORTS_W LUCID : public Feature2D +{ +public: + /** + * @param lucid_kernel kernel for descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth + * @param blur_kernel kernel for blurring image prior to descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth + */ + CV_WRAP static Ptr create(const int lucid_kernel = 1, const int blur_kernel = 2); + + CV_WRAP virtual void setLucidKernel(int lucid_kernel) = 0; + CV_WRAP virtual int getLucidKernel() const = 0; + + CV_WRAP virtual void setBlurKernel(int blur_kernel) = 0; + CV_WRAP virtual int getBlurKernel() const = 0; + + CV_WRAP String getDefaultName() const CV_OVERRIDE; +}; + + +/* +* LATCH Descriptor +*/ + +/** latch Class for computing the LATCH descriptor. +If you find this code useful, please add a reference to the following paper in your work: +Gil Levi and Tal Hassner, "LATCH: Learned Arrangements of Three Patch Codes", arXiv preprint arXiv:1501.03719, 15 Jan. 2015 + +LATCH is a binary descriptor based on learned comparisons of triplets of image patches. + +* bytes is the size of the descriptor - can be 64, 32, 16, 8, 4, 2 or 1 +* rotationInvariance - whether or not the descriptor should compansate for orientation changes. +* half_ssd_size - the size of half of the mini-patches size. For example, if we would like to compare triplets of patches of size 7x7x + then the half_ssd_size should be (7-1)/2 = 3. +* sigma - sigma value for GaussianBlur smoothing of the source image. Source image will be used without smoothing in case sigma value is 0. + +Note: the descriptor can be coupled with any keypoint extractor. The only demand is that if you use set rotationInvariance = True then + you will have to use an extractor which estimates the patch orientation (in degrees). Examples for such extractors are ORB and SIFT. 
+ +Note: a complete example can be found under /samples/cpp/tutorial_code/xfeatures2D/latch_match.cpp + +*/ +class CV_EXPORTS_W LATCH : public Feature2D +{ +public: + CV_WRAP static Ptr create(int bytes = 32, bool rotationInvariance = true, int half_ssd_size = 3, double sigma = 2.0); + + CV_WRAP virtual void setBytes(int bytes) = 0; + CV_WRAP virtual int getBytes() const = 0; + + CV_WRAP virtual void setRotationInvariance(bool rotationInvariance) = 0; + CV_WRAP virtual bool getRotationInvariance() const = 0; + + CV_WRAP virtual void setHalfSSDsize(int half_ssd_size) = 0; + CV_WRAP virtual int getHalfSSDsize() const = 0; + + CV_WRAP virtual void setSigma(double sigma) = 0; + CV_WRAP virtual double getSigma() const = 0; + + CV_WRAP String getDefaultName() const CV_OVERRIDE; +}; + +/** @brief Class implementing BEBLID (Boosted Efficient Binary Local Image Descriptor), + * described in @cite Suarez2020BEBLID . + +BEBLID \cite Suarez2020BEBLID is a efficient binary descriptor learned with boosting. +It is able to describe keypoints from any detector just by changing the scale_factor parameter. +In several benchmarks it has proved to largely improve other binary descriptors like ORB or +BRISK with the same efficiency. BEBLID describes using the difference of mean gray values in +different regions of the image around the KeyPoint, the descriptor is specifically optimized for +image matching and patch retrieval addressing the asymmetries of these problems. + +If you find this code useful, please add a reference to the following paper: +

Iago Suárez, Ghesn Sfeir, José M. Buenaposada, and Luis Baumela. +BEBLID: Boosted efficient binary local image descriptor. +Pattern Recognition Letters, 133:366–372, 2020.
+ +The descriptor was trained using 1 million of randomly sampled pairs of patches +(20% positives and 80% negatives) from the Liberty split of the UBC datasets +\cite winder2007learning as described in the paper @cite Suarez2020BEBLID. +You can check in the [AKAZE example](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp) +how well BEBLID works. Detecting 10000 keypoints with ORB and describing with BEBLID obtains +561 inliers (75%) whereas describing with ORB obtains only 493 inliers (63%). +*/ +class CV_EXPORTS_W BEBLID : public Feature2D +{ +public: + /** + * @brief Descriptor number of bits, each bit is a boosting weak-learner. + * The user can choose between 512 or 256 bits. + */ + enum BeblidSize + { + SIZE_512_BITS = 100, SIZE_256_BITS = 101, + }; + /** @brief Creates the BEBLID descriptor. + @param scale_factor Adjust the sampling window around detected keypoints: + - 1.00f should be the scale for ORB keypoints + - 6.75f should be the scale for SIFT detected keypoints + - 6.25f is default and fits for KAZE, SURF detected keypoints + - 5.00f should be the scale for AKAZE, MSD, AGAST, FAST, BRISK keypoints + @param n_bits Determine the number of bits in the descriptor. Should be either + BEBLID::SIZE_512_BITS or BEBLID::SIZE_256_BITS. + */ + CV_WRAP static Ptr create(float scale_factor, int n_bits = BEBLID::SIZE_512_BITS); + + CV_WRAP virtual void setScaleFactor(float scale_factor) = 0; + CV_WRAP virtual float getScaleFactor() const = 0; + + CV_WRAP String getDefaultName() const CV_OVERRIDE; +}; + +/** @brief Class implementing TEBLID (Triplet-based Efficient Binary Local Image Descriptor), + * described in @cite Suarez2021TEBLID. + +TEBLID stands for Triplet-based Efficient Binary Local Image Descriptor, although originally it was called BAD +\cite Suarez2021TEBLID. 
It is an improvement over BEBLID \cite Suarez2020BEBLID, that uses triplet loss, +hard negative mining, and anchor swap to improve the image matching results. +It is able to describe keypoints from any detector just by changing the scale_factor parameter. +TEBLID is as efficient as ORB, BEBLID or BRISK, but the triplet-based training objective selected more +discriminative features that explain the accuracy gain. It is also more compact than BEBLID, +when running the [AKAZE example](https://github.com/opencv/opencv/blob/4.x/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp) +with 10000 keypoints detected by ORB, BEBLID obtains 561 inliers (75%) with 512 bits, whereas +TEBLID obtains 621 (75.2%) with 256 bits. ORB obtains only 493 inliers (63%). + +If you find this code useful, please add a reference to the following paper: +
Iago Suárez, José M. Buenaposada, and Luis Baumela. +Revisiting Binary Local Image Description for Resource Limited Devices. +IEEE Robotics and Automation Letters, vol. 6, no. 4, pp. 8317-8324, Oct. 2021.
+ +The descriptor was trained in Liberty split of the UBC datasets \cite winder2007learning . +*/ +class CV_EXPORTS_W TEBLID : public Feature2D +{ +public: + /** + * @brief Descriptor number of bits, each bit is a box average difference. + * The user can choose between 256 or 512 bits. + */ + enum TeblidSize + { + SIZE_256_BITS = 102, SIZE_512_BITS = 103, + }; + /** @brief Creates the TEBLID descriptor. + @param scale_factor Adjust the sampling window around detected keypoints: + - 1.00f should be the scale for ORB keypoints + - 6.75f should be the scale for SIFT detected keypoints + - 6.25f is default and fits for KAZE, SURF detected keypoints + - 5.00f should be the scale for AKAZE, MSD, AGAST, FAST, BRISK keypoints + @param n_bits Determine the number of bits in the descriptor. Should be either + TEBLID::SIZE_256_BITS or TEBLID::SIZE_512_BITS. + */ + CV_WRAP static Ptr create(float scale_factor, int n_bits = TEBLID::SIZE_256_BITS); + + CV_WRAP String getDefaultName() const CV_OVERRIDE; +}; + +/** @brief Class implementing DAISY descriptor, described in @cite Tola10 + +@param radius radius of the descriptor at the initial scale +@param q_radius amount of radial range division quantity +@param q_theta amount of angular range division quantity +@param q_hist amount of gradient orientations range division quantity +@param norm choose descriptors normalization type, where +DAISY::NRM_NONE will not do any normalization (default), +DAISY::NRM_PARTIAL mean that histograms are normalized independently for L2 norm equal to 1.0, +DAISY::NRM_FULL mean that descriptors are normalized for L2 norm equal to 1.0, +DAISY::NRM_SIFT mean that descriptors are normalized for L2 norm equal to 1.0 but no individual one is bigger than 0.154 as in SIFT +@param H optional 3x3 homography matrix used to warp the grid of daisy but sampling keypoints remains unwarped on image +@param interpolation switch to disable interpolation for speed improvement at minor quality loss +@param 
use_orientation sample patterns using keypoints orientation, disabled by default. + + */ +class CV_EXPORTS_W DAISY : public Feature2D +{ +public: + enum NormalizationType + { + NRM_NONE = 100, NRM_PARTIAL = 101, NRM_FULL = 102, NRM_SIFT = 103, + }; + CV_WRAP static Ptr create( float radius = 15, int q_radius = 3, int q_theta = 8, + int q_hist = 8, DAISY::NormalizationType norm = DAISY::NRM_NONE, InputArray H = noArray(), + bool interpolation = true, bool use_orientation = false ); + + CV_WRAP virtual void setRadius(float radius) = 0; + CV_WRAP virtual float getRadius() const = 0; + + CV_WRAP virtual void setQRadius(int q_radius) = 0; + CV_WRAP virtual int getQRadius() const = 0; + + CV_WRAP virtual void setQTheta(int q_theta) = 0; + CV_WRAP virtual int getQTheta() const = 0; + + CV_WRAP virtual void setQHist(int q_hist) = 0; + CV_WRAP virtual int getQHist() const = 0; + + CV_WRAP virtual void setNorm(int norm) = 0; + CV_WRAP virtual int getNorm() const = 0; + + CV_WRAP virtual void setH(InputArray H) = 0; + CV_WRAP virtual cv::Mat getH() const = 0; + + CV_WRAP virtual void setInterpolation(bool interpolation) = 0; + CV_WRAP virtual bool getInterpolation() const = 0; + + CV_WRAP virtual void setUseOrientation(bool use_orientation) = 0; + CV_WRAP virtual bool getUseOrientation() const = 0; + + CV_WRAP String getDefaultName() const CV_OVERRIDE; + + /** @overload + * @param image image to extract descriptors + * @param keypoints of interest within image + * @param descriptors resulted descriptors array + */ + virtual void compute( InputArray image, std::vector& keypoints, OutputArray descriptors ) CV_OVERRIDE = 0; + + virtual void compute( InputArrayOfArrays images, + std::vector >& keypoints, + OutputArrayOfArrays descriptors ) CV_OVERRIDE; + + /** @overload + * @param image image to extract descriptors + * @param roi region of interest within image + * @param descriptors resulted descriptors array for roi image pixels + */ + virtual void compute( InputArray image, 
Rect roi, OutputArray descriptors ) = 0; + + /**@overload + * @param image image to extract descriptors + * @param descriptors resulted descriptors array for all image pixels + */ + virtual void compute( InputArray image, OutputArray descriptors ) = 0; + + /** + * @param y position y on image + * @param x position x on image + * @param orientation orientation on image (0->360) + * @param descriptor supplied array for descriptor storage + */ + virtual void GetDescriptor( double y, double x, int orientation, float* descriptor ) const = 0; + + /** + * @param y position y on image + * @param x position x on image + * @param orientation orientation on image (0->360) + * @param descriptor supplied array for descriptor storage + * @param H homography matrix for warped grid + */ + virtual bool GetDescriptor( double y, double x, int orientation, float* descriptor, double* H ) const = 0; + + /** + * @param y position y on image + * @param x position x on image + * @param orientation orientation on image (0->360) + * @param descriptor supplied array for descriptor storage + */ + virtual void GetUnnormalizedDescriptor( double y, double x, int orientation, float* descriptor ) const = 0; + + /** + * @param y position y on image + * @param x position x on image + * @param orientation orientation on image (0->360) + * @param descriptor supplied array for descriptor storage + * @param H homography matrix for warped grid + */ + virtual bool GetUnnormalizedDescriptor( double y, double x, int orientation, float* descriptor , double *H ) const = 0; + +}; + +/** @brief Class implementing the MSD (*Maximal Self-Dissimilarity*) keypoint detector, described in @cite Tombari14. + +The algorithm implements a novel interest point detector stemming from the intuition that image patches +which are highly dissimilar over a relatively large extent of their surroundings hold the property of +being repeatable and distinctive. 
This concept of "contextual self-dissimilarity" reverses the key +paradigm of recent successful techniques such as the Local Self-Similarity descriptor and the Non-Local +Means filter, which build upon the presence of similar - rather than dissimilar - patches. Moreover, +it extends to contextual information the local self-dissimilarity notion embedded in established +detectors of corner-like interest points, thereby achieving enhanced repeatability, distinctiveness and +localization accuracy. + +*/ + +class CV_EXPORTS_W MSDDetector : public Feature2D { + +public: + + CV_WRAP static Ptr create(int m_patch_radius = 3, int m_search_area_radius = 5, + int m_nms_radius = 5, int m_nms_scale_radius = 0, float m_th_saliency = 250.0f, int m_kNN = 4, + float m_scale_factor = 1.25f, int m_n_scales = -1, bool m_compute_orientation = false); + + CV_WRAP virtual void setPatchRadius(int patch_radius) = 0; + CV_WRAP virtual int getPatchRadius() const = 0; + + CV_WRAP virtual void setSearchAreaRadius(int use_orientation) = 0; + CV_WRAP virtual int getSearchAreaRadius() const = 0; + + CV_WRAP virtual void setNmsRadius(int nms_radius) = 0; + CV_WRAP virtual int getNmsRadius() const = 0; + + CV_WRAP virtual void setNmsScaleRadius(int nms_scale_radius) = 0; + CV_WRAP virtual int getNmsScaleRadius() const = 0; + + CV_WRAP virtual void setThSaliency(float th_saliency) = 0; + CV_WRAP virtual float getThSaliency() const = 0; + + CV_WRAP virtual void setKNN(int kNN) = 0; + CV_WRAP virtual int getKNN() const = 0; + + CV_WRAP virtual void setScaleFactor(float scale_factor) = 0; + CV_WRAP virtual float getScaleFactor() const = 0; + + CV_WRAP virtual void setNScales(int use_orientation) = 0; + CV_WRAP virtual int getNScales() const = 0; + + CV_WRAP virtual void setComputeOrientation(bool compute_orientation) = 0; + CV_WRAP virtual bool getComputeOrientation() const = 0; + + CV_WRAP String getDefaultName() const CV_OVERRIDE; +}; + +/** @brief Class implementing VGG (Oxford Visual Geometry 
Group) descriptor trained end to end +using "Descriptor Learning Using Convex Optimisation" (DLCO) aparatus described in @cite Simonyan14. + +@param desc type of descriptor to use, VGG::VGG_120 is default (120 dimensions float) +Available types are VGG::VGG_120, VGG::VGG_80, VGG::VGG_64, VGG::VGG_48 +@param isigma gaussian kernel value for image blur (default is 1.4f) +@param img_normalize use image sample intensity normalization (enabled by default) +@param use_orientation sample patterns using keypoints orientation, enabled by default +@param scale_factor adjust the sampling window of detected keypoints to 64.0f (VGG sampling window) +6.25f is default and fits for KAZE, SURF detected keypoints window ratio +6.75f should be the scale for SIFT detected keypoints window ratio +5.00f should be the scale for AKAZE, MSD, AGAST, FAST, BRISK keypoints window ratio +0.75f should be the scale for ORB keypoints ratio + +@param dsc_normalize clamp descriptors to 255 and convert to uchar CV_8UC1 (disabled by default) + + */ +class CV_EXPORTS_W VGG : public Feature2D +{ +public: + + CV_WRAP enum + { + VGG_120 = 100, VGG_80 = 101, VGG_64 = 102, VGG_48 = 103, + }; + + CV_WRAP static Ptr create( int desc = VGG::VGG_120, float isigma = 1.4f, + bool img_normalize = true, bool use_scale_orientation = true, + float scale_factor = 6.25f, bool dsc_normalize = false ); + + CV_WRAP String getDefaultName() const CV_OVERRIDE; + + CV_WRAP virtual void setSigma(const float isigma) = 0; + CV_WRAP virtual float getSigma() const = 0; + + CV_WRAP virtual void setUseNormalizeImage(const bool img_normalize) = 0; + CV_WRAP virtual bool getUseNormalizeImage() const = 0; + + CV_WRAP virtual void setUseScaleOrientation(const bool use_scale_orientation) = 0; + CV_WRAP virtual bool getUseScaleOrientation() const = 0; + + CV_WRAP virtual void setScaleFactor(const float scale_factor) = 0; + CV_WRAP virtual float getScaleFactor() const = 0; + + CV_WRAP virtual void setUseNormalizeDescriptor(const bool 
dsc_normalize) = 0; + CV_WRAP virtual bool getUseNormalizeDescriptor() const = 0; +}; + +/** @brief Class implementing BoostDesc (Learning Image Descriptors with Boosting), described in +@cite Trzcinski13a and @cite Trzcinski13b. + +@param desc type of descriptor to use, BoostDesc::BINBOOST_256 is default (256 bit long dimension) +Available types are: BoostDesc::BGM, BoostDesc::BGM_HARD, BoostDesc::BGM_BILINEAR, BoostDesc::LBGM, +BoostDesc::BINBOOST_64, BoostDesc::BINBOOST_128, BoostDesc::BINBOOST_256 +@param use_orientation sample patterns using keypoints orientation, enabled by default +@param scale_factor adjust the sampling window of detected keypoints +6.25f is default and fits for KAZE, SURF detected keypoints window ratio +6.75f should be the scale for SIFT detected keypoints window ratio +5.00f should be the scale for AKAZE, MSD, AGAST, FAST, BRISK keypoints window ratio +0.75f should be the scale for ORB keypoints ratio +1.50f was the default in original implementation + +@note BGM is the base descriptor where each binary dimension is computed as the output of a single weak learner. +BGM_HARD and BGM_BILINEAR refers to same BGM but use different type of gradient binning. In the BGM_HARD that +use ASSIGN_HARD binning type the gradient is assigned to the nearest orientation bin. In the BGM_BILINEAR that use +ASSIGN_BILINEAR binning type the gradient is assigned to the two neighbouring bins. In the BGM and all other modes that use +ASSIGN_SOFT binning type the gradient is assigned to 8 nearest bins according to the cosine value between the gradient +angle and the bin center. LBGM (alias FP-Boost) is the floating point extension where each dimension is computed +as a linear combination of the weak learner responses. BINBOOST and subvariants are the binary extensions of LBGM +where each bit is computed as a thresholded linear combination of a set of weak learners. 
+BoostDesc header files (boostdesc_*.i) was exported from original binaries with export-boostdesc.py script from +samples subfolder. + +*/ + +class CV_EXPORTS_W BoostDesc : public Feature2D +{ +public: + + CV_WRAP enum + { + BGM = 100, BGM_HARD = 101, BGM_BILINEAR = 102, LBGM = 200, + BINBOOST_64 = 300, BINBOOST_128 = 301, BINBOOST_256 = 302 + }; + + CV_WRAP static Ptr create( int desc = BoostDesc::BINBOOST_256, + bool use_scale_orientation = true, float scale_factor = 6.25f ); + + CV_WRAP String getDefaultName() const CV_OVERRIDE; + + CV_WRAP virtual void setUseScaleOrientation(const bool use_scale_orientation) = 0; + CV_WRAP virtual bool getUseScaleOrientation() const = 0; + + CV_WRAP virtual void setScaleFactor(const float scale_factor) = 0; + CV_WRAP virtual float getScaleFactor() const = 0; +}; + + +/* +* Position-Color-Texture signatures +*/ + +/** +* @brief Class implementing PCT (position-color-texture) signature extraction +* as described in @cite KrulisLS16. +* The algorithm is divided to a feature sampler and a clusterizer. +* Feature sampler produces samples at given set of coordinates. +* Clusterizer then produces clusters of these samples using k-means algorithm. +* Resulting set of clusters is the signature of the input image. +* +* A signature is an array of SIGNATURE_DIMENSION-dimensional points. +* Used dimensions are: +* weight, x, y position; lab color, contrast, entropy. +* @cite KrulisLS16 +* @cite BeecksUS10 +*/ +class CV_EXPORTS_W PCTSignatures : public Algorithm +{ +public: + /** + * @brief Lp distance function selector. + */ + enum DistanceFunction + { + L0_25, L0_5, L1, L2, L2SQUARED, L5, L_INFINITY + }; + + /** + * @brief Point distributions supported by random point generator. + */ + enum PointDistribution + { + UNIFORM, //!< Generate numbers uniformly. + REGULAR, //!< Generate points in a regular grid. + NORMAL //!< Generate points with normal (gaussian) distribution. + }; + + /** + * @brief Similarity function selector. 
+ * @see + * Christian Beecks, Merih Seran Uysal, Thomas Seidl. + * Signature quadratic form distance. + * In Proceedings of the ACM International Conference on Image and Video Retrieval, pages 438-445. + * ACM, 2010. + * @cite BeecksUS10 + * @note For selected distance function: \f[ d(c_i, c_j) \f] and parameter: \f[ \alpha \f] + */ + enum SimilarityFunction + { + MINUS, //!< \f[ -d(c_i, c_j) \f] + GAUSSIAN, //!< \f[ e^{ -\alpha * d^2(c_i, c_j)} \f] + HEURISTIC //!< \f[ \frac{1}{\alpha + d(c_i, c_j)} \f] + }; + + + /** + * @brief Creates PCTSignatures algorithm using sample and seed count. + * It generates its own sets of sampling points and clusterization seed indexes. + * @param initSampleCount Number of points used for image sampling. + * @param initSeedCount Number of initial clusterization seeds. + * Must be lower or equal to initSampleCount + * @param pointDistribution Distribution of generated points. Default: UNIFORM. + * Available: UNIFORM, REGULAR, NORMAL. + * @return Created algorithm. + */ + CV_WRAP static Ptr create( + const int initSampleCount = 2000, + const int initSeedCount = 400, + const int pointDistribution = 0); + + /** + * @brief Creates PCTSignatures algorithm using pre-generated sampling points + * and number of clusterization seeds. It uses the provided + * sampling points and generates its own clusterization seed indexes. + * @param initSamplingPoints Sampling points used in image sampling. + * @param initSeedCount Number of initial clusterization seeds. + * Must be lower or equal to initSamplingPoints.size(). + * @return Created algorithm. + */ + CV_WRAP static Ptr create( + const std::vector& initSamplingPoints, + const int initSeedCount); + + /** + * @brief Creates PCTSignatures algorithm using pre-generated sampling points + * and clusterization seeds indexes. + * @param initSamplingPoints Sampling points used in image sampling. + * @param initClusterSeedIndexes Indexes of initial clusterization seeds. 
+ * Its size must be lower or equal to initSamplingPoints.size(). + * @return Created algorithm. + */ + CV_WRAP static Ptr create( + const std::vector& initSamplingPoints, + const std::vector& initClusterSeedIndexes); + + + + /** + * @brief Computes signature of given image. + * @param image Input image of CV_8U type. + * @param signature Output computed signature. + */ + CV_WRAP virtual void computeSignature( + InputArray image, + OutputArray signature) const = 0; + + /** + * @brief Computes signatures for multiple images in parallel. + * @param images Vector of input images of CV_8U type. + * @param signatures Vector of computed signatures. + */ + CV_WRAP virtual void computeSignatures( + const std::vector& images, + std::vector& signatures) const = 0; + + /** + * @brief Draws signature in the source image and outputs the result. + * Signatures are visualized as a circle + * with radius based on signature weight + * and color based on signature color. + * Contrast and entropy are not visualized. + * @param source Source image. + * @param signature Image signature. + * @param result Output result. + * @param radiusToShorterSideRatio Determines maximal radius of signature in the output image. + * @param borderThickness Border thickness of the visualized signature. + */ + CV_WRAP static void drawSignature( + InputArray source, + InputArray signature, + OutputArray result, + float radiusToShorterSideRatio = 1.0 / 8, + int borderThickness = 1); + + /** + * @brief Generates initial sampling points according to selected point distribution. + * @param initPoints Output vector where the generated points will be saved. + * @param count Number of points to generate. + * @param pointDistribution Point distribution selector. + * Available: UNIFORM, REGULAR, NORMAL. 
+ * @note Generated coordinates are in range [0..1) + */ + CV_WRAP static void generateInitPoints( + std::vector& initPoints, + const int count, + int pointDistribution); + + + /**** sampler ****/ + + /** + * @brief Number of initial samples taken from the image. + */ + CV_WRAP virtual int getSampleCount() const = 0; + + /** + * @brief Color resolution of the greyscale bitmap represented in allocated bits + * (i.e., value 4 means that 16 shades of grey are used). + * The greyscale bitmap is used for computing contrast and entropy values. + */ + CV_WRAP virtual int getGrayscaleBits() const = 0; + /** + * @brief Color resolution of the greyscale bitmap represented in allocated bits + * (i.e., value 4 means that 16 shades of grey are used). + * The greyscale bitmap is used for computing contrast and entropy values. + */ + CV_WRAP virtual void setGrayscaleBits(int grayscaleBits) = 0; + + /** + * @brief Size of the texture sampling window used to compute contrast and entropy + * (center of the window is always in the pixel selected by x,y coordinates + * of the corresponding feature sample). + */ + CV_WRAP virtual int getWindowRadius() const = 0; + /** + * @brief Size of the texture sampling window used to compute contrast and entropy + * (center of the window is always in the pixel selected by x,y coordinates + * of the corresponding feature sample). + */ + CV_WRAP virtual void setWindowRadius(int radius) = 0; + + + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. e = entropy) + */ + CV_WRAP virtual float getWeightX() const = 0; + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. 
e = entropy) + */ + CV_WRAP virtual void setWeightX(float weight) = 0; + + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. e = entropy) + */ + CV_WRAP virtual float getWeightY() const = 0; + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. e = entropy) + */ + CV_WRAP virtual void setWeightY(float weight) = 0; + + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. e = entropy) + */ + CV_WRAP virtual float getWeightL() const = 0; + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. e = entropy) + */ + CV_WRAP virtual void setWeightL(float weight) = 0; + + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. e = entropy) + */ + CV_WRAP virtual float getWeightA() const = 0; + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. e = entropy) + */ + CV_WRAP virtual void setWeightA(float weight) = 0; + + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. e = entropy) + */ + CV_WRAP virtual float getWeightB() const = 0; + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. 
e = entropy) + */ + CV_WRAP virtual void setWeightB(float weight) = 0; + + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. e = entropy) + */ + CV_WRAP virtual float getWeightContrast() const = 0; + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. e = entropy) + */ + CV_WRAP virtual void setWeightContrast(float weight) = 0; + + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. e = entropy) + */ + CV_WRAP virtual float getWeightEntropy() const = 0; + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space + * (x,y = position; L,a,b = color in CIE Lab space; c = contrast. e = entropy) + */ + CV_WRAP virtual void setWeightEntropy(float weight) = 0; + + /** + * @brief Initial samples taken from the image. + * These sampled features become the input for clustering. + */ + CV_WRAP virtual std::vector getSamplingPoints() const = 0; + + + + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space. + * @param idx ID of the weight + * @param value Value of the weight + * @note + * WEIGHT_IDX = 0; + * X_IDX = 1; + * Y_IDX = 2; + * L_IDX = 3; + * A_IDX = 4; + * B_IDX = 5; + * CONTRAST_IDX = 6; + * ENTROPY_IDX = 7; + */ + CV_WRAP virtual void setWeight(int idx, float value) = 0; + /** + * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space. + * @param weights Values of all weights. 
+ * @note + * WEIGHT_IDX = 0; + * X_IDX = 1; + * Y_IDX = 2; + * L_IDX = 3; + * A_IDX = 4; + * B_IDX = 5; + * CONTRAST_IDX = 6; + * ENTROPY_IDX = 7; + */ + CV_WRAP virtual void setWeights(const std::vector& weights) = 0; + + /** + * @brief Translations of the individual axes of the feature space. + * @param idx ID of the translation + * @param value Value of the translation + * @note + * WEIGHT_IDX = 0; + * X_IDX = 1; + * Y_IDX = 2; + * L_IDX = 3; + * A_IDX = 4; + * B_IDX = 5; + * CONTRAST_IDX = 6; + * ENTROPY_IDX = 7; + */ + CV_WRAP virtual void setTranslation(int idx, float value) = 0; + /** + * @brief Translations of the individual axes of the feature space. + * @param translations Values of all translations. + * @note + * WEIGHT_IDX = 0; + * X_IDX = 1; + * Y_IDX = 2; + * L_IDX = 3; + * A_IDX = 4; + * B_IDX = 5; + * CONTRAST_IDX = 6; + * ENTROPY_IDX = 7; + */ + CV_WRAP virtual void setTranslations(const std::vector& translations) = 0; + + /** + * @brief Sets sampling points used to sample the input image. + * @param samplingPoints Vector of sampling points in range [0..1) + * @note Number of sampling points must be greater or equal to clusterization seed count. + */ + CV_WRAP virtual void setSamplingPoints(std::vector samplingPoints) = 0; + + + + /**** clusterizer ****/ + /** + * @brief Initial seeds (initial number of clusters) for the k-means algorithm. + */ + CV_WRAP virtual std::vector getInitSeedIndexes() const = 0; + /** + * @brief Initial seed indexes for the k-means algorithm. + */ + CV_WRAP virtual void setInitSeedIndexes(std::vector initSeedIndexes) = 0; + /** + * @brief Number of initial seeds (initial number of clusters) for the k-means algorithm. + */ + CV_WRAP virtual int getInitSeedCount() const = 0; + + /** + * @brief Number of iterations of the k-means clustering. + * We use fixed number of iterations, since the modified clustering is pruning clusters + * (not iteratively refining k clusters). 
+ */ + CV_WRAP virtual int getIterationCount() const = 0; + /** + * @brief Number of iterations of the k-means clustering. + * We use fixed number of iterations, since the modified clustering is pruning clusters + * (not iteratively refining k clusters). + */ + CV_WRAP virtual void setIterationCount(int iterationCount) = 0; + + /** + * @brief Maximal number of generated clusters. If the number is exceeded, + * the clusters are sorted by their weights and the smallest clusters are cropped. + */ + CV_WRAP virtual int getMaxClustersCount() const = 0; + /** + * @brief Maximal number of generated clusters. If the number is exceeded, + * the clusters are sorted by their weights and the smallest clusters are cropped. + */ + CV_WRAP virtual void setMaxClustersCount(int maxClustersCount) = 0; + + /** + * @brief This parameter multiplied by the index of iteration gives lower limit for cluster size. + * Clusters containing fewer points than specified by the limit have their centroid dismissed + * and points are reassigned. + */ + CV_WRAP virtual int getClusterMinSize() const = 0; + /** + * @brief This parameter multiplied by the index of iteration gives lower limit for cluster size. + * Clusters containing fewer points than specified by the limit have their centroid dismissed + * and points are reassigned. + */ + CV_WRAP virtual void setClusterMinSize(int clusterMinSize) = 0; + + /** + * @brief Threshold euclidean distance between two centroids. + * If two cluster centers are closer than this distance, + * one of the centroid is dismissed and points are reassigned. + */ + CV_WRAP virtual float getJoiningDistance() const = 0; + /** + * @brief Threshold euclidean distance between two centroids. + * If two cluster centers are closer than this distance, + * one of the centroid is dismissed and points are reassigned. 
+ */ + CV_WRAP virtual void setJoiningDistance(float joiningDistance) = 0; + + /** + * @brief Remove centroids in k-means whose weight is lesser or equal to given threshold. + */ + CV_WRAP virtual float getDropThreshold() const = 0; + /** + * @brief Remove centroids in k-means whose weight is lesser or equal to given threshold. + */ + CV_WRAP virtual void setDropThreshold(float dropThreshold) = 0; + + /** + * @brief Distance function selector used for measuring distance between two points in k-means. + */ + CV_WRAP virtual int getDistanceFunction() const = 0; + /** + * @brief Distance function selector used for measuring distance between two points in k-means. + * Available: L0_25, L0_5, L1, L2, L2SQUARED, L5, L_INFINITY. + */ + CV_WRAP virtual void setDistanceFunction(int distanceFunction) = 0; + +}; + +/** +* @brief Class implementing Signature Quadratic Form Distance (SQFD). +* @see Christian Beecks, Merih Seran Uysal, Thomas Seidl. +* Signature quadratic form distance. +* In Proceedings of the ACM International Conference on Image and Video Retrieval, pages 438-445. +* ACM, 2010. +* @cite BeecksUS10 +*/ +class CV_EXPORTS_W PCTSignaturesSQFD : public Algorithm +{ +public: + + /** + * @brief Creates the algorithm instance using selected distance function, + * similarity function and similarity function parameter. + * @param distanceFunction Distance function selector. Default: L2 + * Available: L0_25, L0_5, L1, L2, L2SQUARED, L5, L_INFINITY + * @param similarityFunction Similarity function selector. Default: HEURISTIC + * Available: MINUS, GAUSSIAN, HEURISTIC + * @param similarityParameter Parameter of the similarity function. + */ + CV_WRAP static Ptr create( + const int distanceFunction = 3, + const int similarityFunction = 2, + const float similarityParameter = 1.0f); + + /** + * @brief Computes Signature Quadratic Form Distance of two signatures. + * @param _signature0 The first signature. + * @param _signature1 The second signature. 
+ */ + CV_WRAP virtual float computeQuadraticFormDistance( + InputArray _signature0, + InputArray _signature1) const = 0; + + /** + * @brief Computes Signature Quadratic Form Distance between the reference signature + * and each of the other image signatures. + * @param sourceSignature The signature to measure distance of other signatures from. + * @param imageSignatures Vector of signatures to measure distance from the source signature. + * @param distances Output vector of measured distances. + */ + CV_WRAP virtual void computeQuadraticFormDistances( + const Mat& sourceSignature, + const std::vector& imageSignatures, + std::vector& distances) const = 0; + +}; + +/** +* @brief Elliptic region around an interest point. +*/ +class CV_EXPORTS Elliptic_KeyPoint : public KeyPoint +{ +public: + Size_ axes; //!< the lengths of the major and minor ellipse axes + float si; //!< the integration scale at which the parameters were estimated + Matx23f transf; //!< the transformation between image space and local patch space + Elliptic_KeyPoint(); + Elliptic_KeyPoint(Point2f pt, float angle, Size axes, float size, float si); + virtual ~Elliptic_KeyPoint(); +}; + +/** + * @brief Class implementing the Harris-Laplace feature detector as described in @cite Mikolajczyk2004. + */ +class CV_EXPORTS_W HarrisLaplaceFeatureDetector : public Feature2D +{ +public: + /** + * @brief Creates a new implementation instance. 
+ * + * @param numOctaves the number of octaves in the scale-space pyramid + * @param corn_thresh the threshold for the Harris cornerness measure + * @param DOG_thresh the threshold for the Difference-of-Gaussians scale selection + * @param maxCorners the maximum number of corners to consider + * @param num_layers the number of intermediate scales per octave + */ + CV_WRAP static Ptr create( + int numOctaves=6, + float corn_thresh=0.01f, + float DOG_thresh=0.01f, + int maxCorners=5000, + int num_layers=4); + + CV_WRAP virtual void setNumOctaves(int numOctaves_) = 0; + CV_WRAP virtual int getNumOctaves() const = 0; + + CV_WRAP virtual void setCornThresh(float corn_thresh_) = 0; + CV_WRAP virtual float getCornThresh() const = 0; + + CV_WRAP virtual void setDOGThresh(float DOG_thresh_) = 0; + CV_WRAP virtual float getDOGThresh() const = 0; + + CV_WRAP virtual void setMaxCorners(int maxCorners_) = 0; + CV_WRAP virtual int getMaxCorners() const = 0; + + CV_WRAP virtual void setNumLayers(int num_layers_) = 0; + CV_WRAP virtual int getNumLayers() const = 0; + + CV_WRAP String getDefaultName() const CV_OVERRIDE; +}; + +/** + * @brief Class implementing affine adaptation for key points. + * + * A @ref FeatureDetector and a @ref DescriptorExtractor are wrapped to augment the + * detected points with their affine invariant elliptic region and to compute + * the feature descriptors on the regions after warping them into circles. + * + * The interface is equivalent to @ref Feature2D, adding operations for + * @ref Elliptic_KeyPoint "Elliptic_KeyPoints" instead of @ref KeyPoint "KeyPoints". + */ +class CV_EXPORTS_W AffineFeature2D : public Feature2D +{ +public: + /** + * @brief Creates an instance wrapping the given keypoint detector and + * descriptor extractor. + */ + static Ptr create( + Ptr keypoint_detector, + Ptr descriptor_extractor); + + /** + * @brief Creates an instance where keypoint detector and descriptor + * extractor are identical. 
+ */ + static Ptr create( + Ptr keypoint_detector) + { + return create(keypoint_detector, keypoint_detector); + } + + using Feature2D::detect; // overload, don't hide + /** + * @brief Detects keypoints in the image using the wrapped detector and + * performs affine adaptation to augment them with their elliptic regions. + */ + virtual void detect( + InputArray image, + CV_OUT std::vector& keypoints, + InputArray mask=noArray() ) = 0; + + using Feature2D::detectAndCompute; // overload, don't hide + /** + * @brief Detects keypoints and computes descriptors for their surrounding + * regions, after warping them into circles. + */ + virtual void detectAndCompute( + InputArray image, + InputArray mask, + CV_OUT std::vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false ) = 0; +}; + +/** +@brief Class implementing the Tree Based Morse Regions (TBMR) as described in +@cite Najman2014 extended with scaled extraction ability. + +@param min_area prune areas smaller than minArea +@param max_area_relative prune areas bigger than maxArea = max_area_relative * +input_image_size +@param scale_factor scale factor for scaled extraction. +@param n_scales number of applications of the scale factor (octaves). + +@note This algorithm is based on Component Tree (Min/Max) as well as MSER but +uses a Morse-theory approach to extract features. + +Features are ellipses (similar to MSER, however a MSER feature can never be a +TBMR feature and vice versa). 
+ +*/ +class CV_EXPORTS_W TBMR : public AffineFeature2D +{ +public: + CV_WRAP static Ptr create(int min_area = 60, + float max_area_relative = 0.01f, + float scale_factor = 1.25f, + int n_scales = -1); + + CV_WRAP virtual void setMinArea(int minArea) = 0; + CV_WRAP virtual int getMinArea() const = 0; + CV_WRAP virtual void setMaxAreaRelative(float maxArea) = 0; + CV_WRAP virtual float getMaxAreaRelative() const = 0; + CV_WRAP virtual void setScaleFactor(float scale_factor) = 0; + CV_WRAP virtual float getScaleFactor() const = 0; + CV_WRAP virtual void setNScales(int n_scales) = 0; + CV_WRAP virtual int getNScales() const = 0; +}; + +/** @brief Estimates cornerness for prespecified KeyPoints using the FAST algorithm + +@param image grayscale image where keypoints (corners) are detected. +@param keypoints keypoints which should be tested to fit the FAST criteria. Keypoints not being +detected as corners are removed. +@param threshold threshold on difference between intensity of the central pixel and pixels of a +circle around this pixel. +@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners +(keypoints). +@param type one of the three neighborhoods as defined in the paper: +FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12, +FastFeatureDetector::TYPE_5_8 + +Detects corners using the FAST algorithm by @cite Rosten06 . + */ +CV_EXPORTS void FASTForPointSet( InputArray image, CV_IN_OUT std::vector& keypoints, + int threshold, bool nonmaxSuppression=true, cv::FastFeatureDetector::DetectorType type=FastFeatureDetector::TYPE_9_16); + + +//! @} + + +//! @addtogroup xfeatures2d_match +//! @{ + +/** @brief GMS (Grid-based Motion Statistics) feature matching strategy described in @cite Bian2017gms . + @param size1 Input size of image1. + @param size2 Input size of image2. + @param keypoints1 Input keypoints of image1. + @param keypoints2 Input keypoints of image2. + @param matches1to2 Input 1-nearest neighbor matches. 
+ @param matchesGMS Matches returned by the GMS matching strategy. + @param withRotation Take rotation transformation into account. + @param withScale Take scale transformation into account. + @param thresholdFactor The higher, the less matches. + @note + Since GMS works well when the number of features is large, we recommend to use the ORB feature and set FastThreshold to 0 to get as many as possible features quickly. + If matching results are not satisfying, please add more features. (We use 10000 for images with 640 X 480). + If your images have big rotation and scale changes, please set withRotation or withScale to true. + */ +CV_EXPORTS_W void matchGMS(const Size& size1, const Size& size2, const std::vector& keypoints1, const std::vector& keypoints2, + const std::vector& matches1to2, CV_OUT std::vector& matchesGMS, const bool withRotation = false, + const bool withScale = false, const double thresholdFactor = 6.0); + +/** @brief LOGOS (Local geometric support for high-outlier spatial verification) feature matching strategy described in @cite Lowry2018LOGOSLG . + @param keypoints1 Input keypoints of image1. + @param keypoints2 Input keypoints of image2. + @param nn1 Index to the closest BoW centroid for each descriptors of image1. + @param nn2 Index to the closest BoW centroid for each descriptors of image2. + @param matches1to2 Matches returned by the LOGOS matching strategy. + @note + This matching strategy is suitable for features matching against large scale database. + First step consists in constructing the bag-of-words (BoW) from a representative image database. + Image descriptors are then represented by their closest codevector (nearest BoW centroid). + */ +CV_EXPORTS_W void matchLOGOS(const std::vector& keypoints1, const std::vector& keypoints2, + const std::vector& nn1, const std::vector& nn2, + std::vector& matches1to2); + +//! 
@} + +} +} + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/include/opencv2/xfeatures2d/cuda.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/include/opencv2/xfeatures2d/cuda.hpp new file mode 100644 index 00000000..ea4a3238 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/include/opencv2/xfeatures2d/cuda.hpp @@ -0,0 +1,202 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_XFEATURES2D_CUDA_HPP__ +#define __OPENCV_XFEATURES2D_CUDA_HPP__ + +#include "opencv2/core/cuda.hpp" + +namespace cv { namespace cuda { + +//! @addtogroup xfeatures2d_nonfree +//! @{ + +/** @brief Class used for extracting Speeded Up Robust Features (SURF) from an image. : + +The class SURF_CUDA implements Speeded Up Robust Features descriptor. There is a fast multi-scale +Hessian keypoint detector that can be used to find the keypoints (which is the default option). But +the descriptors can also be computed for the user-specified keypoints. Only 8-bit grayscale images +are supported. + +The class SURF_CUDA can store results in the GPU and CPU memory. It provides functions to convert +results between CPU and GPU version ( uploadKeypoints, downloadKeypoints, downloadDescriptors ). The +format of CPU results is the same as SURF results. GPU results are stored in GpuMat. The keypoints +matrix is \f$\texttt{nFeatures} \times 7\f$ matrix with the CV_32FC1 type. + +- keypoints.ptr\(X_ROW)[i] contains x coordinate of the i-th feature. +- keypoints.ptr\(Y_ROW)[i] contains y coordinate of the i-th feature. +- keypoints.ptr\(LAPLACIAN_ROW)[i] contains the laplacian sign of the i-th feature. +- keypoints.ptr\(OCTAVE_ROW)[i] contains the octave of the i-th feature. +- keypoints.ptr\(SIZE_ROW)[i] contains the size of the i-th feature. +- keypoints.ptr\(ANGLE_ROW)[i] contain orientation of the i-th feature. 
+- keypoints.ptr\(HESSIAN_ROW)[i] contains the response of the i-th feature. + +The descriptors matrix is \f$\texttt{nFeatures} \times \texttt{descriptorSize}\f$ matrix with the +CV_32FC1 type. + +The class SURF_CUDA uses some buffers and provides access to it. All buffers can be safely released +between function calls. + +@sa SURF + +@note + - An example for using the SURF keypoint matcher on GPU can be found at + opencv_source_code/samples/gpu/surf_keypoint_matcher.cpp + + */ +class CV_EXPORTS_W SURF_CUDA +{ +public: + enum KeypointLayout + { + X_ROW = 0, + Y_ROW, + LAPLACIAN_ROW, + OCTAVE_ROW, + SIZE_ROW, + ANGLE_ROW, + HESSIAN_ROW, + ROWS_COUNT + }; + + //! the default constructor + SURF_CUDA(); + //! the full constructor taking all the necessary parameters + explicit SURF_CUDA(double _hessianThreshold, int _nOctaves=4, + int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false); + + /** + @param _hessianThreshold Threshold for hessian keypoint detector used in SURF. + @param _nOctaves Number of pyramid octaves the keypoint detector will use. + @param _nOctaveLayers Number of octave layers within each octave. + @param _extended Extended descriptor flag (true - use extended 128-element descriptors; false - use + 64-element descriptors). + @param _keypointsRatio + @param _upright Up-right or rotated features flag (true - do not compute orientation of features; + false - compute orientation). + */ + CV_WRAP static Ptr create(double _hessianThreshold, int _nOctaves = 4, + int _nOctaveLayers = 2, bool _extended = false, float _keypointsRatio = 0.01f, bool _upright = false); + + //! returns the descriptor size in float's (64 or 128) + CV_WRAP int descriptorSize() const; + //! returns the default norm type + CV_WRAP int defaultNorm() const; + + //! upload host keypoints to device memory + void uploadKeypoints(const std::vector& keypoints, GpuMat& keypointsGPU); + //! 
download keypoints from device to host memory + CV_WRAP void downloadKeypoints(const GpuMat& keypointsGPU, CV_OUT std::vector& keypoints); + + //! download descriptors from device to host memory + void downloadDescriptors(const GpuMat& descriptorsGPU, std::vector& descriptors); + + //! finds the keypoints using fast hessian detector used in SURF + //! supports CV_8UC1 images + //! keypoints will have nFeature cols and 6 rows + //! keypoints.ptr(X_ROW)[i] will contain x coordinate of i'th feature + //! keypoints.ptr(Y_ROW)[i] will contain y coordinate of i'th feature + //! keypoints.ptr(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature + //! keypoints.ptr(OCTAVE_ROW)[i] will contain octave of i'th feature + //! keypoints.ptr(SIZE_ROW)[i] will contain size of i'th feature + //! keypoints.ptr(ANGLE_ROW)[i] will contain orientation of i'th feature + //! keypoints.ptr(HESSIAN_ROW)[i] will contain response of i'th feature + void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints); + //! finds the keypoints and computes their descriptors. + //! Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction + void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors, + bool useProvidedKeypoints = false); + + /** @brief Finds the keypoints using fast hessian detector used in SURF + + @param img Source image, currently supports only CV_8UC1 images. + @param mask A mask image same size as src and of type CV_8UC1. + @param keypoints Detected keypoints. 
+ */ + CV_WRAP inline void detect(const GpuMat& img, const GpuMat& mask, CV_OUT GpuMat& keypoints) { + (*this)(img, mask, keypoints); + } + + void operator()(const GpuMat& img, const GpuMat& mask, std::vector& keypoints); + void operator()(const GpuMat& img, const GpuMat& mask, std::vector& keypoints, GpuMat& descriptors, + bool useProvidedKeypoints = false); + + /** @brief Finds the keypoints and computes their descriptors using fast hessian detector used in SURF + + @param img Source image, currently supports only CV_8UC1 images. + @param mask A mask image same size as src and of type CV_8UC1. + @param keypoints Detected keypoints. + @param descriptors Keypoint descriptors. + @param useProvidedKeypoints Compute descriptors for the user-provided keypoints and recompute keypoints direction. + */ + CV_WRAP inline void detectWithDescriptors(const GpuMat& img, const GpuMat& mask, CV_OUT GpuMat& keypoints, CV_OUT GpuMat& descriptors, + bool useProvidedKeypoints = false) { + (*this)(img, mask, keypoints, descriptors, useProvidedKeypoints); + } + + void operator()(const GpuMat& img, const GpuMat& mask, std::vector& keypoints, std::vector& descriptors, + bool useProvidedKeypoints = false); + + void releaseMemory(); + + // SURF parameters + CV_PROP double hessianThreshold; + CV_PROP int nOctaves; + CV_PROP int nOctaveLayers; + CV_PROP bool extended; + CV_PROP bool upright; + + //! max keypoints = min(keypointsRatio * img.size().area(), 65535) + CV_PROP float keypointsRatio; + + GpuMat sum, mask1, maskSum; + + GpuMat det, trace; + + GpuMat maxPosBuffer; +}; + +//! 
@} + +}} // namespace cv { namespace cuda { + +#endif // __OPENCV_XFEATURES2D_CUDA_HPP__ diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/include/opencv2/xfeatures2d/nonfree.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/include/opencv2/xfeatures2d/nonfree.hpp new file mode 100644 index 00000000..8eb11aa6 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/include/opencv2/xfeatures2d/nonfree.hpp @@ -0,0 +1,124 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_XFEATURES2D_FEATURES_2D_HPP__ +#define __OPENCV_XFEATURES2D_FEATURES_2D_HPP__ + +#include "opencv2/features2d.hpp" + +namespace cv +{ +namespace xfeatures2d +{ + +/** @brief Class for extracting Speeded Up Robust Features from an image @cite Bay06 . + +The algorithm parameters: +- member int extended + - 0 means that the basic descriptors (64 elements each) shall be computed + - 1 means that the extended descriptors (128 elements each) shall be computed +- member int upright + - 0 means that detector computes orientation of each feature. + - 1 means that the orientation is not computed (which is much, much faster). For example, +if you match images from a stereo pair, or do image stitching, the matched features +likely have very similar angles, and you can speed up feature extraction by setting +upright=1. +- member double hessianThreshold +Threshold for the keypoint detector. Only features, whose hessian is larger than +hessianThreshold are retained by the detector. Therefore, the larger the value, the less +keypoints you will get. A good default value could be from 300 to 500, depending from the +image contrast. 
+- member int nOctaves +The number of a gaussian pyramid octaves that the detector uses. It is set to 4 by default. +If you want to get very large features, use the larger value. If you want just small +features, decrease it. +- member int nOctaveLayers +The number of images within each octave of a gaussian pyramid. It is set to 2 by default. +@note + - An example using the SURF feature detector can be found at + opencv_source_code/samples/cpp/generic_descriptor_match.cpp + - Another example using the SURF feature detector, extractor and matcher can be found at + opencv_source_code/samples/cpp/matcher_simple.cpp + */ +class CV_EXPORTS_W SURF : public Feature2D +{ +public: + /** + @param hessianThreshold Threshold for hessian keypoint detector used in SURF. + @param nOctaves Number of pyramid octaves the keypoint detector will use. + @param nOctaveLayers Number of octave layers within each octave. + @param extended Extended descriptor flag (true - use extended 128-element descriptors; false - use + 64-element descriptors). + @param upright Up-right or rotated features flag (true - do not compute orientation of features; + false - compute orientation). 
+ */ + CV_WRAP static Ptr create(double hessianThreshold=100, + int nOctaves = 4, int nOctaveLayers = 3, + bool extended = false, bool upright = false); + + CV_WRAP virtual void setHessianThreshold(double hessianThreshold) = 0; + CV_WRAP virtual double getHessianThreshold() const = 0; + + CV_WRAP virtual void setNOctaves(int nOctaves) = 0; + CV_WRAP virtual int getNOctaves() const = 0; + + CV_WRAP virtual void setNOctaveLayers(int nOctaveLayers) = 0; + CV_WRAP virtual int getNOctaveLayers() const = 0; + + CV_WRAP virtual void setExtended(bool extended) = 0; + CV_WRAP virtual bool getExtended() const = 0; + + CV_WRAP virtual void setUpright(bool upright) = 0; + CV_WRAP virtual bool getUpright() const = 0; + + CV_WRAP String getDefaultName() const CV_OVERRIDE; +}; + +typedef SURF SurfFeatureDetector; +typedef SURF SurfDescriptorExtractor; + +//! @} + +} +} /* namespace cv */ + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/DAISYDescriptorExtractorTest.java b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/DAISYDescriptorExtractorTest.java new file mode 100644 index 00000000..97d3063c --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/DAISYDescriptorExtractorTest.java @@ -0,0 +1,67 @@ +package org.opencv.test.features2d; + +import org.opencv.test.OpenCVTestCase; +import org.opencv.test.OpenCVTestRunner; +import org.opencv.xfeatures2d.DAISY; + +public class DAISYDescriptorExtractorTest extends OpenCVTestCase { + + DAISY extractor; + + @Override + protected void setUp() throws Exception { + super.setUp(); + extractor = DAISY.create(); // default (15, 3, 8, 8, 100, noArray, true, false) + } + + public void testCreate() { + assertNotNull(extractor); + } + + public void testDetectListOfMatListOfListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectListOfMatListOfListOfKeyPointListOfMat() { + fail("Not yet implemented"); + } + + public void 
testDetectMatListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPointMat() { + fail("Not yet implemented"); + } + + public void testEmpty() { + fail("Not yet implemented"); + } + + public void testReadYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + writeFile(filename, "%YAML:1.0\n---\nname: \"Feature2D.DAISY\"\nradius: 16.\nq_radius: 4\nq_theta: 9\nq_hist: 10\nnorm_type: 101\nenable_interpolation: 0\nuse_orientation: 1\n"); + + extractor.read(filename); + + assertEquals(16.0f, extractor.getRadius()); + assertEquals(4, extractor.getQRadius()); + assertEquals(9, extractor.getQTheta()); + assertEquals(10, extractor.getQHist()); + assertEquals(101, extractor.getNorm()); + assertEquals(false, extractor.getInterpolation()); + assertEquals(true, extractor.getUseOrientation()); + } + + public void testWriteYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + + extractor.write(filename); + + String truth = "%YAML:1.0\n---\nname: \"Feature2D.DAISY\"\nradius: 15.\nq_radius: 3\nq_theta: 8\nq_hist: 8\nnorm_type: 100\nenable_interpolation: 1\nuse_orientation: 0\n"; + String actual = readFile(filename); + actual = actual.replaceAll("e([+-])0(\\d\\d)", "e$1$2"); // NOTE: workaround for different platforms double representation + assertEquals(truth, actual); + } + +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/FREAKDescriptorExtractorTest.java b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/FREAKDescriptorExtractorTest.java new file mode 100644 index 00000000..4793a487 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/FREAKDescriptorExtractorTest.java @@ -0,0 +1,64 @@ +package org.opencv.test.features2d; + +import org.opencv.test.OpenCVTestCase; +import org.opencv.test.OpenCVTestRunner; +import org.opencv.xfeatures2d.FREAK; + +public class FREAKDescriptorExtractorTest extends OpenCVTestCase { + + FREAK 
extractor; + + @Override + protected void setUp() throws Exception { + super.setUp(); + extractor = FREAK.create(); // default (true,true,22,4) + } + + public void testCreate() { + assertNotNull(extractor); + } + + public void testDetectListOfMatListOfListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectListOfMatListOfListOfKeyPointListOfMat() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPointMat() { + fail("Not yet implemented"); + } + + public void testEmpty() { + fail("Not yet implemented"); + } + + public void testReadYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + writeFile(filename, "%YAML:1.0\n---\nname: \"Feature2D.FREAK\"\norientationNormalized: 0\nscaleNormalized: 0\npatternScale: 23.\nnOctaves: 5\n"); + + extractor.read(filename); + + assertEquals(false, extractor.getOrientationNormalized()); + assertEquals(false, extractor.getScaleNormalized()); + assertEquals(23.0, extractor.getPatternScale()); + assertEquals(5, extractor.getNOctaves()); + } + + public void testWriteYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + + extractor.write(filename); + + String truth = "%YAML:1.0\n---\nname: \"Feature2D.FREAK\"\norientationNormalized: 1\nscaleNormalized: 1\npatternScale: 22.\nnOctaves: 4\n"; + String actual = readFile(filename); + actual = actual.replaceAll("e([+-])0(\\d\\d)", "e$1$2"); // NOTE: workaround for different platforms double representation + assertEquals(truth, actual); + } + +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/HARRISFeatureDetectorTest.java b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/HARRISFeatureDetectorTest.java new file mode 100644 index 00000000..2f45a88b --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/HARRISFeatureDetectorTest.java @@ -0,0 +1,65 @@ 
+package org.opencv.test.features2d; + +import org.opencv.test.OpenCVTestCase; +import org.opencv.test.OpenCVTestRunner; +import org.opencv.xfeatures2d.HarrisLaplaceFeatureDetector; + +public class HARRISFeatureDetectorTest extends OpenCVTestCase { + + HarrisLaplaceFeatureDetector detector; + + @Override + protected void setUp() throws Exception { + super.setUp(); + detector = HarrisLaplaceFeatureDetector.create(); // default constructor have (6, 0.01, 0.01, 5000, 4) + } + + public void testCreate() { + assertNotNull(detector); + } + + public void testDetectListOfMatListOfListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectListOfMatListOfListOfKeyPointListOfMat() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPointMat() { + fail("Not yet implemented"); + } + + public void testEmpty() { + fail("Not yet implemented"); + } + + public void testReadYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + + writeFile(filename, "%YAML:1.0\n---\nname: \"Feature2D.HARRIS-LAPLACE\"\nnumOctaves: 5\ncorn_thresh: 0.02\nDOG_thresh: 0.03\nmaxCorners: 4000\nnum_layers: 2\n"); + detector.read(filename); + + assertEquals(5, detector.getNumOctaves()); + assertEquals(0.02f, detector.getCornThresh()); + assertEquals(0.03f, detector.getDOGThresh()); + assertEquals(4000, detector.getMaxCorners()); + assertEquals(2, detector.getNumLayers()); + } + + public void testWriteYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + + detector.write(filename); + + String truth = "%YAML:1.0\n---\nname: \"Feature2D.HARRIS-LAPLACE\"\nnumOctaves: 6\ncorn_thresh: 9.9999997764825821e-03\nDOG_thresh: 9.9999997764825821e-03\nmaxCorners: 5000\nnum_layers: 4\n"; + String actual = readFile(filename); + actual = actual.replaceAll("e([+-])0(\\d\\d)", "e$1$2"); // NOTE: workaround for different platforms double representation + 
assertEquals(truth, actual); + } + +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/LATCHDescriptorExtractorTest.java b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/LATCHDescriptorExtractorTest.java new file mode 100644 index 00000000..10802d21 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/LATCHDescriptorExtractorTest.java @@ -0,0 +1,64 @@ +package org.opencv.test.features2d; + +import org.opencv.test.OpenCVTestCase; +import org.opencv.test.OpenCVTestRunner; +import org.opencv.xfeatures2d.LATCH; + +public class LATCHDescriptorExtractorTest extends OpenCVTestCase { + + LATCH extractor; + + @Override + protected void setUp() throws Exception { + super.setUp(); + extractor = LATCH.create(); // default (32,true,3,2.0) + } + + public void testCreate() { + assertNotNull(extractor); + } + + public void testDetectListOfMatListOfListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectListOfMatListOfListOfKeyPointListOfMat() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPointMat() { + fail("Not yet implemented"); + } + + public void testEmpty() { + fail("Not yet implemented"); + } + + public void testReadYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + writeFile(filename, "%YAML:1.0\n---\nname: \"Feature2D.LATCH\"\ndescriptorSize: 64\nrotationInvariance: 0\nhalf_ssd_size: 5\nsigma: 3.\n"); + + extractor.read(filename); + + assertEquals(64, extractor.getBytes()); + assertEquals(false, extractor.getRotationInvariance()); + assertEquals(5, extractor.getHalfSSDsize()); + assertEquals(3.0, extractor.getSigma()); + } + + public void testWriteYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + + extractor.write(filename); + + String truth = "%YAML:1.0\n---\nname: \"Feature2D.LATCH\"\ndescriptorSize: 
32\nrotationInvariance: 1\nhalf_ssd_size: 3\nsigma: 2.\n"; + String actual = readFile(filename); + actual = actual.replaceAll("e([+-])0(\\d\\d)", "e$1$2"); // NOTE: workaround for different platforms double representation + assertEquals(truth, actual); + } + +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/LUCIDDescriptorExtractorTest.java b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/LUCIDDescriptorExtractorTest.java new file mode 100644 index 00000000..295fc215 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/LUCIDDescriptorExtractorTest.java @@ -0,0 +1,62 @@ +package org.opencv.test.features2d; + +import org.opencv.test.OpenCVTestCase; +import org.opencv.test.OpenCVTestRunner; +import org.opencv.xfeatures2d.LUCID; + +public class LUCIDDescriptorExtractorTest extends OpenCVTestCase { + + LUCID extractor; + + @Override + protected void setUp() throws Exception { + super.setUp(); + extractor = LUCID.create(); // default (1,2) + } + + public void testCreate() { + assertNotNull(extractor); + } + + public void testDetectListOfMatListOfListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectListOfMatListOfListOfKeyPointListOfMat() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPointMat() { + fail("Not yet implemented"); + } + + public void testEmpty() { + fail("Not yet implemented"); + } + + public void testReadYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + writeFile(filename, "%YAML:1.0\n---\nname: \"Feature2D.LUCID\"\nlucid_kernel: 2\nblur_kernel: 3\n"); + + extractor.read(filename); + + assertEquals(2, extractor.getLucidKernel()); + assertEquals(3, extractor.getBlurKernel()); + } + + public void testWriteYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + + extractor.write(filename); + + String 
truth = "%YAML:1.0\n---\nname: \"Feature2D.LUCID\"\nlucid_kernel: 1\nblur_kernel: 2\n"; + String actual = readFile(filename); + actual = actual.replaceAll("e([+-])0(\\d\\d)", "e$1$2"); // NOTE: workaround for different platforms double representation + assertEquals(truth, actual); + } + +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/MSDFeatureDetectorTest.java b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/MSDFeatureDetectorTest.java new file mode 100644 index 00000000..8e37e013 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/MSDFeatureDetectorTest.java @@ -0,0 +1,69 @@ +package org.opencv.test.features2d; + +import org.opencv.test.OpenCVTestCase; +import org.opencv.test.OpenCVTestRunner; +import org.opencv.xfeatures2d.MSDDetector; + +public class MSDFeatureDetectorTest extends OpenCVTestCase { + + MSDDetector detector; + + @Override + protected void setUp() throws Exception { + super.setUp(); + detector = MSDDetector.create(); // default (3,5,5,0,250.4,',1.25,-1,false) + } + + public void testCreate() { + assertNotNull(detector); + } + + public void testDetectListOfMatListOfListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectListOfMatListOfListOfKeyPointListOfMat() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPointMat() { + fail("Not yet implemented"); + } + + public void testEmpty() { + fail("Not yet implemented"); + } + + public void testReadYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + writeFile(filename, "%YAML:1.0\n---\nname: \"Feature2D.MSD\"\npatch_radius: 4\nsearch_area_radius: 6\nnms_radius: 7\nnms_scale_radius: 1\nth_saliency: 251.\nkNN: 2\nscale_factor: 1.26\nn_scales: 3\ncompute_orientation: 1\n"); + + detector.read(filename); + + assertEquals(4, detector.getPatchRadius()); + assertEquals(6, 
detector.getSearchAreaRadius()); + assertEquals(7, detector.getNmsRadius()); + assertEquals(1, detector.getNmsScaleRadius()); + assertEquals(251.0f, detector.getThSaliency()); + assertEquals(2, detector.getKNN()); + assertEquals(1.26f, detector.getScaleFactor()); + assertEquals(3, detector.getNScales()); + assertEquals(true, detector.getComputeOrientation()); + } + + public void testWriteYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + + detector.write(filename); + + String truth = "%YAML:1.0\n---\nname: \"Feature2D.MSD\"\npatch_radius: 3\nsearch_area_radius: 5\nnms_radius: 5\nnms_scale_radius: 0\nth_saliency: 250.\nkNN: 4\nscale_factor: 1.2500000000000000e+00\nn_scales: -1\ncompute_orientation: 0\n"; + String actual = readFile(filename); + actual = actual.replaceAll("e([+-])0(\\d\\d)", "e$1$2"); // NOTE: workaround for different platforms double representation + assertEquals(truth, actual); + } + +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/STARFeatureDetectorTest.java b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/STARFeatureDetectorTest.java new file mode 100644 index 00000000..88d3dade --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/STARFeatureDetectorTest.java @@ -0,0 +1,128 @@ +package org.opencv.test.features2d; + +import java.util.Arrays; + +import org.opencv.core.CvType; +import org.opencv.core.Mat; +import org.opencv.core.MatOfKeyPoint; +import org.opencv.core.Point; +import org.opencv.core.Scalar; +import org.opencv.core.KeyPoint; +import org.opencv.test.OpenCVTestCase; +import org.opencv.test.OpenCVTestRunner; +import org.opencv.imgproc.Imgproc; +import org.opencv.xfeatures2d.StarDetector; + +public class STARFeatureDetectorTest extends OpenCVTestCase { + + StarDetector detector; + int matSize; + KeyPoint[] truth; + + private Mat getMaskImg() { + Mat mask = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255)); + Mat right = 
mask.submat(0, matSize, matSize / 2, matSize); + right.setTo(new Scalar(0)); + return mask; + } + + private Mat getTestImg() { + Scalar color = new Scalar(0); + int center = matSize / 2; + int radius = 6; + int offset = 40; + + Mat img = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255)); + Imgproc.circle(img, new Point(center - offset, center), radius, color, -1); + Imgproc.circle(img, new Point(center + offset, center), radius, color, -1); + Imgproc.circle(img, new Point(center, center - offset), radius, color, -1); + Imgproc.circle(img, new Point(center, center + offset), radius, color, -1); + Imgproc.circle(img, new Point(center, center), radius, color, -1); + return img; + } + + protected void setUp() throws Exception { + super.setUp(); + detector = createClassInstance(XFEATURES2D+"StarDetector", DEFAULT_FACTORY, null, null); + matSize = 200; + truth = new KeyPoint[] { + new KeyPoint( 95, 80, 22, -1, 31.5957f, 0, -1), + new KeyPoint(105, 80, 22, -1, 31.5957f, 0, -1), + new KeyPoint( 80, 95, 22, -1, 31.5957f, 0, -1), + new KeyPoint(120, 95, 22, -1, 31.5957f, 0, -1), + new KeyPoint(100, 100, 8, -1, 30.f, 0, -1), + new KeyPoint( 80, 105, 22, -1, 31.5957f, 0, -1), + new KeyPoint(120, 105, 22, -1, 31.5957f, 0, -1), + new KeyPoint( 95, 120, 22, -1, 31.5957f, 0, -1), + new KeyPoint(105, 120, 22, -1, 31.5957f, 0, -1) + }; + } + + public void testCreate() { + assertNotNull(detector); + } + + public void testDetectListOfMatListOfListOfKeyPoint() { + fail("Not yet implemented"); + } + + public void testDetectListOfMatListOfListOfKeyPointListOfMat() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPoint() { + Mat img = getTestImg(); + MatOfKeyPoint keypoints = new MatOfKeyPoint(); + + detector.detect(img, keypoints); + + assertListKeyPointEquals(Arrays.asList(truth), keypoints.toList(), EPS); + } + + public void testDetectMatListOfKeyPointMat() { + Mat img = getTestImg(); + Mat mask = getMaskImg(); + MatOfKeyPoint keypoints = new 
MatOfKeyPoint(); + + detector.detect(img, keypoints, mask); + + assertListKeyPointEquals(Arrays.asList(truth[0], truth[2], truth[5], truth[7]), keypoints.toList(), EPS); + } + + public void testEmpty() { +// assertFalse(detector.empty()); + fail("Not yet implemented"); + } + + public void testReadYml() { + Mat img = getTestImg(); + + MatOfKeyPoint keypoints1 = new MatOfKeyPoint(); + detector.detect(img, keypoints1); + + String filename = OpenCVTestRunner.getTempFileName("yml"); + writeFile(filename, "%YAML:1.0\n---\nname: \"Feature2D.STAR\"\nmaxSize: 45\nresponseThreshold: 150\nlineThresholdProjected: 10\nlineThresholdBinarized: 8\nsuppressNonmaxSize: 5\n"); + detector.read(filename); + + assertEquals(45, detector.getMaxSize()); + assertEquals(150, detector.getResponseThreshold()); + assertEquals(10, detector.getLineThresholdProjected()); + assertEquals(8, detector.getLineThresholdBinarized()); + assertEquals(5, detector.getSuppressNonmaxSize()); + + MatOfKeyPoint keypoints2 = new MatOfKeyPoint(); + detector.detect(img, keypoints2); + + assertTrue(keypoints2.total() <= keypoints1.total()); + } + + public void testWriteYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + + detector.write(filename); + + String truth = "%YAML:1.0\n---\nname: \"Feature2D.STAR\"\nmaxSize: 45\nresponseThreshold: 30\nlineThresholdProjected: 10\nlineThresholdBinarized: 8\nsuppressNonmaxSize: 5\n"; + assertEquals(truth, readFile(filename)); + } + +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/SURFDescriptorExtractorTest.java b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/SURFDescriptorExtractorTest.java new file mode 100644 index 00000000..e2bac918 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/SURFDescriptorExtractorTest.java @@ -0,0 +1,115 @@ +package org.opencv.test.features2d; + +import org.opencv.core.CvType; +import org.opencv.core.Mat; +import 
org.opencv.core.MatOfKeyPoint; +import org.opencv.core.Point; +import org.opencv.core.Scalar; +import org.opencv.core.KeyPoint; +import org.opencv.test.OpenCVTestCase; +import org.opencv.test.OpenCVTestRunner; +import org.opencv.imgproc.Imgproc; +import org.opencv.xfeatures2d.SURF; + +public class SURFDescriptorExtractorTest extends OpenCVTestCase { + + SURF extractor; + int matSize; + + private Mat getTestImg() { + Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255)); + Imgproc.line(cross, new Point(20, matSize / 2), new Point(matSize - 21, matSize / 2), new Scalar(100), 2); + Imgproc.line(cross, new Point(matSize / 2, 20), new Point(matSize / 2, matSize - 21), new Scalar(100), 2); + + return cross; + } + + @Override + protected void setUp() throws Exception { + super.setUp(); + + Class[] cParams = {double.class, int.class, int.class, boolean.class, boolean.class}; + Object[] oValues = {100, 2, 4, true, false}; + extractor = createClassInstance(XFEATURES2D+"SURF", DEFAULT_FACTORY, cParams, oValues); + + matSize = 100; + } + + public void testComputeListOfMatListOfListOfKeyPointListOfMat() { + fail("Not yet implemented"); + } + + public void testComputeMatListOfKeyPointMat() { + KeyPoint point = new KeyPoint(55.775577545166016f, 44.224422454833984f, 16, 9.754629f, 8617.863f, 1, -1); + MatOfKeyPoint keypoints = new MatOfKeyPoint(point); + Mat img = getTestImg(); + Mat descriptors = new Mat(); + + extractor.compute(img, keypoints, descriptors); + + Mat truth = new Mat(1, 128, CvType.CV_32FC1) { + { + put(0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0.058821894, 0.058821894, -0.045962855, 0.046261817, 0.0085156476, + 0.0085754395, -0.0064509804, 0.0064509804, 0.00044069235, 0.00044069235, 0, 0, 0.00025723741, + 0.00025723741, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00025723741, 0.00025723741, -0.00044069235, + 0.00044069235, 0, 0, 0.36278215, 0.36278215, -0.24688604, 0.26173124, 0.052068226, 0.052662034, + -0.032815345, 0.032815345, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, -0.0064523756, + 0.0064523756, 0.0082002236, 0.0088908644, -0.059001274, 0.059001274, 0.045789491, 0.04648013, + 0.11961588, 0.22789426, -0.01322381, 0.18291828, -0.14042182, 0.23973691, 0.073782086, 0.23769434, + -0.027880307, 0.027880307, 0.049587864, 0.049587864, -0.33991757, 0.33991757, 0.21437603, 0.21437603, + -0.0020763327, 0.0020763327, 0.006245892, 0.006245892, -0.04067041, 0.04067041, 0.019361559, + 0.019361559, 0, 0, -0.0035977389, 0.0035977389, 0, 0, -0.00099993451, 0.00099993451, 0.040670406, + 0.040670406, -0.019361559, 0.019361559, 0.006245892, 0.006245892, -0.0020763327, 0.0020763327, + -0.00034532088, 0.00034532088, 0, 0, 0, 0, 0.00034532088, 0.00034532088, -0.00099993451, + 0.00099993451, 0, 0, 0, 0, 0.0035977389, 0.0035977389 + ); + } + }; + + assertMatEqual(truth, descriptors, EPS); + } + + public void testCreate() { + assertNotNull(extractor); + } + + public void testDescriptorSize() { + assertEquals(128, extractor.descriptorSize()); + } + + public void testDescriptorType() { + assertEquals(CvType.CV_32F, extractor.descriptorType()); + } + + public void testEmpty() { +// assertFalse(extractor.empty()); + fail("Not yet implemented"); + } + + public void testReadYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + writeFile(filename, "%YAML:1.0\n---\nname: \"Feature2D.SURF\"\nhessianThreshold: 100.\nextended: 1\nupright: 0\nnOctaves: 2\nnOctaveLayers: 4\n"); + + extractor.read(filename); + + assertEquals(128, extractor.descriptorSize()); + assertEquals(true, extractor.getExtended()); + assertEquals(false, extractor.getUpright()); + assertEquals(2, extractor.getNOctaves()); + assertEquals(4, extractor.getNOctaveLayers()); + assertEquals(100., extractor.getHessianThreshold()); + } + + public void testWriteYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + + extractor.write(filename); + + String truth = "%YAML:1.0\n---\nname: \"Feature2D.SURF\"\nhessianThreshold: 100.\nextended: 1\nupright: 
0\nnOctaves: 2\nnOctaveLayers: 4\n"; + String actual = readFile(filename); + actual = actual.replaceAll("e([+-])0(\\d\\d)", "e$1$2"); // NOTE: workaround for different platforms double representation + assertEquals(truth, actual); + } + +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/SURFFeatureDetectorTest.java b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/SURFFeatureDetectorTest.java new file mode 100644 index 00000000..6e46371b --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/java/test/SURFFeatureDetectorTest.java @@ -0,0 +1,176 @@ +package org.opencv.test.features2d; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; + +import org.opencv.core.CvType; +import org.opencv.core.Mat; +import org.opencv.core.MatOfKeyPoint; +import org.opencv.core.Point; +import org.opencv.core.Scalar; +import org.opencv.core.KeyPoint; +import org.opencv.test.OpenCVTestCase; +import org.opencv.test.OpenCVTestRunner; +import org.opencv.imgproc.Imgproc; +import org.opencv.xfeatures2d.SURF; + +public class SURFFeatureDetectorTest extends OpenCVTestCase { + + SURF detector; + int matSize; + KeyPoint[] truth; + + private Mat getMaskImg() { + Mat mask = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255)); + Mat right = mask.submat(0, matSize, matSize / 2, matSize); + right.setTo(new Scalar(0)); + return mask; + } + + private Mat getTestImg() { + Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255)); + Imgproc.line(cross, new Point(20, matSize / 2), new Point(matSize - 21, matSize / 2), new Scalar(100), 2); + Imgproc.line(cross, new Point(matSize / 2, 20), new Point(matSize / 2, matSize - 21), new Scalar(100), 2); + + return cross; + } + + private void order(List points) { + Collections.sort(points, new Comparator() { + public int compare(KeyPoint p1, KeyPoint p2) { + if (p1.angle < p2.angle) + 
return -1; + if (p1.angle > p2.angle) + return 1; + return 0; + } + }); + } + + @Override + protected void setUp() throws Exception { + super.setUp(); + detector = createClassInstance(XFEATURES2D + "SURF", DEFAULT_FACTORY, null, null); + matSize = 100; + truth = new KeyPoint[] { + new KeyPoint(55.775578f, 55.775578f, 16, 80.245735f, 8617.8633f, 0, -1), + new KeyPoint(44.224422f, 55.775578f, 16, 170.24574f, 8617.8633f, 0, -1), + new KeyPoint(44.224422f, 44.224422f, 16, 260.24573f, 8617.8633f, 0, -1), + new KeyPoint(55.775578f, 44.224422f, 16, 350.24573f, 8617.8633f, 0, -1) + }; + } + + public void testCreate() { + assertNotNull(detector); + } + + public void testDetectListOfMatListOfListOfKeyPoint() { + + setProperty(detector, "hessianThreshold", "double", 8000); + setProperty(detector, "nOctaves", "int", 3); + setProperty(detector, "nOctaveLayers", "int", 4); + setProperty(detector, "upright", "boolean", false); + setProperty(detector, "extended", "boolean", true); + + List keypoints = new ArrayList(); + Mat cross = getTestImg(); + List crosses = new ArrayList(3); + crosses.add(cross); + crosses.add(cross); + crosses.add(cross); + + detector.detect(crosses, keypoints); + + assertEquals(3, keypoints.size()); + + for (MatOfKeyPoint mkp : keypoints) { + List lkp = mkp.toList(); + order(lkp); + assertListKeyPointEquals(Arrays.asList(truth), lkp, EPS); + } + } + + public void testDetectListOfMatListOfListOfKeyPointListOfMat() { + fail("Not yet implemented"); + } + + public void testDetectMatListOfKeyPoint() { + + setProperty(detector, "hessianThreshold", "double", 8000); + setProperty(detector, "nOctaves", "int", 3); + setProperty(detector, "nOctaveLayers", "int", 4); + setProperty(detector, "upright", "boolean", false); + + MatOfKeyPoint keypoints = new MatOfKeyPoint(); + Mat cross = getTestImg(); + + detector.detect(cross, keypoints); + + List lkp = keypoints.toList(); + order(lkp); + assertListKeyPointEquals(Arrays.asList(truth), lkp, EPS); + } + + public void 
testDetectMatListOfKeyPointMat() { + + setProperty(detector, "hessianThreshold", "double", 8000); + setProperty(detector, "nOctaves", "int", 3); + setProperty(detector, "nOctaveLayers", "int", 4); + setProperty(detector, "upright", "boolean", false); + setProperty(detector, "extended", "boolean", true); + + Mat img = getTestImg(); + Mat mask = getMaskImg(); + MatOfKeyPoint keypoints = new MatOfKeyPoint(); + + detector.detect(img, keypoints, mask); + + List lkp = keypoints.toList(); + order(lkp); + assertListKeyPointEquals(Arrays.asList(truth[1], truth[2]), lkp, EPS); + } + + public void testEmpty() { +// assertFalse(detector.empty()); + fail("Not yet implemented"); + } + + public void testReadYml() { + Mat cross = getTestImg(); + + MatOfKeyPoint keypoints1 = new MatOfKeyPoint(); + detector.detect(cross, keypoints1); + + String filename = OpenCVTestRunner.getTempFileName("xml"); + writeFile(filename, "\n\nFeature2D.SURF\n8000.\n1\n0\n3\n4\n\n"); + + detector.read(filename); + + assertEquals(128, detector.descriptorSize()); + assertEquals(8000., detector.getHessianThreshold()); + assertEquals(true, detector.getExtended()); + assertEquals(false, detector.getUpright()); + assertEquals(3, detector.getNOctaves()); + assertEquals(4, detector.getNOctaveLayers()); + + MatOfKeyPoint keypoints2 = new MatOfKeyPoint(); + detector.detect(cross, keypoints2); + + assertTrue(keypoints2.total() <= keypoints1.total()); + } + + public void testWriteYml() { + String filename = OpenCVTestRunner.getTempFileName("yml"); + + detector.write(filename); + + String truth = "%YAML:1.0\n---\nname: \"Feature2D.SURF\"\nhessianThreshold: 100.\nextended: 0\nupright: 0\nnOctaves: 4\nnOctaveLayers: 3\n"; + String actual = readFile(filename); + actual = actual.replaceAll("e([+-])0(\\d\\d)", "e$1$2"); // NOTE: workaround for different platforms double representation + assertEquals(truth, actual); + } + +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/objc/gen_dict.json 
b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/objc/gen_dict.json new file mode 100644 index 00000000..5d278b92 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/objc/gen_dict.json @@ -0,0 +1,17 @@ +{ + "class_ignore_list" : [ + "SURF_CUDA" + ], + "AdditionalImports" : { + "*" : [ "\"xfeatures2d.hpp\"" ] + }, + "func_arg_fix" : { + "DAISY" : { + "create" : { "norm" : { "ctype" : "NormalizationType", + "defval" : "cv::xfeatures2d::DAISY::NRM_NONE"} } + }, + "PCTSignatures" : { + "(PCTSignatures*)create:(NSArray*)initSamplingPoints initSeedCount:(int)initSeedCount" : { "create" : {"name" : "create2"} } + } + } +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/pyopencv_sift.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/pyopencv_sift.hpp new file mode 100644 index 00000000..2a15f5ef --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/pyopencv_sift.hpp @@ -0,0 +1,2 @@ +// Compatibility +#include "shadow_sift.hpp" diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/pyopencv_xfeatures2d.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/pyopencv_xfeatures2d.hpp new file mode 100644 index 00000000..17d90674 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/pyopencv_xfeatures2d.hpp @@ -0,0 +1,7 @@ +#ifdef HAVE_OPENCV_XFEATURES2D + +#include "opencv2/xfeatures2d.hpp" +using cv::xfeatures2d::DAISY; + +typedef DAISY::NormalizationType DAISY_NormalizationType; +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/shadow_sift.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/shadow_sift.hpp new file mode 100644 index 00000000..d7fd2324 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/shadow_sift.hpp @@ -0,0 +1,20 @@ +// Compatibility +// SIFT is moved to the main repository + +namespace cv { 
+namespace xfeatures2d { + +/** Use cv.SIFT_create() instead */ +CV_WRAP static inline +Ptr SIFT_create(int nfeatures = 0, int nOctaveLayers = 3, + double contrastThreshold = 0.04, double edgeThreshold = 10, + double sigma = 1.6) +{ + CV_LOG_ONCE_WARNING(NULL, "DEPRECATED: cv.xfeatures2d.SIFT_create() is deprecated due SIFT tranfer to the main repository. " + "https://github.com/opencv/opencv/issues/16736" + ); + + return SIFT::create(nfeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma); +} + +}} // namespace diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/test/test_cuda_xfeatures2d.py b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/test/test_cuda_xfeatures2d.py new file mode 100644 index 00000000..16c8e565 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/test/test_cuda_xfeatures2d.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python +import os +import cv2 as cv +import numpy as np + +from tests_common import NewOpenCVTests, unittest + +class xfeatures2d_test(NewOpenCVTests): + def setUp(self): + super(xfeatures2d_test, self).setUp() + if not cv.cuda.getCudaEnabledDeviceCount(): + self.skipTest("No CUDA-capable device is detected") + + @unittest.skipIf('OPENCV_TEST_DATA_PATH' not in os.environ, + "OPENCV_TEST_DATA_PATH is not defined") + def test_surf(self): + img_path = os.environ['OPENCV_TEST_DATA_PATH'] + "/gpu/features2d/aloe.png" + hessianThreshold = 100 + nOctaves = 3 + nOctaveLayers = 2 + extended = False + keypointsRatio = 0.05 + upright = False + + npMat = cv.cvtColor(cv.imread(img_path),cv.COLOR_BGR2GRAY) + cuMat = cv.cuda_GpuMat(npMat) + + try: + cuSurf = cv.cuda_SURF_CUDA.create(hessianThreshold,nOctaves,nOctaveLayers,extended,keypointsRatio,upright) + surf = cv.xfeatures2d_SURF.create(hessianThreshold,nOctaves,nOctaveLayers,extended,upright) + except cv.error as e: + self.assertEqual(e.code, cv.Error.StsNotImplemented) + self.skipTest("OPENCV_ENABLE_NONFREE is not 
enabled in this build.") + + cuKeypoints = cuSurf.detect(cuMat,cv.cuda_GpuMat()) + keypointsHost = cuSurf.downloadKeypoints(cuKeypoints) + keypoints = surf.detect(npMat) + self.assertTrue(len(keypointsHost) == len(keypoints)) + + cuKeypoints, cuDescriptors = cuSurf.detectWithDescriptors(cuMat,cv.cuda_GpuMat(),cuKeypoints,useProvidedKeypoints=True) + keypointsHost = cuSurf.downloadKeypoints(cuKeypoints) + descriptorsHost = cuDescriptors.download() + keypoints, descriptors = surf.compute(npMat,keypoints) + + self.assertTrue(len(keypointsHost) == len(keypoints) and descriptorsHost.shape == descriptors.shape) + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/test/test_descriptors.py b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/test/test_descriptors.py new file mode 100644 index 00000000..ca8bbcbc --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/test/test_descriptors.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python + +# Python 2/3 compatibility +from __future__ import print_function + +import os +import numpy as np +import cv2 as cv + +from tests_common import NewOpenCVTests + +class MSDDetector_test(NewOpenCVTests): + + def test_create(self): + + msd = cv.xfeatures2d.MSDDetector_create() + self.assertFalse(msd is None) + + img1 = np.zeros((100, 100, 3), dtype=np.uint8) + kp1_ = msd.detect(img1, None) + + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/test/test_sift_compatibility.py b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/test/test_sift_compatibility.py new file mode 100644 index 00000000..96e22094 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/misc/python/test/test_sift_compatibility.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python + +# Python 2/3 compatibility +from __future__ 
import print_function + +import os +import numpy as np +import cv2 as cv + +from tests_common import NewOpenCVTests + +class sift_compatibility_test(NewOpenCVTests): + + def test_create(self): + + sift = cv.xfeatures2d.SIFT_create() + self.assertFalse(sift is None) + + img1 = np.zeros((100, 100, 3), dtype=np.uint8) + kp1_, des1_ = sift.detectAndCompute(img1, None) + + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_beblid.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_beblid.cpp new file mode 100644 index 00000000..2e2d3eb5 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_beblid.cpp @@ -0,0 +1,36 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +#include "perf_precomp.hpp" + +namespace opencv_test { namespace { + +typedef perf::TestBaseWithParam beblid; + +#define BEBLID_IMAGES \ + "cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\ + "stitching/a3.png" + +#ifdef OPENCV_ENABLE_NONFREE +PERF_TEST_P(beblid, extract, testing::Values(BEBLID_IMAGES)) +{ + string filename = getDataPath(GetParam()); + Mat frame = imread(filename, IMREAD_GRAYSCALE); + ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename; + + Mat mask; + declare.in(frame).time(90); + + Ptr detector = SURF::create(); + vector points; + detector->detect(frame, points, mask); + + Ptr descriptor = BEBLID::create(6.25f); + cv::Mat descriptors; + TEST_CYCLE() descriptor->compute(frame, points, descriptors); + + SANITY_CHECK_NOTHING(); +} +#endif // NONFREE + +}} // namespace diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_daisy.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_daisy.cpp new file mode 100644 index 00000000..154d8eb1 --- /dev/null 
+++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_daisy.cpp @@ -0,0 +1,33 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +#include "perf_precomp.hpp" + +namespace opencv_test { namespace { + +typedef perf::TestBaseWithParam daisy; + +#define DAISY_IMAGES \ + "cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\ + "stitching/a3.png" + +PERF_TEST_P(daisy, extract, testing::Values(DAISY_IMAGES)) +{ + string filename = getDataPath(GetParam()); + Mat frame = imread(filename, IMREAD_GRAYSCALE); + ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename; + + Mat mask; + declare.in(frame).time(90); + + Ptr descriptor = DAISY::create(); + + vector points; + Mat_ descriptors; + // compute all daisies in image + TEST_CYCLE() descriptor->compute(frame, descriptors); + + SANITY_CHECK_NOTHING(); +} + +}} // namespace diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_latch.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_latch.cpp new file mode 100644 index 00000000..ac5dcd73 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_latch.cpp @@ -0,0 +1,36 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+#include "perf_precomp.hpp" + +namespace opencv_test { namespace { + +typedef perf::TestBaseWithParam latch; + +#define LATCH_IMAGES \ + "cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\ + "stitching/a3.png" + +#ifdef OPENCV_ENABLE_NONFREE +PERF_TEST_P(latch, extract, testing::Values(LATCH_IMAGES)) +{ + string filename = getDataPath(GetParam()); + Mat frame = imread(filename, IMREAD_GRAYSCALE); + ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename; + + Mat mask; + declare.in(frame).time(90); + + Ptr detector = SURF::create(); + vector points; + detector->detect(frame, points, mask); + + Ptr descriptor = LATCH::create(); + vector descriptors; + TEST_CYCLE() descriptor->compute(frame, points, descriptors); + + SANITY_CHECK_NOTHING(); +} +#endif // NONFREE + +}} // namespace diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_main.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_main.cpp new file mode 100644 index 00000000..2c3cc5aa --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_main.cpp @@ -0,0 +1,14 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +#include "perf_precomp.hpp" +#include "opencv2/ts/cuda_perf.hpp" + +static const char * impls[] = { +#ifdef HAVE_CUDA + "cuda", +#endif + "plain" +}; + +CV_PERF_TEST_MAIN_WITH_IMPLS(xfeatures2d, impls, perf::printCudaInfo()) diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_msd.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_msd.cpp new file mode 100644 index 00000000..0505d692 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_msd.cpp @@ -0,0 +1,33 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +#include "perf_precomp.hpp" + +namespace opencv_test { namespace { + +typedef perf::TestBaseWithParam msd; + +#define MSD_IMAGES \ + "cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\ + "stitching/a3.png" + +PERF_TEST_P(msd, detect, testing::Values(MSD_IMAGES)) +{ + string filename = getDataPath(GetParam()); + Mat frame = imread(filename, IMREAD_GRAYSCALE); + + if (frame.empty()) + FAIL() << "Unable to load source image " << filename; + + Mat mask; + declare.in(frame); + Ptr detector = MSDDetector::create(); + vector points; + + TEST_CYCLE() detector->detect(frame, points, mask); + + sort(points.begin(), points.end(), comparators::KeypointGreater()); + SANITY_CHECK_KEYPOINTS(points, 1e-3); +} + +}} // namespace \ No newline at end of file diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_precomp.hpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_precomp.hpp new file mode 100644 index 00000000..249d0798 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_precomp.hpp @@ -0,0 +1,25 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+#ifndef __OPENCV_PERF_PRECOMP_HPP__ +#define __OPENCV_PERF_PRECOMP_HPP__ + +#include "cvconfig.h" + +#include "opencv2/ts.hpp" +#include "opencv2/xfeatures2d.hpp" + +#ifdef HAVE_OPENCV_OCL +# include "opencv2/ocl.hpp" +#endif + +#ifdef HAVE_CUDA +# include "opencv2/xfeatures2d/cuda.hpp" +#endif + +namespace opencv_test { +using namespace cv::xfeatures2d; +using namespace perf; +} + +#endif diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_surf.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_surf.cpp new file mode 100644 index 00000000..814fb1da --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_surf.cpp @@ -0,0 +1,68 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +#include "perf_precomp.hpp" + +#ifdef OPENCV_ENABLE_NONFREE +namespace opencv_test { namespace { + +typedef perf::TestBaseWithParam surf; + +#define SURF_IMAGES \ + "cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\ + "stitching/a3.png" + +PERF_TEST_P(surf, detect, testing::Values(SURF_IMAGES)) +{ + string filename = getDataPath(GetParam()); + Mat frame = imread(filename, IMREAD_GRAYSCALE); + ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename; + + Mat mask; + declare.in(frame).time(90); + Ptr detector = SURF::create(); + vector points; + + TEST_CYCLE() detector->detect(frame, points, mask); + + SANITY_CHECK_NOTHING(); +} + +PERF_TEST_P(surf, extract, testing::Values(SURF_IMAGES)) +{ + string filename = getDataPath(GetParam()); + Mat frame = imread(filename, IMREAD_GRAYSCALE); + ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename; + + Mat mask; + declare.in(frame).time(90); + + Ptr detector = SURF::create(); + vector points; + Mat descriptors; + detector->detect(frame, points, mask); + + TEST_CYCLE() 
detector->compute(frame, points, descriptors); + + SANITY_CHECK_NOTHING(); +} + +PERF_TEST_P(surf, full, testing::Values(SURF_IMAGES)) +{ + string filename = getDataPath(GetParam()); + Mat frame = imread(filename, IMREAD_GRAYSCALE); + ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename; + + Mat mask; + declare.in(frame).time(90); + Ptr detector = SURF::create(); + vector points; + Mat descriptors; + + TEST_CYCLE() detector->detectAndCompute(frame, mask, points, descriptors, false); + + SANITY_CHECK_NOTHING(); +} + +}} // namespace +#endif // NONFREE diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_surf.cuda.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_surf.cuda.cpp new file mode 100644 index 00000000..7421642c --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_surf.cuda.cpp @@ -0,0 +1,102 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "perf_precomp.hpp" + +#if defined(HAVE_CUDA) && defined(OPENCV_ENABLE_NONFREE) + +#include "opencv2/ts/cuda_perf.hpp" + +namespace opencv_test { namespace { + +////////////////////////////////////////////////////////////////////// +// SURF + +#ifdef HAVE_OPENCV_CUDAARITHM + +DEF_PARAM_TEST_1(Image, string); + +PERF_TEST_P(Image, CUDA_SURF, + Values("gpu/perf/aloe.png")) +{ + declare.time(50.0); + + const cv::Mat img = readImage(GetParam(), cv::IMREAD_GRAYSCALE); + ASSERT_FALSE(img.empty()); + + if (PERF_RUN_CUDA()) + { + cv::cuda::SURF_CUDA d_surf; + + const cv::cuda::GpuMat d_img(img); + cv::cuda::GpuMat d_keypoints, d_descriptors; + + TEST_CYCLE() d_surf(d_img, cv::cuda::GpuMat(), d_keypoints, d_descriptors); + + std::vector gpu_keypoints; + d_surf.downloadKeypoints(d_keypoints, gpu_keypoints); + + cv::Mat gpu_descriptors(d_descriptors); + + sortKeyPoints(gpu_keypoints, gpu_descriptors); + + SANITY_CHECK_KEYPOINTS(gpu_keypoints); + SANITY_CHECK(gpu_descriptors, 1e-3); + } + else + { + cv::Ptr surf = cv::xfeatures2d::SURF::create(); + std::vector cpu_keypoints; + cv::Mat cpu_descriptors; + + TEST_CYCLE() surf->detect(img, cpu_keypoints); + TEST_CYCLE() surf->compute(img, cpu_keypoints, cpu_descriptors); + + SANITY_CHECK_KEYPOINTS(cpu_keypoints); + SANITY_CHECK(cpu_descriptors); + } +} + +#endif // HAVE_OPENCV_CUDAARITHM + +}} // namespace +#endif // HAVE_CUDA && OPENCV_ENABLE_NONFREE diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_surf.ocl.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_surf.ocl.cpp new file mode 100644 index 00000000..2ba92a97 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_surf.ocl.cpp @@ -0,0 +1,108 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved. +// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// @Authors +// Peng Xiao, pengxiao@multicorewareinc.com +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors as is and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "perf_precomp.hpp" + +#if defined(HAVE_OPENCV_OCL) && defined(OPENCV_ENABLE_NONFREE) + +namespace opencv_test { namespace { + +typedef perf::TestBaseWithParam OCL_SURF; + +#define SURF_IMAGES \ + "cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\ + "stitching/a3.png" + +PERF_TEST_P(OCL_SURF, DISABLED_with_data_transfer, testing::Values(SURF_IMAGES)) +{ + string filename = getDataPath(GetParam()); + Mat img = imread(filename, IMREAD_GRAYSCALE); + ASSERT_FALSE(img.empty()); + + SURF_OCL d_surf; + oclMat d_keypoints; + oclMat d_descriptors; + Mat cpu_kp; + Mat cpu_dp; + + declare.time(60); + + TEST_CYCLE() + { + oclMat d_src(img); + + d_surf(d_src, oclMat(), d_keypoints, d_descriptors); + + d_keypoints.download(cpu_kp); + d_descriptors.download(cpu_dp); + } + + SANITY_CHECK_NOTHING(); +} + +PERF_TEST_P(OCL_SURF, DISABLED_without_data_transfer, testing::Values(SURF_IMAGES)) +{ + string filename = getDataPath(GetParam()); + Mat img = imread(filename, IMREAD_GRAYSCALE); + ASSERT_FALSE(img.empty()); + + SURF_OCL d_surf; + oclMat d_keypoints; + oclMat d_descriptors; + oclMat d_src(img); + + declare.time(60); + + TEST_CYCLE() d_surf(d_src, oclMat(), d_keypoints, d_descriptors); + + Mat cpu_kp; + Mat cpu_dp; + d_keypoints.download(cpu_kp); + d_descriptors.download(cpu_dp); + SANITY_CHECK_NOTHING(); +} + +}} // namespace +#endif // HAVE_OPENCV_OCL && OPENCV_ENABLE_NONFREE diff --git 
a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_teblid.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_teblid.cpp new file mode 100644 index 00000000..01a84336 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_teblid.cpp @@ -0,0 +1,36 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +#include "perf_precomp.hpp" + +namespace opencv_test { namespace { + +typedef perf::TestBaseWithParam teblid; + +#define TEBLID_IMAGES \ + "cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\ + "stitching/a3.png" + +#ifdef OPENCV_ENABLE_NONFREE +PERF_TEST_P(teblid, extract, testing::Values(TEBLID_IMAGES)) +{ + string filename = getDataPath(GetParam()); + Mat frame = imread(filename, IMREAD_GRAYSCALE); + ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename; + + Mat mask; + declare.in(frame).time(90); + + Ptr detector = SURF::create(); + vector points; + detector->detect(frame, points, mask); + + Ptr descriptor = TEBLID::create(6.25f); + cv::Mat descriptors; + TEST_CYCLE() descriptor->compute(frame, points, descriptors); + + SANITY_CHECK_NOTHING(); +} +#endif // NONFREE + +}} // namespace diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_vgg.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_vgg.cpp new file mode 100644 index 00000000..095a745d --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/perf/perf_vgg.cpp @@ -0,0 +1,39 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+#include "perf_precomp.hpp" + +namespace opencv_test { namespace { + +#ifdef OPENCV_XFEATURES2D_HAS_VGG_DATA + +typedef perf::TestBaseWithParam vgg; + +#define VGG_IMAGES \ + "cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\ + "stitching/a3.png" + +PERF_TEST_P(vgg, extract, testing::Values(VGG_IMAGES)) +{ + string filename = getDataPath(GetParam()); + Mat frame = imread(filename, IMREAD_GRAYSCALE); + ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename; + + Mat mask; + declare.in(frame).time(90); + + Ptr detector = KAZE::create(); + vector points; + detector->detect(frame, points, mask); + + Ptr descriptor = VGG::create(); + Mat_ descriptors; + // compute keypoints descriptor + TEST_CYCLE() descriptor->compute(frame, points, descriptors); + + SANITY_CHECK_NOTHING(); +} + +#endif // OPENCV_XFEATURES2D_HAS_VGG_DATA + +}} // namespace diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/bagofwords_classification.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/bagofwords_classification.cpp new file mode 100644 index 00000000..222c949b --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/bagofwords_classification.cpp @@ -0,0 +1,2646 @@ +#include +#include "opencv2/opencv_modules.hpp" + +#ifdef HAVE_OPENCV_ML + +#include "opencv2/imgcodecs.hpp" +#include "opencv2/highgui.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/features2d.hpp" +#include "opencv2/xfeatures2d.hpp" +#include "opencv2/ml.hpp" + +#include +#include +#include + +#ifdef _WIN32 +#define WIN32_LEAN_AND_MEAN +#include +#undef min +#undef max +#include "sys/types.h" +#endif +#include + +#define DEBUG_DESC_PROGRESS + +using namespace cv; +using namespace cv::xfeatures2d; +using namespace cv::ml; +using namespace std; + +const string paramsFile = "params.xml"; +const string vocabularyFile = "vocabulary.xml.gz"; +const string bowImageDescriptorsDir = "/bowImageDescriptors"; +const string svmsDir 
= "/svms"; +const string plotsDir = "/plots"; + +static void help(char** argv) +{ + cout << "\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. \n" + << "It shows how to use detectors, descriptors and recognition methods \n" + "Using OpenCV version %s\n" << CV_VERSION << "\n" + << "Call: \n" + << "Format:\n ./" << argv[0] << " [VOC path] [result directory] \n" + << " or: \n" + << " ./" << argv[0] << " [VOC path] [result directory] [feature detector] [descriptor extractor] [descriptor matcher] \n" + << "\n" + << "Input parameters: \n" + << "[VOC path] Path to Pascal VOC data (e.g. /home/my/VOCdevkit/VOC2010). Note: VOC2007-VOC2010 are supported. \n" + << "[result directory] Path to result diractory. Following folders will be created in [result directory]: \n" + << " bowImageDescriptors - to store image descriptors, \n" + << " svms - to store trained svms, \n" + << " plots - to store files for plots creating. \n" + << "[feature detector] Feature detector name (e.g. SURF, FAST...) - see createFeatureDetector() function in detectors.cpp \n" + << " Currently 12/2010, this is FAST, STAR, SIFT, SURF, MSER, GFTT, HARRIS \n" + << "[descriptor extractor] Descriptor extractor name (e.g. SURF, SIFT) - see createDescriptorExtractor() function in descriptors.cpp \n" + << " Currently 12/2010, this is SURF, OpponentSIFT, SIFT, OpponentSURF, BRIEF \n" + << "[descriptor matcher] Descriptor matcher name (e.g. 
BruteForce) - see createDescriptorMatcher() function in matchers.cpp \n" + << " Currently 12/2010, this is BruteForce, BruteForce-L1, FlannBased, BruteForce-Hamming, BruteForce-HammingLUT \n" + << "\n"; +} + +static void makeDir( const string& dir ) +{ +#if defined WIN32 || defined _WIN32 + CreateDirectory( dir.c_str(), 0 ); +#else + mkdir( dir.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH ); +#endif +} + +static void makeUsedDirs( const string& rootPath ) +{ + makeDir(rootPath + bowImageDescriptorsDir); + makeDir(rootPath + svmsDir); + makeDir(rootPath + plotsDir); +} + +/****************************************************************************************\ +* Classes to work with PASCAL VOC dataset * +\****************************************************************************************/ +// +// TODO: refactor this part of the code +// + + +//used to specify the (sub-)dataset over which operations are performed +enum ObdDatasetType {CV_OBD_TRAIN, CV_OBD_TEST}; + +class ObdObject +{ +public: + string object_class; + Rect boundingBox; +}; + +//extended object data specific to VOC +enum VocPose {CV_VOC_POSE_UNSPECIFIED, CV_VOC_POSE_FRONTAL, CV_VOC_POSE_REAR, CV_VOC_POSE_LEFT, CV_VOC_POSE_RIGHT}; +class VocObjectData +{ +public: + bool difficult; + bool occluded; + bool truncated; + VocPose pose; +}; +//enum VocDataset {CV_VOC2007, CV_VOC2008, CV_VOC2009, CV_VOC2010}; +enum VocPlotType {CV_VOC_PLOT_SCREEN, CV_VOC_PLOT_PNG}; +enum VocGT {CV_VOC_GT_NONE, CV_VOC_GT_DIFFICULT, CV_VOC_GT_PRESENT}; +enum VocConfCond {CV_VOC_CCOND_RECALL, CV_VOC_CCOND_SCORETHRESH}; +enum VocTask {CV_VOC_TASK_CLASSIFICATION, CV_VOC_TASK_DETECTION}; + +class ObdImage +{ +public: + ObdImage(string p_id, string p_path) : id(p_id), path(p_path) {} + string id; + string path; +}; + +//used by getDetectorGroundTruth to sort a two dimensional list of floats in descending order +class ObdScoreIndexSorter +{ +public: + float score; + int image_idx; + int obj_idx; + bool operator < (const 
ObdScoreIndexSorter& compare) const {return (score < compare.score);} +}; + +class VocData +{ +public: + VocData( const string& vocPath, bool useTestDataset ) + { initVoc( vocPath, useTestDataset ); } + ~VocData(){} + /* functions for returning classification/object data for multiple images given an object class */ + void getClassImages(const string& obj_class, const ObdDatasetType dataset, vector& images, vector& object_present); + void getClassObjects(const string& obj_class, const ObdDatasetType dataset, vector& images, vector >& objects); + void getClassObjects(const string& obj_class, const ObdDatasetType dataset, vector& images, vector >& objects, vector >& object_data, vector& ground_truth); + /* functions for returning object data for a single image given an image id */ + ObdImage getObjects(const string& id, vector& objects); + ObdImage getObjects(const string& id, vector& objects, vector& object_data); + ObdImage getObjects(const string& obj_class, const string& id, vector& objects, vector& object_data, VocGT& ground_truth); + /* functions for returning the ground truth (present/absent) for groups of images */ + void getClassifierGroundTruth(const string& obj_class, const vector& images, vector& ground_truth); + void getClassifierGroundTruth(const string& obj_class, const vector& images, vector& ground_truth); + int getDetectorGroundTruth(const string& obj_class, const ObdDatasetType dataset, const vector& images, const vector >& bounding_boxes, const vector >& scores, vector >& ground_truth, vector >& detection_difficult, bool ignore_difficult = true); + /* functions for writing VOC-compatible results files */ + void writeClassifierResultsFile(const string& out_dir, const string& obj_class, const ObdDatasetType dataset, const vector& images, const vector& scores, const int competition = 1, const bool overwrite_ifexists = false); + /* functions for calculating metrics from a set of classification/detection results */ + string getResultsFilename(const 
string& obj_class, const VocTask task, const ObdDatasetType dataset, const int competition = -1, const int number = -1); + void calcClassifierPrecRecall(const string& obj_class, const vector& images, const vector& scores, vector& precision, vector& recall, float& ap, vector& ranking); + void calcClassifierPrecRecall(const string& obj_class, const vector& images, const vector& scores, vector& precision, vector& recall, float& ap); + void calcClassifierPrecRecall(const string& input_file, vector& precision, vector& recall, float& ap, bool outputRankingFile = false); + /* functions for calculating confusion matrices */ + void calcClassifierConfMatRow(const string& obj_class, const vector& images, const vector& scores, const VocConfCond cond, const float threshold, vector& output_headers, vector& output_values); + void calcDetectorConfMatRow(const string& obj_class, const ObdDatasetType dataset, const vector& images, const vector >& scores, const vector >& bounding_boxes, const VocConfCond cond, const float threshold, vector& output_headers, vector& output_values, bool ignore_difficult = true); + /* functions for outputting gnuplot output files */ + void savePrecRecallToGnuplot(const string& output_file, const vector& precision, const vector& recall, const float ap, const string title = string(), const VocPlotType plot_type = CV_VOC_PLOT_SCREEN); + /* functions for reading in result/ground truth files */ + void readClassifierGroundTruth(const string& obj_class, const ObdDatasetType dataset, vector& images, vector& object_present); + void readClassifierResultsFile(const std:: string& input_file, vector& images, vector& scores); + void readDetectorResultsFile(const string& input_file, vector& images, vector >& scores, vector >& bounding_boxes); + /* functions for getting dataset info */ + const vector& getObjectClasses(); + string getResultsDirectory(); +protected: + void initVoc( const string& vocPath, const bool useTestDataset ); + void initVoc2007to2010( const string& 
vocPath, const bool useTestDataset); + void readClassifierGroundTruth(const string& filename, vector& image_codes, vector& object_present); + void readClassifierResultsFile(const string& input_file, vector& image_codes, vector& scores); + void readDetectorResultsFile(const string& input_file, vector& image_codes, vector >& scores, vector >& bounding_boxes); + void extractVocObjects(const string filename, vector& objects, vector& object_data); + string getImagePath(const string& input_str); + + void getClassImages_impl(const string& obj_class, const string& dataset_str, vector& images, vector& object_present); + void calcPrecRecall_impl(const vector& ground_truth, const vector& scores, vector& precision, vector& recall, float& ap, vector& ranking, int recall_normalization = -1); + + //test two bounding boxes to see if they meet the overlap criteria defined in the VOC documentation + float testBoundingBoxesForOverlap(const Rect detection, const Rect ground_truth); + //extract class and dataset name from a VOC-standard classification/detection results filename + void extractDataFromResultsFilename(const string& input_file, string& class_name, string& dataset_name); + //get classifier ground truth for a single image + bool getClassifierGroundTruthImage(const string& obj_class, const string& id); + + //utility functions + void getSortOrder(const vector& values, vector& order, bool descending = true); + int stringToInteger(const string input_str); + void readFileToString(const string filename, string& file_contents); + string integerToString(const int input_int); + string checkFilenamePathsep(const string filename, bool add_trailing_slash = false); + void convertImageCodesToObdImages(const vector& image_codes, vector& images); + int extractXMLBlock(const string src, const string tag, const int searchpos, string& tag_contents); + //utility sorter + struct orderingSorter + { + bool operator ()(std::pair::const_iterator> const& a, std::pair::const_iterator> const& b) + { + 
return (*a.second) > (*b.second); + } + }; + //data members + string m_vocPath; + string m_vocName; + //string m_resPath; + + string m_annotation_path; + string m_image_path; + string m_imageset_path; + string m_class_imageset_path; + + vector m_classifier_gt_all_ids; + vector m_classifier_gt_all_present; + string m_classifier_gt_class; + + //data members + string m_train_set; + string m_test_set; + + vector m_object_classes; + + + float m_min_overlap; + bool m_sampled_ap; +}; + + +//Return the classification ground truth data for all images of a given VOC object class +//-------------------------------------------------------------------------------------- +//INPUTS: +// - obj_class The VOC object class identifier string +// - dataset Specifies whether to extract images from the training or test set +//OUTPUTS: +// - images An array of ObdImage containing info of all images extracted from the ground truth file +// - object_present An array of bools specifying whether the object defined by 'obj_class' is present in each image or not +//NOTES: +// This function is primarily useful for the classification task, where only +// whether a given object is present or not in an image is required, and not each object instance's +// position etc. 
+void VocData::getClassImages(const string& obj_class, const ObdDatasetType dataset, vector& images, vector& object_present) +{ + string dataset_str; + //generate the filename of the classification ground-truth textfile for the object class + if (dataset == CV_OBD_TRAIN) + { + dataset_str = m_train_set; + } else { + dataset_str = m_test_set; + } + + getClassImages_impl(obj_class, dataset_str, images, object_present); +} + +void VocData::getClassImages_impl(const string& obj_class, const string& dataset_str, vector& images, vector& object_present) +{ + //generate the filename of the classification ground-truth textfile for the object class + string gtFilename = m_class_imageset_path; + gtFilename.replace(gtFilename.find("%s"),2,obj_class); + gtFilename.replace(gtFilename.find("%s"),2,dataset_str); + + //parse the ground truth file, storing in two separate vectors + //for the image code and the ground truth value + vector image_codes; + readClassifierGroundTruth(gtFilename, image_codes, object_present); + + //prepare output arrays + images.clear(); + + convertImageCodesToObdImages(image_codes, images); +} + +//Return the object data for all images of a given VOC object class +//----------------------------------------------------------------- +//INPUTS: +// - obj_class The VOC object class identifier string +// - dataset Specifies whether to extract images from the training or test set +//OUTPUTS: +// - images An array of ObdImage containing info of all images in chosen dataset (tag, path etc.) +// - objects Contains the extended object info (bounding box etc.) for each object instance in each image +// - object_data Contains VOC-specific extended object info (marked difficult etc.) +// - ground_truth Specifies whether there are any difficult/non-difficult instances of the current +// object class within each image +//NOTES: +// This function returns extended object information in addition to the absent/present +// classification data returned by getClassImages. 
The objects returned for each image in the 'objects' +// array are of all object classes present in the image, and not just the class defined by 'obj_class'. +// 'ground_truth' can be used to determine quickly whether an object instance of the given class is present +// in an image or not. +void VocData::getClassObjects(const string& obj_class, const ObdDatasetType dataset, vector& images, vector >& objects) +{ + vector > object_data; + vector ground_truth; + + getClassObjects(obj_class,dataset,images,objects,object_data,ground_truth); +} + +void VocData::getClassObjects(const string& obj_class, const ObdDatasetType dataset, vector& images, vector >& objects, vector >& object_data, vector& ground_truth) +{ + //generate the filename of the classification ground-truth textfile for the object class + string gtFilename = m_class_imageset_path; + gtFilename.replace(gtFilename.find("%s"),2,obj_class); + if (dataset == CV_OBD_TRAIN) + { + gtFilename.replace(gtFilename.find("%s"),2,m_train_set); + } else { + gtFilename.replace(gtFilename.find("%s"),2,m_test_set); + } + + //parse the ground truth file, storing in two separate vectors + //for the image code and the ground truth value + vector image_codes; + vector object_present; + readClassifierGroundTruth(gtFilename, image_codes, object_present); + + //prepare output arrays + images.clear(); + objects.clear(); + object_data.clear(); + ground_truth.clear(); + + string annotationFilename; + vector image_objects; + vector image_object_data; + VocGT image_gt; + + //transfer to output arrays and read in object data for each image + for (size_t i = 0; i < image_codes.size(); ++i) + { + ObdImage image = getObjects(obj_class, image_codes[i], image_objects, image_object_data, image_gt); + + images.push_back(image); + objects.push_back(image_objects); + object_data.push_back(image_object_data); + ground_truth.push_back(image_gt); + } +} + +//Return ground truth data for the objects present in an image with a given UID 
+//----------------------------------------------------------------------------- +//INPUTS: +// - id VOC Dataset unique identifier (string code in form YYYY_XXXXXX where YYYY is the year) +//OUTPUTS: +// - obj_class (*3) Specifies the object class to use to resolve 'ground_truth' +// - objects Contains the extended object info (bounding box etc.) for each object in the image +// - object_data (*2,3) Contains VOC-specific extended object info (marked difficult etc.) +// - ground_truth (*3) Specifies whether there are any difficult/non-difficult instances of the current +// object class within the image +//RETURN VALUE: +// ObdImage containing path and other details of image file with given code +//NOTES: +// There are three versions of this function +// * One returns a simple array of objects given an id [1] +// * One returns the same as (1) plus VOC specific object data [2] +// * One returns the same as (2) plus the ground_truth flag. This also requires an extra input obj_class [3] +ObdImage VocData::getObjects(const string& id, vector& objects) +{ + vector object_data; + ObdImage image = getObjects(id, objects, object_data); + + return image; +} + +ObdImage VocData::getObjects(const string& id, vector& objects, vector& object_data) +{ + //first generate the filename of the annotation file + string annotationFilename = m_annotation_path; + + annotationFilename.replace(annotationFilename.find("%s"),2,id); + + //extract objects contained in the current image from the xml + extractVocObjects(annotationFilename,objects,object_data); + + //generate image path from extracted string code + string path = getImagePath(id); + + ObdImage image(id, path); + return image; +} + +ObdImage VocData::getObjects(const string& obj_class, const string& id, vector& objects, vector& object_data, VocGT& ground_truth) +{ + + //extract object data (except for ground truth flag) + ObdImage image = getObjects(id,objects,object_data); + + //pregenerate a flag to indicate whether the current 
class is present or not in the image + ground_truth = CV_VOC_GT_NONE; + //iterate through all objects in current image + for (size_t j = 0; j < objects.size(); ++j) + { + if (objects[j].object_class == obj_class) + { + if (object_data[j].difficult == false) + { + //if at least one non-difficult example is present, this flag is always set to CV_VOC_GT_PRESENT + ground_truth = CV_VOC_GT_PRESENT; + break; + } else { + //set if at least one object instance is present, but it is marked difficult + ground_truth = CV_VOC_GT_DIFFICULT; + } + } + } + + return image; +} + +//Return ground truth data for the presence/absence of a given object class in an arbitrary array of images +//--------------------------------------------------------------------------------------------------------- +//INPUTS: +// - obj_class The VOC object class identifier string +// - images An array of ObdImage OR strings containing the images for which ground truth +// will be computed +//OUTPUTS: +// - ground_truth An output array indicating the presence/absence of obj_class within each image +void VocData::getClassifierGroundTruth(const string& obj_class, const vector& images, vector& ground_truth) +{ + vector(images.size()).swap(ground_truth); + + vector objects; + vector object_data; + vector::iterator gt_it = ground_truth.begin(); + for (vector::const_iterator it = images.begin(); it != images.end(); ++it, ++gt_it) + { + //getObjects(obj_class, it->id, objects, object_data, voc_ground_truth); + (*gt_it) = (getClassifierGroundTruthImage(obj_class, it->id)); + } +} + +void VocData::getClassifierGroundTruth(const string& obj_class, const vector& images, vector& ground_truth) +{ + vector(images.size()).swap(ground_truth); + + vector objects; + vector object_data; + vector::iterator gt_it = ground_truth.begin(); + for (vector::const_iterator it = images.begin(); it != images.end(); ++it, ++gt_it) + { + //getObjects(obj_class, (*it), objects, object_data, voc_ground_truth); + (*gt_it) = 
(getClassifierGroundTruthImage(obj_class, (*it))); + } +} + +//Return ground truth data for the accuracy of detection results +//-------------------------------------------------------------- +//INPUTS: +// - obj_class The VOC object class identifier string +// - images An array of ObdImage containing the images for which ground truth +// will be computed +// - bounding_boxes A 2D input array containing the bounding box rects of the objects of +// obj_class which were detected in each image +//OUTPUTS: +// - ground_truth A 2D output array indicating whether each object detection was accurate +// or not +// - detection_difficult A 2D output array indicating whether the detection fired on an object +// marked as 'difficult'. This allows it to be ignored if necessary +// (the voc documentation specifies objects marked as difficult +// have no effects on the results and are effectively ignored) +// - (ignore_difficult) If set to true, objects marked as difficult will be ignored when returning +// the number of hits for p-r normalization (default = true) +//RETURN VALUE: +// Returns the number of object hits in total in the gt to allow proper normalization +// of a p-r curve +//NOTES: +// As stated in the VOC documentation, multiple detections of the same object in an image are +// considered FALSE detections e.g. 
5 detections of a single object is counted as 1 correct +// detection and 4 false detections - it is the responsibility of the participant's system +// to filter multiple detections from its output +int VocData::getDetectorGroundTruth(const string& obj_class, const ObdDatasetType dataset, const vector& images, const vector >& bounding_boxes, const vector >& scores, vector >& ground_truth, vector >& detection_difficult, bool ignore_difficult) +{ + int recall_normalization = 0; + + /* first create a list of indices referring to the elements of bounding_boxes and scores in + * descending order of scores */ + vector sorted_ids; + { + /* first count how many objects to allow preallocation */ + size_t obj_count = 0; + CV_Assert(images.size() == bounding_boxes.size()); + CV_Assert(scores.size() == bounding_boxes.size()); + for (size_t im_idx = 0; im_idx < scores.size(); ++im_idx) + { + CV_Assert(scores[im_idx].size() == bounding_boxes[im_idx].size()); + obj_count += scores[im_idx].size(); + } + /* preallocate id vector */ + sorted_ids.resize(obj_count); + /* now copy across scores and indexes to preallocated vector */ + int flat_pos = 0; + for (size_t im_idx = 0; im_idx < scores.size(); ++im_idx) + { + for (size_t ob_idx = 0; ob_idx < scores[im_idx].size(); ++ob_idx) + { + sorted_ids[flat_pos].score = scores[im_idx][ob_idx]; + sorted_ids[flat_pos].image_idx = (int)im_idx; + sorted_ids[flat_pos].obj_idx = (int)ob_idx; + ++flat_pos; + } + } + /* and sort the vector in descending order of score */ + std::sort(sorted_ids.begin(),sorted_ids.end()); + std::reverse(sorted_ids.begin(),sorted_ids.end()); + } + + /* prepare ground truth + difficult vector (1st dimension) */ + vector >(images.size()).swap(ground_truth); + vector >(images.size()).swap(detection_difficult); + vector > detected(images.size()); + + vector > img_objects(images.size()); + vector > img_object_data(images.size()); + /* preload object ground truth bounding box data */ + { + vector > 
img_objects_all(images.size()); + vector > img_object_data_all(images.size()); + for (size_t image_idx = 0; image_idx < images.size(); ++image_idx) + { + /* prepopulate ground truth bounding boxes */ + getObjects(images[image_idx].id, img_objects_all[image_idx], img_object_data_all[image_idx]); + /* meanwhile, also set length of target ground truth + difficult vector to same as number of object detections (2nd dimension) */ + ground_truth[image_idx].resize(bounding_boxes[image_idx].size()); + detection_difficult[image_idx].resize(bounding_boxes[image_idx].size()); + } + + /* save only instances of the object class concerned */ + for (size_t image_idx = 0; image_idx < images.size(); ++image_idx) + { + for (size_t obj_idx = 0; obj_idx < img_objects_all[image_idx].size(); ++obj_idx) + { + if (img_objects_all[image_idx][obj_idx].object_class == obj_class) + { + img_objects[image_idx].push_back(img_objects_all[image_idx][obj_idx]); + img_object_data[image_idx].push_back(img_object_data_all[image_idx][obj_idx]); + } + } + detected[image_idx].resize(img_objects[image_idx].size(), false); + } + } + + /* calculate the total number of objects in the ground truth for the current dataset */ + { + vector gt_images; + vector gt_object_present; + getClassImages(obj_class, dataset, gt_images, gt_object_present); + + for (size_t image_idx = 0; image_idx < gt_images.size(); ++image_idx) + { + vector gt_img_objects; + vector gt_img_object_data; + getObjects(gt_images[image_idx].id, gt_img_objects, gt_img_object_data); + for (size_t obj_idx = 0; obj_idx < gt_img_objects.size(); ++obj_idx) + { + if (gt_img_objects[obj_idx].object_class == obj_class) + { + if ((gt_img_object_data[obj_idx].difficult == false) || (ignore_difficult == false)) + ++recall_normalization; + } + } + } + } + +#ifdef PR_DEBUG + int printed_count = 0; +#endif + /* now iterate through detections in descending order of score, assigning to ground truth bounding boxes if possible */ + for (size_t detect_idx = 0; 
detect_idx < sorted_ids.size(); ++detect_idx) + { + //read in indexes to make following code easier to read + int im_idx = sorted_ids[detect_idx].image_idx; + int ob_idx = sorted_ids[detect_idx].obj_idx; + //set ground truth for the current object to false by default + ground_truth[im_idx][ob_idx] = false; + detection_difficult[im_idx][ob_idx] = false; + float maxov = -1.0; + bool max_is_difficult = false; + int max_gt_obj_idx = -1; + //-- for each detected object iterate through objects present in the bounding box ground truth -- + for (size_t gt_obj_idx = 0; gt_obj_idx < img_objects[im_idx].size(); ++gt_obj_idx) + { + if (detected[im_idx][gt_obj_idx] == false) + { + //check if the detected object and ground truth object overlap by a sufficient margin + float ov = testBoundingBoxesForOverlap(bounding_boxes[im_idx][ob_idx], img_objects[im_idx][gt_obj_idx].boundingBox); + if (ov != -1.0) + { + //if all conditions are met store the overlap score and index (as objects are assigned to the highest scoring match) + if (ov > maxov) + { + maxov = ov; + max_gt_obj_idx = (int)gt_obj_idx; + //store whether the maximum detection is marked as difficult or not + max_is_difficult = (img_object_data[im_idx][gt_obj_idx].difficult); + } + } + } + } + //-- if a match was found, set the ground truth of the current object to true -- + if (maxov != -1.0) + { + CV_Assert(max_gt_obj_idx != -1); + ground_truth[im_idx][ob_idx] = true; + //store whether the maximum detection was marked as 'difficult' or not + detection_difficult[im_idx][ob_idx] = max_is_difficult; + //remove the ground truth object so it doesn't match with subsequent detected objects + //** this is the behaviour defined by the voc documentation ** + detected[im_idx][max_gt_obj_idx] = true; + } +#ifdef PR_DEBUG + if (printed_count < 10) + { + cout << printed_count << ": id=" << images[im_idx].id << ", score=" << scores[im_idx][ob_idx] << " (" << ob_idx << ") [" << bounding_boxes[im_idx][ob_idx].x << "," << + 
bounding_boxes[im_idx][ob_idx].y << "," << bounding_boxes[im_idx][ob_idx].width + bounding_boxes[im_idx][ob_idx].x << + "," << bounding_boxes[im_idx][ob_idx].height + bounding_boxes[im_idx][ob_idx].y << "] detected=" << ground_truth[im_idx][ob_idx] << + ", difficult=" << detection_difficult[im_idx][ob_idx] << endl; + ++printed_count; + /* print ground truth */ + for (int gt_obj_idx = 0; gt_obj_idx < img_objects[im_idx].size(); ++gt_obj_idx) + { + cout << " GT: [" << img_objects[im_idx][gt_obj_idx].boundingBox.x << "," << + img_objects[im_idx][gt_obj_idx].boundingBox.y << "," << img_objects[im_idx][gt_obj_idx].boundingBox.width + img_objects[im_idx][gt_obj_idx].boundingBox.x << + "," << img_objects[im_idx][gt_obj_idx].boundingBox.height + img_objects[im_idx][gt_obj_idx].boundingBox.y << "]"; + if (gt_obj_idx == max_gt_obj_idx) cout << " <--- (" << maxov << " overlap)"; + cout << endl; + } + } +#endif + } + + return recall_normalization; +} + +//Write VOC-compliant classifier results file +//------------------------------------------- +//INPUTS: +// - obj_class The VOC object class identifier string +// - dataset Specifies whether working with the training or test set +// - images An array of ObdImage containing the images for which data will be saved to the result file +// - scores A corresponding array of confidence scores given a query +// - (competition) If specified, defines which competition the results are for (see VOC documentation - default 1) +//NOTES: +// The result file path and filename are determined automatically using m_results_directory as a base +void VocData::writeClassifierResultsFile( const string& out_dir, const string& obj_class, const ObdDatasetType dataset, const vector& images, const vector& scores, const int competition, const bool overwrite_ifexists) +{ + CV_Assert(images.size() == scores.size()); + + string output_file_base, output_file; + if (dataset == CV_OBD_TRAIN) + { + output_file_base = out_dir + "/comp" + 
integerToString(competition) + "_cls_" + m_train_set + "_" + obj_class; + } else { + output_file_base = out_dir + "/comp" + integerToString(competition) + "_cls_" + m_test_set + "_" + obj_class; + } + output_file = output_file_base + ".txt"; + + //check if file exists, and if so create a numbered new file instead + if (overwrite_ifexists == false) + { + struct stat stFileInfo; + if (stat(output_file.c_str(),&stFileInfo) == 0) + { + string output_file_new; + int filenum = 0; + do + { + ++filenum; + output_file_new = output_file_base + "_" + integerToString(filenum); + output_file = output_file_new + ".txt"; + } while (stat(output_file.c_str(),&stFileInfo) == 0); + } + } + + //output data to file + std::ofstream result_file(output_file.c_str()); + if (result_file.is_open()) + { + for (size_t i = 0; i < images.size(); ++i) + { + result_file << images[i].id << " " << scores[i] << endl; + } + result_file.close(); + } else { + string err_msg = "could not open classifier results file '" + output_file + "' for writing. Before running for the first time, a 'results' subdirectory should be created within the VOC dataset base directory. e.g. 
if the VOC data is stored in /VOC/VOC2010 then the path /VOC/results must be created."; + CV_Error(Error::StsError,err_msg.c_str()); + } +} + +//--------------------------------------- +//CALCULATE METRICS FROM VOC RESULTS DATA +//--------------------------------------- + +//Utility function to construct a VOC-standard classification results filename +//---------------------------------------------------------------------------- +//INPUTS: +// - obj_class The VOC object class identifier string +// - task Specifies whether to generate a filename for the classification or detection task +// - dataset Specifies whether working with the training or test set +// - (competition) If specified, defines which competition the results are for (see VOC documentation +// default of -1 means this is set to 1 for the classification task and 3 for the detection task) +// - (number) If specified and above 0, defines which of a number of duplicate results file produced for a given set of +// of settings should be used (this number will be added as a postfix to the filename) +//NOTES: +// This is primarily useful for returning the filename of a classification file previously computed using writeClassifierResultsFile +// for example when calling calcClassifierPrecRecall +string VocData::getResultsFilename(const string& obj_class, const VocTask task, const ObdDatasetType dataset, const int competition, const int number) +{ + if ((competition < 1) && (competition != -1)) + CV_Error(Error::StsBadArg,"competition argument should be a positive non-zero number or -1 to accept the default"); + if ((number < 1) && (number != -1)) + CV_Error(Error::StsBadArg,"number argument should be a positive non-zero number or -1 to accept the default"); + + string dset, task_type; + + if (dataset == CV_OBD_TRAIN) + { + dset = m_train_set; + } else { + dset = m_test_set; + } + + int comp = competition; + if (task == CV_VOC_TASK_CLASSIFICATION) + { + task_type = "cls"; + if (comp == -1) comp = 1; + } else { 
+ task_type = "det"; + if (comp == -1) comp = 3; + } + + stringstream ss; + if (number < 1) + { + ss << "comp" << comp << "_" << task_type << "_" << dset << "_" << obj_class << ".txt"; + } else { + ss << "comp" << comp << "_" << task_type << "_" << dset << "_" << obj_class << "_" << number << ".txt"; + } + + string filename = ss.str(); + return filename; +} + +//Calculate metrics for classification results +//-------------------------------------------- +//INPUTS: +// - ground_truth A vector of booleans determining whether the currently tested class is present in each input image +// - scores A vector containing the similarity score for each input image (higher is more similar) +//OUTPUTS: +// - precision A vector containing the precision calculated at each datapoint of a p-r curve generated from the result set +// - recall A vector containing the recall calculated at each datapoint of a p-r curve generated from the result set +// - ap The ap metric calculated from the result set +// - (ranking) A vector of the same length as 'ground_truth' and 'scores' containing the order of the indices in both of +// these arrays when sorting by the ranking score in descending order +//NOTES: +// The result file path and filename are determined automatically using m_results_directory as a base +void VocData::calcClassifierPrecRecall(const string& obj_class, const vector& images, const vector& scores, vector& precision, vector& recall, float& ap, vector& ranking) +{ + vector res_ground_truth; + getClassifierGroundTruth(obj_class, images, res_ground_truth); + + calcPrecRecall_impl(res_ground_truth, scores, precision, recall, ap, ranking); +} + +void VocData::calcClassifierPrecRecall(const string& obj_class, const vector& images, const vector& scores, vector& precision, vector& recall, float& ap) +{ + vector res_ground_truth; + getClassifierGroundTruth(obj_class, images, res_ground_truth); + + vector ranking; + calcPrecRecall_impl(res_ground_truth, scores, precision, recall, ap, 
ranking); +} + +//< Overloaded version which accepts VOC classification result file input instead of array of scores/ground truth > +//INPUTS: +// - input_file The path to the VOC standard results file to use for calculating precision/recall +// If a full path is not specified, it is assumed this file is in the VOC standard results directory +// A VOC standard filename can be retrieved (as used by writeClassifierResultsFile) by calling getClassifierResultsFilename + +void VocData::calcClassifierPrecRecall(const string& input_file, vector& precision, vector& recall, float& ap, bool outputRankingFile) +{ + //read in classification results file + vector res_image_codes; + vector res_scores; + + string input_file_std = checkFilenamePathsep(input_file); + readClassifierResultsFile(input_file_std, res_image_codes, res_scores); + + //extract the object class and dataset from the results file filename + string class_name, dataset_name; + extractDataFromResultsFilename(input_file_std, class_name, dataset_name); + + //generate the ground truth for the images extracted from the results file + vector res_ground_truth; + + getClassifierGroundTruth(class_name, res_image_codes, res_ground_truth); + + if (outputRankingFile) + { + /* 1. store sorting order by score (descending) in 'order' */ + vector::const_iterator> > order(res_scores.size()); + + size_t n = 0; + for (vector::const_iterator it = res_scores.begin(); it != res_scores.end(); ++it, ++n) + order[n] = make_pair(n, it); + + std::sort(order.begin(),order.end(),orderingSorter()); + + /* 2. 
save ranking results to text file */ + string input_file_std1 = checkFilenamePathsep(input_file); + size_t fnamestart = input_file_std1.rfind("/"); + string scoregt_file_str = input_file_std1.substr(0,fnamestart+1) + "scoregt_" + class_name + ".txt"; + std::ofstream scoregt_file(scoregt_file_str.c_str()); + if (scoregt_file.is_open()) + { + for (size_t i = 0; i < res_scores.size(); ++i) + { + scoregt_file << res_image_codes[order[i].first] << " " << res_scores[order[i].first] << " " << res_ground_truth[order[i].first] << endl; + } + scoregt_file.close(); + } else { + string err_msg = "could not open scoregt file '" + scoregt_file_str + "' for writing."; + CV_Error(Error::StsError,err_msg.c_str()); + } + } + + //finally, calculate precision+recall+ap + vector ranking; + calcPrecRecall_impl(res_ground_truth,res_scores,precision,recall,ap,ranking); +} + +//< Protected implementation of Precision-Recall calculation used by both calcClassifierPrecRecall and calcDetectorPrecRecall > + +void VocData::calcPrecRecall_impl(const vector& ground_truth, const vector& scores, vector& precision, vector& recall, float& ap, vector& ranking, int recall_normalization) +{ + CV_Assert(ground_truth.size() == scores.size()); + + //add extra element for p-r at 0 recall (in case that first retrieved is positive) + vector(scores.size()+1).swap(precision); + vector(scores.size()+1).swap(recall); + + // SORT RESULTS BY THEIR SCORE + /* 1. 
store sorting order in 'order' */ + VocData::getSortOrder(scores, ranking); + +#ifdef PR_DEBUG + std::ofstream scoregt_file("D:/pr.txt"); + if (scoregt_file.is_open()) + { + for (int i = 0; i < scores.size(); ++i) + { + scoregt_file << scores[ranking[i]] << " " << ground_truth[ranking[i]] << endl; + } + scoregt_file.close(); + } +#endif + + // CALCULATE PRECISION+RECALL + + int retrieved_hits = 0; + + int recall_norm; + if (recall_normalization != -1) + { + recall_norm = recall_normalization; + } else { +#ifdef CV_CXX11 + recall_norm = (int)std::count_if(ground_truth.begin(),ground_truth.end(), + [](const char a) { return a == (char)1; }); +#else + recall_norm = (int)std::count_if(ground_truth.begin(),ground_truth.end(),std::bind2nd(std::equal_to(),(char)1)); +#endif + } + + ap = 0; + recall[0] = 0; + for (size_t idx = 0; idx < ground_truth.size(); ++idx) + { + if (ground_truth[ranking[idx]] != 0) ++retrieved_hits; + + precision[idx+1] = static_cast(retrieved_hits)/static_cast(idx+1); + recall[idx+1] = static_cast(retrieved_hits)/static_cast(recall_norm); + + if (idx == 0) + { + //add further point at 0 recall with the same precision value as the first computed point + precision[idx] = precision[idx+1]; + } + if (recall[idx+1] == 1.0) + { + //if recall = 1, then end early as all positive images have been found + recall.resize(idx+2); + precision.resize(idx+2); + break; + } + } + + /* ap calculation */ + if (m_sampled_ap == false) + { + // FOR VOC2010+ AP IS CALCULATED FROM ALL DATAPOINTS + /* make precision monotonically decreasing for purposes of calculating ap */ + vector precision_monot(precision.size()); + vector::iterator prec_m_it = precision_monot.begin(); + for (vector::iterator prec_it = precision.begin(); prec_it != precision.end(); ++prec_it, ++prec_m_it) + { + vector::iterator max_elem; + max_elem = std::max_element(prec_it,precision.end()); + (*prec_m_it) = (*max_elem); + } + /* calculate ap */ + for (size_t idx = 0; idx < (recall.size()-1); ++idx) + { 
+ ap += (recall[idx+1] - recall[idx])*precision_monot[idx+1] + //no need to take min of prec - is monotonically decreasing + 0.5f*(recall[idx+1] - recall[idx])*std::abs(precision_monot[idx+1] - precision_monot[idx]); + } + } else { + // FOR BEFORE VOC2010 AP IS CALCULATED BY SAMPLING PRECISION AT RECALL 0.0,0.1,..,1.0 + + for (float recall_pos = 0.f; recall_pos <= 1.f; recall_pos += 0.1f) + { + //find iterator of the precision corresponding to the first recall >= recall_pos + vector::iterator recall_it = recall.begin(); + vector::iterator prec_it = precision.begin(); + + while ((*recall_it) < recall_pos) + { + ++recall_it; + ++prec_it; + if (recall_it == recall.end()) break; + } + + /* if no recall >= recall_pos found, this level of recall is never reached so stop adding to ap */ + if (recall_it == recall.end()) break; + + /* if the prec_it is valid, compute the max precision at this level of recall or higher */ + vector::iterator max_prec = std::max_element(prec_it,precision.end()); + + ap += (*max_prec)/11; + } + } +} + +/* functions for calculating confusion matrix rows */ + +//Calculate rows of a confusion matrix +//------------------------------------ +//INPUTS: +// - obj_class The VOC object class identifier string for the confusion matrix row to compute +// - images An array of ObdImage containing the images to use for the computation +// - scores A corresponding array of confidence scores for the presence of obj_class in each image +// - cond Defines whether to use a cut off point based on recall (CV_VOC_CCOND_RECALL) or score +// (CV_VOC_CCOND_SCORETHRESH) the latter is useful for classifier detections where positive +// values are positive detections and negative values are negative detections +// - threshold Threshold value for cond. In case of CV_VOC_CCOND_RECALL, is proportion recall (e.g. 0.5). +// In the case of CV_VOC_CCOND_SCORETHRESH is the value above which to count results. 
+//OUTPUTS: +// - output_headers An output vector of object class headers for the confusion matrix row +// - output_values An output vector of values for the confusion matrix row corresponding to the classes +// defined in output_headers +//NOTES: +// The methodology used by the classifier version of this function is that true positives have a single unit +// added to the obj_class column in the confusion matrix row, whereas false positives have a single unit +// distributed in proportion between all the columns in the confusion matrix row corresponding to the objects +// present in the image. +void VocData::calcClassifierConfMatRow(const string& obj_class, const vector& images, const vector& scores, const VocConfCond cond, const float threshold, vector& output_headers, vector& output_values) +{ + CV_Assert(images.size() == scores.size()); + + // SORT RESULTS BY THEIR SCORE + /* 1. store sorting order in 'ranking' */ + vector ranking; + VocData::getSortOrder(scores, ranking); + + // CALCULATE CONFUSION MATRIX ENTRIES + /* prepare object category headers */ + output_headers = m_object_classes; + vector(output_headers.size(),0.0).swap(output_values); + /* find the index of the target object class in the headers for later use */ + int target_idx; + { + vector::iterator target_idx_it = std::find(output_headers.begin(),output_headers.end(),obj_class); + /* if the target class can not be found, raise an exception */ + if (target_idx_it == output_headers.end()) + { + string err_msg = "could not find the target object class '" + obj_class + "' in list of valid classes."; + CV_Error(Error::StsError,err_msg.c_str()); + } + /* convert iterator to index */ + target_idx = (int)std::distance(output_headers.begin(),target_idx_it); + } + + /* prepare variables related to calculating recall if using the recall threshold */ + int retrieved_hits = 0; + int total_relevant = 0; + if (cond == CV_VOC_CCOND_RECALL) + { + vector ground_truth; + /* in order to calculate the total number of 
relevant images for normalization of recall + it's necessary to extract the ground truth for the images under consideration */ + getClassifierGroundTruth(obj_class, images, ground_truth); +#ifdef CV_CXX11 + total_relevant = (int)std::count_if(ground_truth.begin(),ground_truth.end(), + [](const char a) { return a == (char)1; }); +#else + total_relevant = (int)std::count_if(ground_truth.begin(),ground_truth.end(),std::bind2nd(std::equal_to(),(char)1)); +#endif + } + + /* iterate through images */ + vector img_objects; + vector img_object_data; + int total_images = 0; + for (size_t image_idx = 0; image_idx < images.size(); ++image_idx) + { + /* if using the score as the break condition, check for it now */ + if (cond == CV_VOC_CCOND_SCORETHRESH) + { + if (scores[ranking[image_idx]] <= threshold) break; + } + /* if continuing for this iteration, increment the image counter for later normalization */ + ++total_images; + /* for each image retrieve the objects contained */ + getObjects(images[ranking[image_idx]].id, img_objects, img_object_data); + //check if the tested for object class is present + if (getClassifierGroundTruthImage(obj_class, images[ranking[image_idx]].id)) + { + //if the target class is present, assign fully to the target class element in the confusion matrix row + output_values[target_idx] += 1.0; + if (cond == CV_VOC_CCOND_RECALL) ++retrieved_hits; + } else { + //first delete all objects marked as difficult + for (size_t obj_idx = 0; obj_idx < img_objects.size(); ++obj_idx) + { + if (img_object_data[obj_idx].difficult == true) + { + vector::iterator it1 = img_objects.begin(); + std::advance(it1,obj_idx); + img_objects.erase(it1); + vector::iterator it2 = img_object_data.begin(); + std::advance(it2,obj_idx); + img_object_data.erase(it2); + --obj_idx; + } + } + //if the target class is not present, add values to the confusion matrix row in equal proportions to all objects present in the image + for (size_t obj_idx = 0; obj_idx < img_objects.size(); 
++obj_idx) + { + //find the index of the currently considered object + vector::iterator class_idx_it = std::find(output_headers.begin(),output_headers.end(),img_objects[obj_idx].object_class); + //if the class name extracted from the ground truth file could not be found in the list of available classes, raise an exception + if (class_idx_it == output_headers.end()) + { + string err_msg = "could not find object class '" + img_objects[obj_idx].object_class + "' specified in the ground truth file of '" + images[ranking[image_idx]].id +"'in list of valid classes."; + CV_Error(Error::StsError,err_msg.c_str()); + } + /* convert iterator to index */ + int class_idx = (int)std::distance(output_headers.begin(),class_idx_it); + //add to confusion matrix row in proportion + output_values[class_idx] += 1.f/static_cast(img_objects.size()); + } + } + //check break conditions if breaking on certain level of recall + if (cond == CV_VOC_CCOND_RECALL) + { + if(static_cast(retrieved_hits)/static_cast(total_relevant) >= threshold) break; + } + } + /* finally, normalize confusion matrix row */ + for (vector::iterator it = output_values.begin(); it < output_values.end(); ++it) + { + (*it) /= static_cast(total_images); + } +} + +// NOTE: doesn't ignore repeated detections +void VocData::calcDetectorConfMatRow(const string& obj_class, const ObdDatasetType dataset, const vector& images, const vector >& scores, const vector >& bounding_boxes, const VocConfCond cond, const float threshold, vector& output_headers, vector& output_values, bool ignore_difficult) +{ + CV_Assert(images.size() == scores.size()); + CV_Assert(images.size() == bounding_boxes.size()); + + //collapse scores and ground_truth vectors into 1D vectors to allow ranking + /* define final flat vectors */ + vector images_flat; + vector scores_flat; + vector bounding_boxes_flat; + { + /* first count how many objects to allow preallocation */ + int obj_count = 0; + CV_Assert(scores.size() == bounding_boxes.size()); + for (size_t 
img_idx = 0; img_idx < scores.size(); ++img_idx) + { + CV_Assert(scores[img_idx].size() == bounding_boxes[img_idx].size()); + for (size_t obj_idx = 0; obj_idx < scores[img_idx].size(); ++obj_idx) + { + ++obj_count; + } + } + /* preallocate vectors */ + images_flat.resize(obj_count); + scores_flat.resize(obj_count); + bounding_boxes_flat.resize(obj_count); + /* now copy across to preallocated vectors */ + int flat_pos = 0; + for (size_t img_idx = 0; img_idx < scores.size(); ++img_idx) + { + for (size_t obj_idx = 0; obj_idx < scores[img_idx].size(); ++obj_idx) + { + images_flat[flat_pos] = images[img_idx].id; + scores_flat[flat_pos] = scores[img_idx][obj_idx]; + bounding_boxes_flat[flat_pos] = bounding_boxes[img_idx][obj_idx]; + ++flat_pos; + } + } + } + + // SORT RESULTS BY THEIR SCORE + /* 1. store sorting order in 'ranking' */ + vector ranking; + VocData::getSortOrder(scores_flat, ranking); + + // CALCULATE CONFUSION MATRIX ENTRIES + /* prepare object category headers */ + output_headers = m_object_classes; + output_headers.push_back("background"); + vector(output_headers.size(),0.0).swap(output_values); + + /* prepare variables related to calculating recall if using the recall threshold */ + int retrieved_hits = 0; + int total_relevant = 0; + if (cond == CV_VOC_CCOND_RECALL) + { +// vector ground_truth; +// /* in order to calculate the total number of relevant images for normalization of recall +// it's necessary to extract the ground truth for the images under consideration */ +// getClassifierGroundTruth(obj_class, images, ground_truth); +// total_relevant = std::count_if(ground_truth.begin(),ground_truth.end(),std::bind2nd(std::equal_to(),true)); + /* calculate the total number of objects in the ground truth for the current dataset */ + vector gt_images; + vector gt_object_present; + getClassImages(obj_class, dataset, gt_images, gt_object_present); + + for (size_t image_idx = 0; image_idx < gt_images.size(); ++image_idx) + { + vector gt_img_objects; + vector 
gt_img_object_data; + getObjects(gt_images[image_idx].id, gt_img_objects, gt_img_object_data); + for (size_t obj_idx = 0; obj_idx < gt_img_objects.size(); ++obj_idx) + { + if (gt_img_objects[obj_idx].object_class == obj_class) + { + if ((gt_img_object_data[obj_idx].difficult == false) || (ignore_difficult == false)) + ++total_relevant; + } + } + } + } + + /* iterate through objects */ + vector img_objects; + vector img_object_data; + int total_objects = 0; + for (size_t image_idx = 0; image_idx < images.size(); ++image_idx) + { + /* if using the score as the break condition, check for it now */ + if (cond == CV_VOC_CCOND_SCORETHRESH) + { + if (scores_flat[ranking[image_idx]] <= threshold) break; + } + /* increment the image counter for later normalization */ + ++total_objects; + /* for each image retrieve the objects contained */ + getObjects(images[ranking[image_idx]].id, img_objects, img_object_data); + + //find the ground truth object which has the highest overlap score with the detected object + float maxov = -1.0; + int max_gt_obj_idx = -1; + //-- for each detected object iterate through objects present in ground truth -- + for (size_t gt_obj_idx = 0; gt_obj_idx < img_objects.size(); ++gt_obj_idx) + { + //check difficulty flag + if (ignore_difficult || (img_object_data[gt_obj_idx].difficult == false)) + { + //if the class matches, then check if the detected object and ground truth object overlap by a sufficient margin + float ov = testBoundingBoxesForOverlap(bounding_boxes_flat[ranking[image_idx]], img_objects[gt_obj_idx].boundingBox); + if (ov != -1.f) + { + //if all conditions are met store the overlap score and index (as objects are assigned to the highest scoring match) + if (ov > maxov) + { + maxov = ov; + max_gt_obj_idx = (int)gt_obj_idx; + } + } + } + } + + //assign to appropriate object class if an object was detected + if (maxov != -1.0) + { + //find the index of the currently considered object + vector::iterator class_idx_it = 
std::find(output_headers.begin(),output_headers.end(),img_objects[max_gt_obj_idx].object_class); + //if the class name extracted from the ground truth file could not be found in the list of available classes, raise an exception + if (class_idx_it == output_headers.end()) + { + string err_msg = "could not find object class '" + img_objects[max_gt_obj_idx].object_class + "' specified in the ground truth file of '" + images[ranking[image_idx]].id +"'in list of valid classes."; + CV_Error(Error::StsError,err_msg.c_str()); + } + /* convert iterator to index */ + int class_idx = (int)std::distance(output_headers.begin(),class_idx_it); + //add to confusion matrix row in proportion + output_values[class_idx] += 1.0; + } else { + //otherwise assign to background class + output_values[output_values.size()-1] += 1.0; + } + + //check break conditions if breaking on certain level of recall + if (cond == CV_VOC_CCOND_RECALL) + { + if(static_cast(retrieved_hits)/static_cast(total_relevant) >= threshold) break; + } + } + + /* finally, normalize confusion matrix row */ + for (vector::iterator it = output_values.begin(); it < output_values.end(); ++it) + { + (*it) /= static_cast(total_objects); + } +} + +//Save Precision-Recall results to a p-r curve in GNUPlot format +//-------------------------------------------------------------- +//INPUTS: +// - output_file The file to which to save the GNUPlot data file. If only a filename is specified, the data +// file is saved to the standard VOC results directory. 
+// - precision Vector of precisions as returned from calcClassifier/DetectorPrecRecall +// - recall Vector of recalls as returned from calcClassifier/DetectorPrecRecall +// - ap ap as returned from calcClassifier/DetectorPrecRecall +// - (title) Title to use for the plot (if not specified, just the ap is printed as the title) +// This also specifies the filename of the output file if printing to pdf +// - (plot_type) Specifies whether to instruct GNUPlot to save to a PDF file (CV_VOC_PLOT_PDF) or directly +// to screen (CV_VOC_PLOT_SCREEN) in the datafile +//NOTES: +// The GNUPlot data file can be executed using GNUPlot from the commandline in the following way: +// >> GNUPlot +// This will then display the p-r curve on the screen or save it to a pdf file depending on plot_type + +void VocData::savePrecRecallToGnuplot(const string& output_file, const vector& precision, const vector& recall, const float ap, const string title, const VocPlotType plot_type) +{ + string output_file_std = checkFilenamePathsep(output_file); + + //if no directory is specified, by default save the output file in the results directory +// if (output_file_std.find("/") == output_file_std.npos) +// { +// output_file_std = m_results_directory + output_file_std; +// } + + std::ofstream plot_file(output_file_std.c_str()); + + if (plot_file.is_open()) + { + plot_file << "set xrange [0:1]" << endl; + plot_file << "set yrange [0:1]" << endl; + plot_file << "set size square" << endl; + string title_text = title; + if (title_text.size() == 0) title_text = "Precision-Recall Curve"; + plot_file << "set title \"" << title_text << " (ap: " << ap << ")\"" << endl; + plot_file << "set xlabel \"Recall\"" << endl; + plot_file << "set ylabel \"Precision\"" << endl; + plot_file << "set style data lines" << endl; + plot_file << "set nokey" << endl; + if (plot_type == CV_VOC_PLOT_PNG) + { + plot_file << "set terminal png" << endl; + string pdf_filename; + if (title.size() != 0) + { + pdf_filename = title; + } 
else { + pdf_filename = "prcurve"; + } + plot_file << "set out \"" << title << ".png\"" << endl; + } + plot_file << "plot \"-\" using 1:2" << endl; + plot_file << "# X Y" << endl; + CV_Assert(precision.size() == recall.size()); + for (size_t i = 0; i < precision.size(); ++i) + { + plot_file << " " << recall[i] << " " << precision[i] << endl; + } + plot_file << "end" << endl; + if (plot_type == CV_VOC_PLOT_SCREEN) + { + plot_file << "pause -1" << endl; + } + plot_file.close(); + } else { + string err_msg = "could not open plot file '" + output_file_std + "' for writing."; + CV_Error(Error::StsError,err_msg.c_str()); + } +} + +void VocData::readClassifierGroundTruth(const string& obj_class, const ObdDatasetType dataset, vector& images, vector& object_present) +{ + images.clear(); + + string gtFilename = m_class_imageset_path; + gtFilename.replace(gtFilename.find("%s"),2,obj_class); + if (dataset == CV_OBD_TRAIN) + { + gtFilename.replace(gtFilename.find("%s"),2,m_train_set); + } else { + gtFilename.replace(gtFilename.find("%s"),2,m_test_set); + } + + vector image_codes; + readClassifierGroundTruth(gtFilename, image_codes, object_present); + + convertImageCodesToObdImages(image_codes, images); +} + +void VocData::readClassifierResultsFile(const std:: string& input_file, vector& images, vector& scores) +{ + images.clear(); + + string input_file_std = checkFilenamePathsep(input_file); + + //if no directory is specified, by default search for the input file in the results directory +// if (input_file_std.find("/") == input_file_std.npos) +// { +// input_file_std = m_results_directory + input_file_std; +// } + + vector image_codes; + readClassifierResultsFile(input_file_std, image_codes, scores); + + convertImageCodesToObdImages(image_codes, images); +} + +void VocData::readDetectorResultsFile(const string& input_file, vector& images, vector >& scores, vector >& bounding_boxes) +{ + images.clear(); + + string input_file_std = checkFilenamePathsep(input_file); + + //if no 
directory is specified, by default search for the input file in the results directory +// if (input_file_std.find("/") == input_file_std.npos) +// { +// input_file_std = m_results_directory + input_file_std; +// } + + vector image_codes; + readDetectorResultsFile(input_file_std, image_codes, scores, bounding_boxes); + + convertImageCodesToObdImages(image_codes, images); +} + +const vector& VocData::getObjectClasses() +{ + return m_object_classes; +} + +//string VocData::getResultsDirectory() +//{ +// return m_results_directory; +//} + +//--------------------------------------------------------- +// Protected Functions ------------------------------------ +//--------------------------------------------------------- + +static string getVocName( const string& vocPath ) +{ + size_t found = vocPath.rfind( '/' ); + if( found == string::npos ) + { + found = vocPath.rfind( '\\' ); + if( found == string::npos ) + return vocPath; + } + return vocPath.substr(found + 1, vocPath.size() - found); +} + +void VocData::initVoc( const string& vocPath, const bool useTestDataset ) +{ + initVoc2007to2010( vocPath, useTestDataset ); +} + +//Initialize file paths and settings for the VOC 2010 dataset +//----------------------------------------------------------- +void VocData::initVoc2007to2010( const string& vocPath, const bool useTestDataset ) +{ + //check format of root directory and modify if necessary + + m_vocName = getVocName( vocPath ); + + CV_Assert( !m_vocName.compare("VOC2007") || !m_vocName.compare("VOC2008") || + !m_vocName.compare("VOC2009") || !m_vocName.compare("VOC2010") ); + + m_vocPath = checkFilenamePathsep( vocPath, true ); + + if (useTestDataset) + { + m_train_set = "trainval"; + m_test_set = "test"; + } else { + m_train_set = "train"; + m_test_set = "val"; + } + + // initialize main classification/detection challenge paths + m_annotation_path = m_vocPath + "/Annotations/%s.xml"; + m_image_path = m_vocPath + "/JPEGImages/%s.jpg"; + m_imageset_path = m_vocPath + 
"/ImageSets/Main/%s.txt"; + m_class_imageset_path = m_vocPath + "/ImageSets/Main/%s_%s.txt"; + + //define available object_classes for VOC2010 dataset + m_object_classes.push_back("aeroplane"); + m_object_classes.push_back("bicycle"); + m_object_classes.push_back("bird"); + m_object_classes.push_back("boat"); + m_object_classes.push_back("bottle"); + m_object_classes.push_back("bus"); + m_object_classes.push_back("car"); + m_object_classes.push_back("cat"); + m_object_classes.push_back("chair"); + m_object_classes.push_back("cow"); + m_object_classes.push_back("diningtable"); + m_object_classes.push_back("dog"); + m_object_classes.push_back("horse"); + m_object_classes.push_back("motorbike"); + m_object_classes.push_back("person"); + m_object_classes.push_back("pottedplant"); + m_object_classes.push_back("sheep"); + m_object_classes.push_back("sofa"); + m_object_classes.push_back("train"); + m_object_classes.push_back("tvmonitor"); + + m_min_overlap = 0.5; + + //up until VOC 2010, ap was calculated by sampling p-r curve, not taking complete curve + m_sampled_ap = ((m_vocName == "VOC2007") || (m_vocName == "VOC2008") || (m_vocName == "VOC2009")); +} + +//Read a VOC classification ground truth text file for a given object class and dataset +//------------------------------------------------------------------------------------- +//INPUTS: +// - filename The path of the text file to read +//OUTPUTS: +// - image_codes VOC image codes extracted from the GT file in the form 20XX_XXXXXX where the first four +// digits specify the year of the dataset, and the last group specifies a unique ID +// - object_present For each image in the 'image_codes' array, specifies whether the object class described +// in the loaded GT file is present or not +void VocData::readClassifierGroundTruth(const string& filename, vector& image_codes, vector& object_present) +{ + image_codes.clear(); + object_present.clear(); + + std::ifstream gtfile(filename.c_str()); + if (!gtfile.is_open()) + { + 
string err_msg = "could not open VOC ground truth textfile '" + filename + "'."; + CV_Error(Error::StsError,err_msg.c_str()); + } + + string line; + string image; + int obj_present = 0; + while (!gtfile.eof()) + { + std::getline(gtfile,line); + std::istringstream iss(line); + iss >> image >> obj_present; + if (!iss.fail()) + { + image_codes.push_back(image); + object_present.push_back(obj_present == 1); + } else { + if (!gtfile.eof()) CV_Error(Error::StsParseError,"error parsing VOC ground truth textfile."); + } + } + gtfile.close(); +} + +void VocData::readClassifierResultsFile(const string& input_file, vector& image_codes, vector& scores) +{ + //check if results file exists + std::ifstream result_file(input_file.c_str()); + if (result_file.is_open()) + { + string line; + string image; + float score; + //read in the results file + while (!result_file.eof()) + { + std::getline(result_file,line); + std::istringstream iss(line); + iss >> image >> score; + if (!iss.fail()) + { + image_codes.push_back(image); + scores.push_back(score); + } else { + if(!result_file.eof()) CV_Error(Error::StsParseError,"error parsing VOC classifier results file."); + } + } + result_file.close(); + } else { + string err_msg = "could not open classifier results file '" + input_file + "' for reading."; + CV_Error(Error::StsError,err_msg.c_str()); + } +} + +void VocData::readDetectorResultsFile(const string& input_file, vector& image_codes, vector >& scores, vector >& bounding_boxes) +{ + image_codes.clear(); + scores.clear(); + bounding_boxes.clear(); + + //check if results file exists + std::ifstream result_file(input_file.c_str()); + if (result_file.is_open()) + { + string line; + string image; + Rect bounding_box; + float score; + //read in the results file + while (!result_file.eof()) + { + std::getline(result_file,line); + std::istringstream iss(line); + iss >> image >> score >> bounding_box.x >> bounding_box.y >> bounding_box.width >> bounding_box.height; + if (!iss.fail()) + { + 
//convert right and bottom positions to width and height + bounding_box.width -= bounding_box.x; + bounding_box.height -= bounding_box.y; + //convert to 0-indexing + bounding_box.x -= 1; + bounding_box.y -= 1; + //store in output vectors + /* first check if the current image code has been seen before */ + vector::iterator image_codes_it = std::find(image_codes.begin(),image_codes.end(),image); + if (image_codes_it == image_codes.end()) + { + image_codes.push_back(image); + vector score_vect(1); + score_vect[0] = score; + scores.push_back(score_vect); + vector bounding_box_vect(1); + bounding_box_vect[0] = bounding_box; + bounding_boxes.push_back(bounding_box_vect); + } else { + /* if the image index has been seen before, add the current object below it in the 2D arrays */ + int image_idx = (int)std::distance(image_codes.begin(),image_codes_it); + scores[image_idx].push_back(score); + bounding_boxes[image_idx].push_back(bounding_box); + } + } else { + if(!result_file.eof()) CV_Error(Error::StsParseError,"error parsing VOC detector results file."); + } + } + result_file.close(); + } else { + string err_msg = "could not open detector results file '" + input_file + "' for reading."; + CV_Error(Error::StsError,err_msg.c_str()); + } +} + + +//Read a VOC annotation xml file for a given image +//------------------------------------------------ +//INPUTS: +// - filename The path of the xml file to read +//OUTPUTS: +// - objects Array of VocObject describing all object instances present in the given image +void VocData::extractVocObjects(const string filename, vector& objects, vector& object_data) +{ +#ifdef PR_DEBUG + int block = 1; + cout << "SAMPLE VOC OBJECT EXTRACTION for " << filename << ":" << endl; +#endif + objects.clear(); + object_data.clear(); + + string contents, object_contents, tag_contents; + + readFileToString(filename, contents); + + //keep on extracting 'object' blocks until no more can be found + if (extractXMLBlock(contents, "annotation", 0, contents) != 
-1) + { + int searchpos = 0; + searchpos = extractXMLBlock(contents, "object", searchpos, object_contents); + while (searchpos != -1) + { +#ifdef PR_DEBUG + cout << "SEARCHPOS:" << searchpos << endl; + cout << "start block " << block << " ---------" << endl; + cout << object_contents << endl; + cout << "end block " << block << " -----------" << endl; + ++block; +#endif + + ObdObject object; + VocObjectData object_d; + + //object class ------------- + + if (extractXMLBlock(object_contents, "name", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing tag in object definition of '" + filename + "'"); + object.object_class.swap(tag_contents); + + //object bounding box ------------- + + int xmax, xmin, ymax, ymin; + + if (extractXMLBlock(object_contents, "xmax", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing tag in object definition of '" + filename + "'"); + xmax = stringToInteger(tag_contents); + + if (extractXMLBlock(object_contents, "xmin", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing tag in object definition of '" + filename + "'"); + xmin = stringToInteger(tag_contents); + + if (extractXMLBlock(object_contents, "ymax", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing tag in object definition of '" + filename + "'"); + ymax = stringToInteger(tag_contents); + + if (extractXMLBlock(object_contents, "ymin", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing tag in object definition of '" + filename + "'"); + ymin = stringToInteger(tag_contents); + + object.boundingBox.x = xmin-1; //convert to 0-based indexing + object.boundingBox.width = xmax - xmin; + object.boundingBox.y = ymin-1; + object.boundingBox.height = ymax - ymin; + + CV_Assert(xmin != 0); + CV_Assert(xmax > xmin); + CV_Assert(ymin != 0); + CV_Assert(ymax > ymin); + + + //object tags ------------- + + if (extractXMLBlock(object_contents, "difficult", 0, tag_contents) != -1) + { + object_d.difficult = (tag_contents == "1"); + } else object_d.difficult = false; + 
if (extractXMLBlock(object_contents, "occluded", 0, tag_contents) != -1) + { + object_d.occluded = (tag_contents == "1"); + } else object_d.occluded = false; + if (extractXMLBlock(object_contents, "truncated", 0, tag_contents) != -1) + { + object_d.truncated = (tag_contents == "1"); + } else object_d.truncated = false; + if (extractXMLBlock(object_contents, "pose", 0, tag_contents) != -1) + { + if (tag_contents == "Frontal") object_d.pose = CV_VOC_POSE_FRONTAL; + if (tag_contents == "Rear") object_d.pose = CV_VOC_POSE_REAR; + if (tag_contents == "Left") object_d.pose = CV_VOC_POSE_LEFT; + if (tag_contents == "Right") object_d.pose = CV_VOC_POSE_RIGHT; + } + + //add to array of objects + objects.push_back(object); + object_data.push_back(object_d); + + //extract next 'object' block from file if it exists + searchpos = extractXMLBlock(contents, "object", searchpos, object_contents); + } + } +} + +//Converts an image identifier string in the format YYYY_XXXXXX to a single index integer of form XXXXXXYYYY +//where Y represents a year and returns the image path +//---------------------------------------------------------------------------------------------------------- +string VocData::getImagePath(const string& input_str) +{ + string path = m_image_path; + path.replace(path.find("%s"),2,input_str); + return path; +} + +//Tests two boundary boxes for overlap (using the intersection over union metric) and returns the overlap if the objects +//defined by the two bounding boxes are considered to be matched according to the criterion outlined in +//the VOC documentation [namely intersection/union > some threshold] otherwise returns -1.0 (no match) +//---------------------------------------------------------------------------------------------------------- +float VocData::testBoundingBoxesForOverlap(const Rect detection, const Rect ground_truth) +{ + int detection_x2 = detection.x + detection.width; + int detection_y2 = detection.y + detection.height; + int ground_truth_x2 = 
ground_truth.x + ground_truth.width; + int ground_truth_y2 = ground_truth.y + ground_truth.height; + //first calculate the boundaries of the intersection of the rectangles + int intersection_x = std::max(detection.x, ground_truth.x); //rightmost left + int intersection_y = std::max(detection.y, ground_truth.y); //bottommost top + int intersection_x2 = std::min(detection_x2, ground_truth_x2); //leftmost right + int intersection_y2 = std::min(detection_y2, ground_truth_y2); //topmost bottom + //then calculate the width and height of the intersection rect + int intersection_width = intersection_x2 - intersection_x + 1; + int intersection_height = intersection_y2 - intersection_y + 1; + //if there is no overlap then return false straight away + if ((intersection_width <= 0) || (intersection_height <= 0)) return -1.0; + //otherwise calculate the intersection + int intersection_area = intersection_width*intersection_height; + + //now calculate the union + int union_area = (detection.width+1)*(detection.height+1) + (ground_truth.width+1)*(ground_truth.height+1) - intersection_area; + + //calculate the intersection over union and use as threshold as per VOC documentation + float overlap = static_cast(intersection_area)/static_cast(union_area); + if (overlap > m_min_overlap) + { + return overlap; + } else { + return -1.0; + } +} + +//Extracts the object class and dataset from the filename of a VOC standard results text file, which takes +//the format 'comp_{cls/det}__.txt' +//---------------------------------------------------------------------------------------------------------- +void VocData::extractDataFromResultsFilename(const string& input_file, string& class_name, string& dataset_name) +{ + string input_file_std = checkFilenamePathsep(input_file); + + size_t fnamestart = input_file_std.rfind("/"); + size_t fnameend = input_file_std.rfind(".txt"); + + if ((fnamestart == input_file_std.npos) || (fnameend == input_file_std.npos)) + CV_Error(Error::StsError,"Could not 
extract filename of results file."); + + ++fnamestart; + if (fnamestart >= fnameend) + CV_Error(Error::StsError,"Could not extract filename of results file."); + + //extract dataset and class names, triggering exception if the filename format is not correct + string filename = input_file_std.substr(fnamestart, fnameend-fnamestart); + size_t datasetstart = filename.find("_"); + datasetstart = filename.find("_",datasetstart+1); + size_t classstart = filename.find("_",datasetstart+1); + //allow for appended index after a further '_' by discarding this part if it exists + size_t classend = filename.find("_",classstart+1); + if (classend == filename.npos) classend = filename.size(); + if ((datasetstart == filename.npos) || (classstart == filename.npos)) + CV_Error(Error::StsError,"Error parsing results filename. Is it in standard format of 'comp_{cls/det}__.txt'?"); + ++datasetstart; + ++classstart; + if (((datasetstart-classstart) < 1) || ((classend-datasetstart) < 1)) + CV_Error(Error::StsError,"Error parsing results filename. 
Is it in standard format of 'comp_{cls/det}__.txt'?"); + + dataset_name = filename.substr(datasetstart,classstart-datasetstart-1); + class_name = filename.substr(classstart,classend-classstart); +} + +bool VocData::getClassifierGroundTruthImage(const string& obj_class, const string& id) +{ + /* if the classifier ground truth data for all images of the current class has not been loaded yet, load it now */ + if (m_classifier_gt_all_ids.empty() || (m_classifier_gt_class != obj_class)) + { + m_classifier_gt_all_ids.clear(); + m_classifier_gt_all_present.clear(); + m_classifier_gt_class = obj_class; + for (int i=0; i<2; ++i) //run twice (once over test set and once over training set) + { + //generate the filename of the classification ground-truth textfile for the object class + string gtFilename = m_class_imageset_path; + gtFilename.replace(gtFilename.find("%s"),2,obj_class); + if (i == 0) + { + gtFilename.replace(gtFilename.find("%s"),2,m_train_set); + } else { + gtFilename.replace(gtFilename.find("%s"),2,m_test_set); + } + + //parse the ground truth file, storing in two separate vectors + //for the image code and the ground truth value + vector image_codes; + vector object_present; + readClassifierGroundTruth(gtFilename, image_codes, object_present); + + m_classifier_gt_all_ids.insert(m_classifier_gt_all_ids.end(),image_codes.begin(),image_codes.end()); + m_classifier_gt_all_present.insert(m_classifier_gt_all_present.end(),object_present.begin(),object_present.end()); + + CV_Assert(m_classifier_gt_all_ids.size() == m_classifier_gt_all_present.size()); + } + } + + + //search for the image code + vector::iterator it = find (m_classifier_gt_all_ids.begin(), m_classifier_gt_all_ids.end(), id); + if (it != m_classifier_gt_all_ids.end()) + { + //image found, so return corresponding ground truth + return m_classifier_gt_all_present[std::distance(m_classifier_gt_all_ids.begin(),it)] != 0; + } + string err_msg = "could not find classifier ground truth for image '" + id + "' 
and class '" + obj_class + "'"; + CV_Error(Error::StsError,err_msg.c_str()); +} + +//------------------------------------------------------------------- +// Protected Functions (utility) ------------------------------------ +//------------------------------------------------------------------- + +//returns a vector containing indexes of the input vector in sorted ascending/descending order +void VocData::getSortOrder(const vector& values, vector& order, bool descending) +{ + /* 1. store sorting order in 'order_pair' */ + vector::const_iterator> > order_pair(values.size()); + + size_t n = 0; + for (vector::const_iterator it = values.begin(); it != values.end(); ++it, ++n) + order_pair[n] = make_pair(n, it); + + std::sort(order_pair.begin(),order_pair.end(),orderingSorter()); + if (descending == false) std::reverse(order_pair.begin(),order_pair.end()); + + vector(order_pair.size()).swap(order); + for (size_t i = 0; i < order_pair.size(); ++i) + { + order[i] = order_pair[i].first; + } +} + +void VocData::readFileToString(const string filename, string& file_contents) +{ + std::ifstream ifs(filename.c_str()); + if (!ifs.is_open()) CV_Error(Error::StsError,"could not open text file"); + + stringstream oss; + oss << ifs.rdbuf(); + + file_contents = oss.str(); +} + +int VocData::stringToInteger(const string input_str) +{ + int result = 0; + + stringstream ss(input_str); + if ((ss >> result).fail()) + { + CV_Error(Error::StsBadArg,"could not perform string to integer conversion"); + } + return result; +} + +string VocData::integerToString(const int input_int) +{ + string result; + + stringstream ss; + if ((ss << input_int).fail()) + { + CV_Error(Error::StsBadArg,"could not perform integer to string conversion"); + } + result = ss.str(); + return result; +} + +string VocData::checkFilenamePathsep( const string filename, bool add_trailing_slash ) +{ + string filename_new = filename; + + size_t pos = filename_new.find("\\\\"); + while (pos != filename_new.npos) + { + 
filename_new.replace(pos,2,"/"); + pos = filename_new.find("\\\\", pos); + } + pos = filename_new.find("\\"); + while (pos != filename_new.npos) + { + filename_new.replace(pos,1,"/"); + pos = filename_new.find("\\", pos); + } + if (add_trailing_slash) + { + //add training slash if this is missing + if (filename_new.rfind("/") != filename_new.length()-1) filename_new += "/"; + } + + return filename_new; +} + +void VocData::convertImageCodesToObdImages(const vector& image_codes, vector& images) +{ + images.clear(); + images.reserve(image_codes.size()); + + string path; + //transfer to output arrays + for (size_t i = 0; i < image_codes.size(); ++i) + { + //generate image path and indices from extracted string code + path = getImagePath(image_codes[i]); + images.push_back(ObdImage(image_codes[i], path)); + } +} + +//Extract text from within a given tag from an XML file +//----------------------------------------------------- +//INPUTS: +// - src XML source file +// - tag XML tag delimiting block to extract +// - searchpos position within src at which to start search +//OUTPUTS: +// - tag_contents text extracted between and tags +//RETURN VALUE: +// - the position of the final character extracted in tag_contents within src +// (can be used to call extractXMLBlock recursively to extract multiple blocks) +// returns -1 if the tag could not be found +int VocData::extractXMLBlock(const string src, const string tag, const int searchpos, string& tag_contents) +{ + size_t startpos, next_startpos, endpos; + int embed_count = 1; + + //find position of opening tag + startpos = src.find("<" + tag + ">", searchpos); + if (startpos == string::npos) return -1; + + //initialize endpos - + // start searching for end tag anywhere after opening tag + endpos = startpos; + + //find position of next opening tag + next_startpos = src.find("<" + tag + ">", startpos+1); + + //match opening tags with closing tags, and only + //accept final closing tag of same level as original + //opening tag + 
while (embed_count > 0) + { + endpos = src.find("", endpos+1); + if (endpos == string::npos) return -1; + + //the next code is only executed if there are embedded tags with the same name + if (next_startpos != string::npos) + { + while (next_startpos", next_startpos+1); + if (next_startpos == string::npos) break; + } + } + //passing end tag so decrement nesting level + --embed_count; + } + + //finally, extract the tag region + startpos += tag.length() + 2; + if (startpos > src.length()) return -1; + if (endpos > src.length()) return -1; + tag_contents = src.substr(startpos,endpos-startpos); + return static_cast(endpos); +} + +/****************************************************************************************\ +* Sample on image classification * +\****************************************************************************************/ +// +// This part of the code was a little refactor +// +struct DDMParams +{ + DDMParams() : detectorType("SURF"), descriptorType("SURF"), matcherType("BruteForce") {} + DDMParams( const string _detectorType, const string _descriptorType, const string& _matcherType ) : + detectorType(_detectorType), descriptorType(_descriptorType), matcherType(_matcherType){} + void read( const FileNode& fn ) + { + fn["detectorType"] >> detectorType; + fn["descriptorType"] >> descriptorType; + fn["matcherType"] >> matcherType; + } + void write( FileStorage& fs ) const + { + fs << "detectorType" << detectorType; + fs << "descriptorType" << descriptorType; + fs << "matcherType" << matcherType; + } + void print() const + { + cout << "detectorType: " << detectorType << endl; + cout << "descriptorType: " << descriptorType << endl; + cout << "matcherType: " << matcherType << endl; + } + + string detectorType; + string descriptorType; + string matcherType; +}; + +struct VocabTrainParams +{ + VocabTrainParams() : trainObjClass("chair"), vocabSize(1000), memoryUse(200), descProportion(0.3f) {} + VocabTrainParams( const string _trainObjClass, size_t 
_vocabSize, size_t _memoryUse, float _descProportion ) : + trainObjClass(_trainObjClass), vocabSize((int)_vocabSize), memoryUse((int)_memoryUse), descProportion(_descProportion) {} + void read( const FileNode& fn ) + { + fn["trainObjClass"] >> trainObjClass; + fn["vocabSize"] >> vocabSize; + fn["memoryUse"] >> memoryUse; + fn["descProportion"] >> descProportion; + } + void write( FileStorage& fs ) const + { + fs << "trainObjClass" << trainObjClass; + fs << "vocabSize" << vocabSize; + fs << "memoryUse" << memoryUse; + fs << "descProportion" << descProportion; + } + void print() const + { + cout << "trainObjClass: " << trainObjClass << endl; + cout << "vocabSize: " << vocabSize << endl; + cout << "memoryUse: " << memoryUse << endl; + cout << "descProportion: " << descProportion << endl; + } + + + string trainObjClass; // Object class used for training visual vocabulary. + // It shouldn't matter which object class is specified here - visual vocab will still be the same. + int vocabSize; //number of visual words in vocabulary to train + int memoryUse; // Memory to preallocate (in MB) when training vocab. + // Change this depending on the size of the dataset/available memory. + float descProportion; // Specifies the number of descriptors to use from each image as a proportion of the total num descs. 
+}; + +struct SVMTrainParamsExt +{ + SVMTrainParamsExt() : descPercent(0.5f), targetRatio(0.4f), balanceClasses(true) {} + SVMTrainParamsExt( float _descPercent, float _targetRatio, bool _balanceClasses ) : + descPercent(_descPercent), targetRatio(_targetRatio), balanceClasses(_balanceClasses) {} + void read( const FileNode& fn ) + { + fn["descPercent"] >> descPercent; + fn["targetRatio"] >> targetRatio; + fn["balanceClasses"] >> balanceClasses; + } + void write( FileStorage& fs ) const + { + fs << "descPercent" << descPercent; + fs << "targetRatio" << targetRatio; + fs << "balanceClasses" << balanceClasses; + } + void print() const + { + cout << "descPercent: " << descPercent << endl; + cout << "targetRatio: " << targetRatio << endl; + cout << "balanceClasses: " << balanceClasses << endl; + } + + float descPercent; // Percentage of extracted descriptors to use for training. + float targetRatio; // Try to get this ratio of positive to negative samples (minimum). + bool balanceClasses; // Balance class weights by number of samples in each (if true cSvmTrainTargetRatio is ignored). 
+}; + +static void readUsedParams( const FileNode& fn, string& vocName, DDMParams& ddmParams, VocabTrainParams& vocabTrainParams, SVMTrainParamsExt& svmTrainParamsExt ) +{ + fn["vocName"] >> vocName; + + FileNode currFn = fn; + + currFn = fn["ddmParams"]; + ddmParams.read( currFn ); + + currFn = fn["vocabTrainParams"]; + vocabTrainParams.read( currFn ); + + currFn = fn["svmTrainParamsExt"]; + svmTrainParamsExt.read( currFn ); +} + +static void writeUsedParams( FileStorage& fs, const string& vocName, const DDMParams& ddmParams, const VocabTrainParams& vocabTrainParams, const SVMTrainParamsExt& svmTrainParamsExt ) +{ + fs << "vocName" << vocName; + + fs << "ddmParams" << "{"; + ddmParams.write(fs); + fs << "}"; + + fs << "vocabTrainParams" << "{"; + vocabTrainParams.write(fs); + fs << "}"; + + fs << "svmTrainParamsExt" << "{"; + svmTrainParamsExt.write(fs); + fs << "}"; +} + +static void printUsedParams( const string& vocPath, const string& resDir, + const DDMParams& ddmParams, const VocabTrainParams& vocabTrainParams, + const SVMTrainParamsExt& svmTrainParamsExt ) +{ + cout << "CURRENT CONFIGURATION" << endl; + cout << "----------------------------------------------------------------" << endl; + cout << "vocPath: " << vocPath << endl; + cout << "resDir: " << resDir << endl; + cout << endl; ddmParams.print(); + cout << endl; vocabTrainParams.print(); + cout << endl; svmTrainParamsExt.print(); + cout << "----------------------------------------------------------------" << endl << endl; +} + +static bool readVocabulary( const string& filename, Mat& vocabulary ) +{ + cout << "Reading vocabulary..."; + FileStorage fs( filename, FileStorage::READ ); + if( fs.isOpened() ) + { + fs["vocabulary"] >> vocabulary; + cout << "done" << endl; + return true; + } + return false; +} + +static bool writeVocabulary( const string& filename, const Mat& vocabulary ) +{ + cout << "Saving vocabulary..." 
<< endl; + FileStorage fs( filename, FileStorage::WRITE ); + if( fs.isOpened() ) + { + fs << "vocabulary" << vocabulary; + return true; + } + return false; +} + +static Mat trainVocabulary( const string& filename, VocData& vocData, const VocabTrainParams& trainParams, + const Ptr& fdetector, const Ptr& dextractor ) +{ + Mat vocabulary; + if( !readVocabulary( filename, vocabulary) ) + { + CV_Assert( dextractor->descriptorType() == CV_32FC1 ); + const int elemSize = CV_ELEM_SIZE(dextractor->descriptorType()); + const int descByteSize = dextractor->descriptorSize() * elemSize; + const int bytesInMB = 1048576; + const int maxDescCount = (trainParams.memoryUse * bytesInMB) / descByteSize; // Total number of descs to use for training. + + cout << "Extracting VOC data..." << endl; + vector images; + vector objectPresent; + vocData.getClassImages( trainParams.trainObjClass, CV_OBD_TRAIN, images, objectPresent ); + + cout << "Computing descriptors..." << endl; + RNG& rng = theRNG(); + TermCriteria terminate_criterion; + terminate_criterion.epsilon = FLT_EPSILON; + BOWKMeansTrainer bowTrainer( trainParams.vocabSize, terminate_criterion, 3, KMEANS_PP_CENTERS ); + + while( images.size() > 0 ) + { + if( bowTrainer.descriptorsCount() > maxDescCount ) + { +#ifdef DEBUG_DESC_PROGRESS + cout << "Breaking due to full memory ( descriptors count = " << bowTrainer.descriptorsCount() + << "; descriptor size in bytes = " << descByteSize << "; all used memory = " + << bowTrainer.descriptorsCount()*descByteSize << endl; +#endif + break; + } + + // Randomly pick an image from the dataset which hasn't yet been seen + // and compute the descriptors from that image. 
+ int randImgIdx = rng( (unsigned)images.size() ); + Mat colorImage = imread( images[randImgIdx].path ); + vector imageKeypoints; + fdetector->detect( colorImage, imageKeypoints ); + Mat imageDescriptors; + dextractor->compute( colorImage, imageKeypoints, imageDescriptors ); + + //check that there were descriptors calculated for the current image + if( !imageDescriptors.empty() ) + { + int descCount = imageDescriptors.rows; + // Extract trainParams.descProportion descriptors from the image, breaking if the 'allDescriptors' matrix becomes full + int descsToExtract = static_cast(trainParams.descProportion * static_cast(descCount)); + // Fill mask of used descriptors + vector usedMask( descCount, false ); + fill( usedMask.begin(), usedMask.begin() + descsToExtract, true ); + for( int i = 0; i < descCount; i++ ) + { + int i1 = rng(descCount), i2 = rng(descCount); + char tmp = usedMask[i1]; usedMask[i1] = usedMask[i2]; usedMask[i2] = tmp; + } + + for( int i = 0; i < descCount; i++ ) + { + if( usedMask[i] && bowTrainer.descriptorsCount() < maxDescCount ) + bowTrainer.add( imageDescriptors.row(i) ); + } + } + +#ifdef DEBUG_DESC_PROGRESS + cout << images.size() << " images left, " << images[randImgIdx].id << " processed - " + <(bowTrainer.descriptorsCount())/static_cast(maxDescCount))*100.0) + << " % memory used" << ( imageDescriptors.empty() ? " -> no descriptors extracted, skipping" : "") << endl; +#endif + + // Delete the current element from images so it is not added again + images.erase( images.begin() + randImgIdx ); + } + + cout << "Maximum allowed descriptor count: " << maxDescCount << ", Actual descriptor count: " << bowTrainer.descriptorsCount() << endl; + + cout << "Training vocabulary..." 
<< endl; + vocabulary = bowTrainer.cluster(); + + if( !writeVocabulary(filename, vocabulary) ) + { + cout << "Error: file " << filename << " can not be opened to write" << endl; + exit(-1); + } + } + return vocabulary; +} + +static bool readBowImageDescriptor( const string& file, Mat& bowImageDescriptor ) +{ + FileStorage fs( file, FileStorage::READ ); + if( fs.isOpened() ) + { + fs["imageDescriptor"] >> bowImageDescriptor; + return true; + } + return false; +} + +static bool writeBowImageDescriptor( const string& file, const Mat& bowImageDescriptor ) +{ + FileStorage fs( file, FileStorage::WRITE ); + if( fs.isOpened() ) + { + fs << "imageDescriptor" << bowImageDescriptor; + return true; + } + return false; +} + +// Load in the bag of words vectors for a set of images, from file if possible +static void calculateImageDescriptors( const vector& images, vector& imageDescriptors, + Ptr& bowExtractor, const Ptr& fdetector, + const string& resPath ) +{ + CV_Assert( !bowExtractor->getVocabulary().empty() ); + imageDescriptors.resize( images.size() ); + + for( size_t i = 0; i < images.size(); i++ ) + { + string filename = resPath + bowImageDescriptorsDir + "/" + images[i].id + ".xml.gz"; + if( readBowImageDescriptor( filename, imageDescriptors[i] ) ) + { +#ifdef DEBUG_DESC_PROGRESS + cout << "Loaded bag of word vector for image " << i+1 << " of " << images.size() << " (" << images[i].id << ")" << endl; +#endif + } + else + { + Mat colorImage = imread( images[i].path ); +#ifdef DEBUG_DESC_PROGRESS + cout << "Computing descriptors for image " << i+1 << " of " << images.size() << " (" << images[i].id << ")" << flush; +#endif + vector keypoints; + fdetector->detect( colorImage, keypoints ); +#ifdef DEBUG_DESC_PROGRESS + cout << " + generating BoW vector" << std::flush; +#endif + bowExtractor->compute( colorImage, keypoints, imageDescriptors[i] ); +#ifdef DEBUG_DESC_PROGRESS + cout << " ...DONE " << static_cast(static_cast(i+1)/static_cast(images.size())*100.0) + << " % 
complete" << endl; +#endif + if( !imageDescriptors[i].empty() ) + { + if( !writeBowImageDescriptor( filename, imageDescriptors[i] ) ) + { + cout << "Error: file " << filename << "can not be opened to write bow image descriptor" << endl; + exit(-1); + } + } + } + } +} + +static void removeEmptyBowImageDescriptors( vector& images, vector& bowImageDescriptors, + vector& objectPresent ) +{ + CV_Assert( !images.empty() ); + for( int i = (int)images.size() - 1; i >= 0; i-- ) + { + bool res = bowImageDescriptors[i].empty(); + if( res ) + { + cout << "Removing image " << images[i].id << " due to no descriptors..." << endl; + images.erase( images.begin() + i ); + bowImageDescriptors.erase( bowImageDescriptors.begin() + i ); + objectPresent.erase( objectPresent.begin() + i ); + } + } +} + +static void removeBowImageDescriptorsByCount( vector& images, vector bowImageDescriptors, vector objectPresent, + const SVMTrainParamsExt& svmParamsExt, int descsToDelete ) +{ + RNG& rng = theRNG(); + int pos_ex = (int)std::count( objectPresent.begin(), objectPresent.end(), (char)1 ); + int neg_ex = (int)std::count( objectPresent.begin(), objectPresent.end(), (char)0 ); + + while( descsToDelete != 0 ) + { + int randIdx = rng((unsigned)images.size()); + + // Prefer positive training examples according to svmParamsExt.targetRatio if required + if( objectPresent[randIdx] ) + { + if( (static_cast(pos_ex)/static_cast(neg_ex+pos_ex) < svmParamsExt.targetRatio) && + (neg_ex > 0) && (svmParamsExt.balanceClasses == false) ) + { continue; } + else + { pos_ex--; } + } + else + { neg_ex--; } + + images.erase( images.begin() + randIdx ); + bowImageDescriptors.erase( bowImageDescriptors.begin() + randIdx ); + objectPresent.erase( objectPresent.begin() + randIdx ); + + descsToDelete--; + } + CV_Assert( bowImageDescriptors.size() == objectPresent.size() ); +} + +static void setSVMParams( Ptr & svm, const Mat& responses, bool balanceClasses ) +{ + int pos_ex = countNonZero(responses == 1); + int neg_ex = 
countNonZero(responses == -1); + cout << pos_ex << " positive training samples; " << neg_ex << " negative training samples" << endl; + + svm->setType(SVM::C_SVC); + svm->setKernel(SVM::RBF); + if( balanceClasses ) + { + Mat class_wts( 2, 1, CV_32FC1 ); + // The first training sample determines the '+1' class internally, even if it is negative, + // so store whether this is the case so that the class weights can be reversed accordingly. + bool reversed_classes = (responses.at(0) < 0.f); + if( reversed_classes == false ) + { + class_wts.at(0) = static_cast(pos_ex)/static_cast(pos_ex+neg_ex); // weighting for costs of positive class + 1 (i.e. cost of false positive - larger gives greater cost) + class_wts.at(1) = static_cast(neg_ex)/static_cast(pos_ex+neg_ex); // weighting for costs of negative class - 1 (i.e. cost of false negative) + } + else + { + class_wts.at(0) = static_cast(neg_ex)/static_cast(pos_ex+neg_ex); + class_wts.at(1) = static_cast(pos_ex)/static_cast(pos_ex+neg_ex); + } + svm->setClassWeights(class_wts); + } +} + +static void setSVMTrainAutoParams( ParamGrid& c_grid, ParamGrid& gamma_grid, + ParamGrid& p_grid, ParamGrid& nu_grid, + ParamGrid& coef_grid, ParamGrid& degree_grid ) +{ + c_grid = SVM::getDefaultGrid(SVM::C); + + gamma_grid = SVM::getDefaultGrid(SVM::GAMMA); + + p_grid = SVM::getDefaultGrid(SVM::P); + p_grid.logStep = 0; + + nu_grid = SVM::getDefaultGrid(SVM::NU); + nu_grid.logStep = 0; + + coef_grid = SVM::getDefaultGrid(SVM::COEF); + coef_grid.logStep = 0; + + degree_grid = SVM::getDefaultGrid(SVM::DEGREE); + degree_grid.logStep = 0; +} + +static Ptr trainSVMClassifier( const SVMTrainParamsExt& svmParamsExt, const string& objClassName, VocData& vocData, + Ptr& bowExtractor, const Ptr& fdetector, + const string& resPath ) +{ + /* first check if a previously trained svm for the current class has been saved to file */ + string svmFilename = resPath + svmsDir + "/" + objClassName + ".xml.gz"; + Ptr svm; + + FileStorage fs( svmFilename, 
FileStorage::READ); + if( fs.isOpened() ) + { + cout << "*** LOADING SVM CLASSIFIER FOR CLASS " << objClassName << " ***" << endl; + svm = StatModel::load( svmFilename ); + } + else + { + cout << "*** TRAINING CLASSIFIER FOR CLASS " << objClassName << " ***" << endl; + cout << "CALCULATING BOW VECTORS FOR TRAINING SET OF " << objClassName << "..." << endl; + + // Get classification ground truth for images in the training set + vector images; + vector bowImageDescriptors; + vector objectPresent; + vocData.getClassImages( objClassName, CV_OBD_TRAIN, images, objectPresent ); + + // Compute the bag of words vector for each image in the training set. + calculateImageDescriptors( images, bowImageDescriptors, bowExtractor, fdetector, resPath ); + + // Remove any images for which descriptors could not be calculated + removeEmptyBowImageDescriptors( images, bowImageDescriptors, objectPresent ); + + CV_Assert( svmParamsExt.descPercent > 0.f && svmParamsExt.descPercent <= 1.f ); + if( svmParamsExt.descPercent < 1.f ) + { + int descsToDelete = static_cast(static_cast(images.size())*(1.0-svmParamsExt.descPercent)); + + cout << "Using " << (images.size() - descsToDelete) << " of " << images.size() << + " descriptors for training (" << svmParamsExt.descPercent*100.0 << " %)" << endl; + removeBowImageDescriptorsByCount( images, bowImageDescriptors, objectPresent, svmParamsExt, descsToDelete ); + } + + // Prepare the input matrices for SVM training. 
+ Mat trainData( (int)images.size(), bowExtractor->getVocabulary().rows, CV_32FC1 ); + Mat responses( (int)images.size(), 1, CV_32SC1 ); + + // Transfer bag of words vectors and responses across to the training data matrices + for( size_t imageIdx = 0; imageIdx < images.size(); imageIdx++ ) + { + // Transfer image descriptor (bag of words vector) to training data matrix + Mat submat = trainData.row((int)imageIdx); + if( bowImageDescriptors[imageIdx].cols != bowExtractor->descriptorSize() ) + { + cout << "Error: computed bow image descriptor size " << bowImageDescriptors[imageIdx].cols + << " differs from vocabulary size" << bowExtractor->getVocabulary().cols << endl; + exit(-1); + } + bowImageDescriptors[imageIdx].copyTo( submat ); + + // Set response value + responses.at((int)imageIdx) = objectPresent[imageIdx] ? 1 : -1; + } + + cout << "TRAINING SVM FOR CLASS ..." << objClassName << "..." << endl; + svm = SVM::create(); + setSVMParams( svm, responses, svmParamsExt.balanceClasses ); + ParamGrid c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid; + setSVMTrainAutoParams( c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid ); + + svm->trainAuto(TrainData::create(trainData, ROW_SAMPLE, responses), 10, + c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid); + cout << "SVM TRAINING FOR CLASS " << objClassName << " COMPLETED" << endl; + + svm->save( svmFilename ); + cout << "SAVED CLASSIFIER TO FILE" << endl; + } + return svm; +} + +static void computeConfidences( const Ptr& svm, const string& objClassName, VocData& vocData, + Ptr& bowExtractor, const Ptr& fdetector, + const string& resPath ) +{ + cout << "*** CALCULATING CONFIDENCES FOR CLASS " << objClassName << " ***" << endl; + cout << "CALCULATING BOW VECTORS FOR TEST SET OF " << objClassName << "..." 
<< endl; + // Get classification ground truth for images in the test set + vector images; + vector bowImageDescriptors; + vector objectPresent; + vocData.getClassImages( objClassName, CV_OBD_TEST, images, objectPresent ); + + // Compute the bag of words vector for each image in the test set + calculateImageDescriptors( images, bowImageDescriptors, bowExtractor, fdetector, resPath ); + // Remove any images for which descriptors could not be calculated + removeEmptyBowImageDescriptors( images, bowImageDescriptors, objectPresent); + + // Use the bag of words vectors to calculate classifier output for each image in test set + cout << "CALCULATING CONFIDENCE SCORES FOR CLASS " << objClassName << "..." << endl; + vector confidences( images.size() ); + float signMul = 1.f; + for( size_t imageIdx = 0; imageIdx < images.size(); imageIdx++ ) + { + if( imageIdx == 0 ) + { + // In the first iteration, determine the sign of the positive class + float classVal = confidences[imageIdx] = svm->predict( bowImageDescriptors[imageIdx], noArray(), 0 ); + float scoreVal = confidences[imageIdx] = svm->predict( bowImageDescriptors[imageIdx], noArray(), StatModel::RAW_OUTPUT ); + signMul = (classVal < 0) == (scoreVal < 0) ? 1.f : -1.f; + } + // svm output of decision function + confidences[imageIdx] = signMul * svm->predict( bowImageDescriptors[imageIdx], noArray(), StatModel::RAW_OUTPUT ); + } + + cout << "WRITING QUERY RESULTS TO VOC RESULTS FILE FOR CLASS " << objClassName << "..." 
<< endl; + vocData.writeClassifierResultsFile( resPath + plotsDir, objClassName, CV_OBD_TEST, images, confidences, 1, true ); + + cout << "DONE - " << objClassName << endl; + cout << "---------------------------------------------------------------" << endl; +} + +static void computeGnuPlotOutput( const string& resPath, const string& objClassName, VocData& vocData ) +{ + vector precision, recall; + float ap; + + const string resultFile = vocData.getResultsFilename( objClassName, CV_VOC_TASK_CLASSIFICATION, CV_OBD_TEST); + const string plotFile = resultFile.substr(0, resultFile.size()-4) + ".plt"; + + cout << "Calculating precision recall curve for class '" < createByName(const String& name) +{ + if( name == "SIFT" ) + return SIFT::create(); + if( name == "SURF" ) + return SURF::create(); + if( name == "ORB" ) + return ORB::create(); + if( name == "BRISK" ) + return BRISK::create(); + if( name == "KAZE" ) + return KAZE::create(); + if( name == "AKAZE" ) + return AKAZE::create(); + return Ptr(); +} + +int main(int argc, char** argv) +{ + if( argc != 3 && argc != 6 ) + { + help(argv); + return -1; + } + + const string vocPath = argv[1], resPath = argv[2]; + + // Read or set default parameters + string vocName; + DDMParams ddmParams; + VocabTrainParams vocabTrainParams; + SVMTrainParamsExt svmTrainParamsExt; + + makeUsedDirs( resPath ); + + FileStorage paramsFS( resPath + "/" + paramsFile, FileStorage::READ ); + if( paramsFS.isOpened() ) + { + readUsedParams( paramsFS.root(), vocName, ddmParams, vocabTrainParams, svmTrainParamsExt ); + CV_Assert( vocName == getVocName(vocPath) ); + } + else + { + vocName = getVocName(vocPath); + if( argc!= 6 ) + { + cout << "Feature detector, descriptor extractor, descriptor matcher must be set" << endl; + return -1; + } + ddmParams = DDMParams( argv[3], argv[4], argv[5] ); // from command line + // vocabTrainParams and svmTrainParamsExt is set by defaults + paramsFS.open( resPath + "/" + paramsFile, FileStorage::WRITE ); + if( 
paramsFS.isOpened() ) + { + writeUsedParams( paramsFS, vocName, ddmParams, vocabTrainParams, svmTrainParamsExt ); + paramsFS.release(); + } + else + { + cout << "File " << (resPath + "/" + paramsFile) << "can not be opened to write" << endl; + return -1; + } + } + + // Create detector, descriptor, matcher. + if( ddmParams.detectorType != ddmParams.descriptorType ) + { + cout << "detector and descriptor should be the same\n"; + return -1; + } + Ptr featureDetector = createByName( ddmParams.detectorType ); + Ptr descExtractor = featureDetector; + Ptr bowExtractor; + if( !featureDetector || !descExtractor ) + { + cout << "featureDetector or descExtractor was not created" << endl; + return -1; + } + { + Ptr descMatcher = DescriptorMatcher::create( ddmParams.matcherType ); + if( !featureDetector || !descExtractor || !descMatcher ) + { + cout << "descMatcher was not created" << endl; + return -1; + } + bowExtractor = makePtr( descExtractor, descMatcher ); + } + + // Print configuration to screen + printUsedParams( vocPath, resPath, ddmParams, vocabTrainParams, svmTrainParamsExt ); + // Create object to work with VOC + VocData vocData( vocPath, false ); + + // 1. Train visual word vocabulary if a pre-calculated vocabulary file doesn't already exist from previous run + Mat vocabulary = trainVocabulary( resPath + "/" + vocabularyFile, vocData, vocabTrainParams, + featureDetector, descExtractor ); + bowExtractor->setVocabulary( vocabulary ); + + // 2. Train a classifier and run a sample query for each object class + const vector& objClasses = vocData.getObjectClasses(); // object class list + for( size_t classIdx = 0; classIdx < objClasses.size(); ++classIdx ) + { + // Train a classifier on train dataset + Ptr svm = trainSVMClassifier( svmTrainParamsExt, objClasses[classIdx], vocData, + bowExtractor, featureDetector, resPath ); + + // Now use the classifier over all images on the test dataset and rank according to score order + // also calculating precision-recall etc. 
+ computeConfidences( svm, objClasses[classIdx], vocData, + bowExtractor, featureDetector, resPath ); + // Calculate precision/recall/ap and use GNUPlot to output to a pdf file + computeGnuPlotOutput( resPath, objClasses[classIdx], vocData ); + } + return 0; +} + +#else + +int main() +{ + std::cerr << "OpenCV was built without ml module" << std::endl; + return 0; +} + +#endif // HAVE_OPENCV_ML diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/export-boostdesc.py b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/export-boostdesc.py new file mode 100644 index 00000000..0d89c812 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/export-boostdesc.py @@ -0,0 +1,293 @@ +#!/usr/bin/python + +""" + +/********************************************************************* + * Software License Agreement (BSD License) + * + * Copyright (c) 2016 + * + * Balint Cristian + * + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + *********************************************************************/ + +/* export-boostdesc.py */ +/* Export C headers from binary data */ +// [http://infoscience.epfl.ch/record/186246/files/boostDesc_1.0.tar.gz] + +""" + +import sys +import struct + + + +def float_to_hex(f): + return struct.unpack( ' " % sys.argv[0] ) + sys.exit(0) + + + if ( ( sys.argv[1] != "BGM" ) and + ( sys.argv[1] != "LBGM" ) and + ( sys.argv[1] != "BINBOOST" ) ): + print( "Invalid type [%s]" % sys.argv[1] ) + sys.exit(0) + + # enum literals + Assign = [ "ASSIGN_HARD", + "ASSIGN_BILINEAR", + "ASSIGN_SOFT", + "ASSIGN_HARD_MAGN", + "ASSIGN_SOFT_MAGN" ] + + # open binary data file + f = open( sys.argv[2], 'rb' ) + + # header + print "/*" + print " *" + print " * Header exported from binary." + print " * [%s %s %s]" % ( sys.argv[0], sys.argv[1], sys.argv[2] ) + print " *" + print " */" + + # ini + nDim = 1; + nWLs = 0; + + # dimensionality (where is the case) + if ( ( sys.argv[1] == "LBGM" ) or + ( sys.argv[1] == "BINBOOST" ) ): + nDim = struct.unpack( ' +#include +#include +#include +#include +#include +#include +#include + +using namespace cv; +using namespace cv::xfeatures2d; + +//////////////////////////////////////////////////// +// This program demonstrates the GMS matching strategy. 
+int main(int argc, char* argv[]) +{ + const char* keys = + "{ h help | | print help message }" + "{ l left | | specify left (reference) image }" + "{ r right | | specify right (query) image }" + "{ camera | 0 | specify the camera device number }" + "{ nfeatures | 10000 | specify the maximum number of ORB features }" + "{ fastThreshold | 20 | specify the FAST threshold }" + "{ drawSimple | true | do not draw not matched keypoints }" + "{ withRotation | false | take rotation into account }" + "{ withScale | false | take scale into account }"; + + CommandLineParser cmd(argc, argv, keys); + if (cmd.has("help")) + { + std::cout << "Usage: gms_matcher [options]" << std::endl; + std::cout << "Available options:" << std::endl; + cmd.printMessage(); + return EXIT_SUCCESS; + } + + Ptr orb = ORB::create(cmd.get("nfeatures")); + orb.dynamicCast()->setFastThreshold(cmd.get("fastThreshold")); + Ptr matcher = DescriptorMatcher::create("BruteForce-Hamming"); + + if (!cmd.get("left").empty() && !cmd.get("right").empty()) + { + Mat imgL = imread(cmd.get("left")); + Mat imgR = imread(cmd.get("right")); + + std::vector kpRef, kpCur; + Mat descRef, descCur; + orb->detectAndCompute(imgL, noArray(), kpRef, descRef); + orb->detectAndCompute(imgR, noArray(), kpCur, descCur); + + std::vector matchesAll, matchesGMS; + matcher->match(descCur, descRef, matchesAll); + + matchGMS(imgR.size(), imgL.size(), kpCur, kpRef, matchesAll, matchesGMS, cmd.get("withRotation"), cmd.get("withScale")); + std::cout << "matchesGMS: " << matchesGMS.size() << std::endl; + + Mat frameMatches; + if (cmd.get("drawSimple")) + drawMatches(imgR, kpCur, imgL, kpRef, matchesGMS, frameMatches, Scalar::all(-1), Scalar::all(-1), + std::vector(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); + else + drawMatches(imgR, kpCur, imgL, kpRef, matchesGMS, frameMatches); + imshow("Matches GMS", frameMatches); + waitKey(); + } + else + { + std::vector kpRef; + Mat descRef; + + VideoCapture capture(cmd.get("camera")); + //Camera 
warm-up + for (int i = 0; i < 10; i++) + { + Mat frame; + capture >> frame; + } + + Mat frameRef; + for (;;) + { + Mat frame; + capture >> frame; + + if (frameRef.empty()) + { + frame.copyTo(frameRef); + orb->detectAndCompute(frameRef, noArray(), kpRef, descRef); + } + + TickMeter tm; + tm.start(); + std::vector kp; + Mat desc; + orb->detectAndCompute(frame, noArray(), kp, desc); + tm.stop(); + double t_orb = tm.getTimeMilli(); + + tm.reset(); + tm.start(); + std::vector matchesAll, matchesGMS; + matcher->match(desc, descRef, matchesAll); + tm.stop(); + double t_match = tm.getTimeMilli(); + + matchGMS(frame.size(), frameRef.size(), kp, kpRef, matchesAll, matchesGMS, cmd.get("withRotation"), cmd.get("withScale")); + tm.stop(); + Mat frameMatches; + if (cmd.get("drawSimple")) + drawMatches(frame, kp, frameRef, kpRef, matchesGMS, frameMatches, Scalar::all(-1), Scalar::all(-1), + std::vector(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); + else + drawMatches(frame, kp, frameRef, kpRef, matchesGMS, frameMatches); + + String label = format("ORB: %.2f ms", t_orb); + putText(frameMatches, label, Point(20, 20), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0,0,255)); + label = format("Matching: %.2f ms", t_match); + putText(frameMatches, label, Point(20, 40), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0,0,255)); + label = format("GMS matching: %.2f ms", tm.getTimeMilli()); + putText(frameMatches, label, Point(20, 60), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0,0,255)); + putText(frameMatches, "Press r to reinitialize the reference image.", Point(frameMatches.cols-380, 20), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0,0,255)); + putText(frameMatches, "Press esc to quit.", Point(frameMatches.cols-180, 40), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0,0,255)); + + imshow("Matches GMS", frameMatches); + int c = waitKey(30); + if (c == 27) + break; + else if (c == 'r') + { + frame.copyTo(frameRef); + orb->detectAndCompute(frameRef, noArray(), kpRef, descRef); + } + } + + } + + return EXIT_SUCCESS; +} diff --git 
a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/pct_signatures.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/pct_signatures.cpp new file mode 100644 index 00000000..adbc34a8 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/pct_signatures.cpp @@ -0,0 +1,172 @@ +/* +By downloading, copying, installing or using the software you agree to this license. +If you do not agree to this license, do not download, install, +copy or use the software. + + + License Agreement + For Open Source Computer Vision Library + (3-clause BSD License) + +Copyright (C) 2000-2016, Intel Corporation, all rights reserved. +Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved. +Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved. +Copyright (C) 2015-2016, Itseez Inc., all rights reserved. +Third party copyrights are property of their respective owners. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the names of the copyright holders nor the names of the contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +This software is provided by the copyright holders and contributors "as is" and +any express or implied warranties, including, but not limited to, the implied +warranties of merchantability and fitness for a particular purpose are disclaimed. +In no event shall copyright holders or contributors be liable for any direct, +indirect, incidental, special, exemplary, or consequential damages +(including, but not limited to, procurement of substitute goods or services; +loss of use, data, or profits; or business interruption) however caused +and on any theory of liability, whether in contract, strict liability, +or tort (including negligence or otherwise) arising in any way out of +the use of this software, even if advised of the possibility of such damage. +*/ + +/* +Contributed by Gregor Kovalcik + based on code provided by Martin Krulis, Jakub Lokoc and Tomas Skopal. + +References: + Martin Krulis, Jakub Lokoc, Tomas Skopal. + Efficient Extraction of Clustering-Based Feature Signatures Using GPU Architectures. + Multimedia tools and applications, 75(13), pp.: 80718103, Springer, ISSN: 1380-7501, 2016 + + Christian Beecks, Merih Seran Uysal, Thomas Seidl. + Signature quadratic form distance. + In Proceedings of the ACM International Conference on Image and Video Retrieval, pages 438-445. + ACM, 2010. 
+*/ + +#include +#include +#include + +#include +#include + +using namespace std; +using namespace cv; +using namespace xfeatures2d; + + +void printHelpMessage(void); +void printHelpMessage(void) +{ + cout << "Example of the PCTSignatures algorithm computing and visualizing\n" + "image signature for one image, or comparing multiple images with the first\n" + "image using the signature quadratic form distance.\n\n" + "Usage: pct_signatures ImageToProcessAndDisplay\n" + "or: pct_signatures ReferenceImage [ImagesToCompareWithTheReferenceImage]\n\n" + "The program has 2 modes:\n" + "- single argument: program computes and visualizes the image signature\n" + "- multiple arguments: program compares the first image to the others\n" + " using pct signatures and signature quadratic form distance (SQFD)"; +} + +/** @brief + +Example of the PCTSignatures algorithm. + +The program has 2 modes: +- single argument mode, where the program computes and visualizes the image signature +- multiple argument mode, where the program compares the first image to the others +using signatures and signature quadratic form distance (SQFD) + +*/ +int main(int argc, char** argv) +{ + if (argc < 2) // Check arguments + { + printHelpMessage(); + return 1; + } + + Mat source; + source = imread(argv[1]); // Read the file + + if (!source.data) // Check for invalid input + { + cerr << "Could not open or find the image: " << argv[1]; + return -1; + } + + Mat signature, result; // define variables + int initSampleCount = 2000; + int initSeedCount = 400; + int grayscaleBitsPerPixel = 4; + vector initPoints; + + namedWindow("Source", WINDOW_AUTOSIZE); // Create windows for display. 
+ namedWindow("Result", WINDOW_AUTOSIZE); + + // create the algorithm + PCTSignatures::generateInitPoints(initPoints, initSampleCount, PCTSignatures::UNIFORM); + Ptr pctSignatures = PCTSignatures::create(initPoints, initSeedCount); + pctSignatures->setGrayscaleBits(grayscaleBitsPerPixel); + + // compute and visualize the first image + double start = (double)getTickCount(); + pctSignatures->computeSignature(source, signature); + double end = (double)getTickCount(); + cout << "Signature of the reference image computed in " << (end - start) / (getTickFrequency() * 1.0f) << " seconds." << endl; + PCTSignatures::drawSignature(source, signature, result); + + imshow("Source", source); // show the result + imshow("Result", result); + + if (argc == 2) // single image -> finish right after the visualization + { + waitKey(0); // Wait for user input + return 0; + } + // multiple images -> compare to the first one + else + { + vector images; + vector signatures; + vector distances; + + for (int i = 2; i < argc; i++) + { + Mat image = imread(argv[i]); + if (!source.data) // Check for invalid input + { + cerr << "Could not open or find the image: " << argv[i] << std::endl; + return 1; + } + images.push_back(image); + } + + pctSignatures->computeSignatures(images, signatures); + Ptr pctSQFD = PCTSignaturesSQFD::create(); + pctSQFD->computeQuadraticFormDistances(signature, signatures, distances); + + for (int i = 0; i < (int)(distances.size()); i++) + { + cout << "Image: " << argv[i + 2] << ", similarity: " << distances[i] << endl; + } + waitKey(0); // Wait for user input + } + + return 0; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/pct_webcam.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/pct_webcam.cpp new file mode 100644 index 00000000..a524eb1f --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/pct_webcam.cpp @@ -0,0 +1,170 @@ +/* +By downloading, copying, installing or using the software you agree 
to this license. +If you do not agree to this license, do not download, install, +copy or use the software. + + + License Agreement + For Open Source Computer Vision Library + (3-clause BSD License) + +Copyright (C) 2000-2016, Intel Corporation, all rights reserved. +Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved. +Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved. +Copyright (C) 2015-2016, Itseez Inc., all rights reserved. +Third party copyrights are property of their respective owners. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the names of the copyright holders nor the names of the contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +This software is provided by the copyright holders and contributors "as is" and +any express or implied warranties, including, but not limited to, the implied +warranties of merchantability and fitness for a particular purpose are disclaimed. 
+In no event shall copyright holders or contributors be liable for any direct, +indirect, incidental, special, exemplary, or consequential damages +(including, but not limited to, procurement of substitute goods or services; +loss of use, data, or profits; or business interruption) however caused +and on any theory of liability, whether in contract, strict liability, +or tort (including negligence or otherwise) arising in any way out of +the use of this software, even if advised of the possibility of such damage. +*/ + +/* +Contributed by Gregor Kovalcik + based on code provided by Martin Krulis, Jakub Lokoc and Tomas Skopal. + +References: + Martin Krulis, Jakub Lokoc, Tomas Skopal. + Efficient Extraction of Clustering-Based Feature Signatures Using GPU Architectures. + Multimedia tools and applications, 75(13), pp.: 80718103, Springer, ISSN: 1380-7501, 2016 + + Christian Beecks, Merih Seran Uysal, Thomas Seidl. + Signature quadratic form distance. + In Proceedings of the ACM International Conference on Image and Video Retrieval, pages 438-445. + ACM, 2010. +*/ + +#include +#include +#include + +#include +#include + +using namespace std; +using namespace cv; +using namespace xfeatures2d; + + +void printHelpMessage(void); +void printHelpMessage(void) +{ + cout << "Example of the PCTSignatures algorithm.\n\n" + "This program computes and visualizes position-color-texture signatures\n" + "using images from webcam if available.\n\n" + "Usage:\n" + "pct_webcam [sample_count] [seed_count]\n" + "Note: sample_count must be greater or equal to seed_count."; +} + + +/** @brief + +Example of the PCTSignatures algorithm. + +This program computes and visualizes position-color-texture signatures +of images taken from webcam if available. 
+*/ +int main(int argc, char** argv) +{ + // define variables + Mat frame, signature, result; + int initSampleCount = 2000; + int initSeedCount = 400; + int grayscaleBitsPerPixel = 4; + + // parse for help argument + { + for (int i = 1; i < argc; i++) + { + if ((string)argv[i] == "-h" || (string)argv[i] == "--help") + { + printHelpMessage(); + return 0; + } + } + } + + // parse optional arguments + if (argc > 1) // sample count + { + initSampleCount = atoi(argv[1]); + if (initSampleCount <= 0) + { + cerr << "Sample count have to be a positive integer: " << argv[1] << endl; + return 1; + } + initSeedCount = (int)floor(static_cast(initSampleCount / 4)); + initSeedCount = std::max(1, initSeedCount); // fallback if sample count == 1 + } + if (argc > 2) // seed count + { + initSeedCount = atoi(argv[2]); + if (initSeedCount <= 0) + { + cerr << "Seed count have to be a positive integer: " << argv[2] << endl; + return 1; + } + if (initSeedCount > initSampleCount) + { + cerr << "Seed count have to be lower or equal to sample count!" << endl; + return 1; + } + } + + // create algorithm + Ptr pctSignatures = PCTSignatures::create(initSampleCount, initSeedCount, PCTSignatures::UNIFORM); + pctSignatures->setGrayscaleBits(grayscaleBitsPerPixel); + + // open video capture device + VideoCapture videoCapture; + if (!videoCapture.open(0)) + { + cerr << "Unable to open the first video capture device with ID = 0!" << endl; + return 1; + } + + // Create windows for display. + namedWindow("Source", WINDOW_AUTOSIZE); + namedWindow("Result", WINDOW_AUTOSIZE); + + // run drawing loop + for (;;) + { + videoCapture >> frame; + if (frame.empty()) break; // end of video stream + + pctSignatures->computeSignature(frame, signature); + PCTSignatures::drawSignature(Mat::zeros(frame.size(), frame.type()), signature, result); + + imshow("Source", frame); // Show our images inside the windows. 
+ imshow("Result", result); + + if (waitKey(1) == 27) break; // stop videocapturing by pressing ESC + } + + return 0; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/shape_transformation.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/shape_transformation.cpp new file mode 100644 index 00000000..35526c77 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/shape_transformation.cpp @@ -0,0 +1,89 @@ +/* + * shape_context.cpp -- Shape context demo for shape matching + */ +#include +#include "opencv2/opencv_modules.hpp" + +#ifdef HAVE_OPENCV_SHAPE + +#include "opencv2/shape.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/highgui.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/features2d.hpp" +#include "opencv2/xfeatures2d.hpp" +#include "opencv2/core/utility.hpp" +#include + +using namespace std; +using namespace cv; +using namespace cv::xfeatures2d; + +static void help() +{ + printf("\nThis program demonstrates how to use common interface for shape transformers\n" + "Call\n" + "shape_transformation [image1] [image2]\n"); +} + +int main(int argc, char** argv) +{ + help(); + if (argc < 3) + { + printf("Not enough parameters\n"); + return -1; + } + Mat img1 = imread(argv[1], IMREAD_GRAYSCALE); + Mat img2 = imread(argv[2], IMREAD_GRAYSCALE); + if(img1.empty() || img2.empty()) + { + printf("Can't read one of the images\n"); + return -1; + } + + // detecting keypoints & computing descriptors + Ptr surf = SURF::create(5000); + vector keypoints1, keypoints2; + Mat descriptors1, descriptors2; + surf->detectAndCompute(img1, Mat(), keypoints1, descriptors1); + surf->detectAndCompute(img2, Mat(), keypoints2, descriptors2); + + // matching descriptors + BFMatcher matcher(surf->defaultNorm()); + vector matches; + matcher.match(descriptors1, descriptors2, matches); + + // drawing the results + namedWindow("matches", 1); + Mat img_matches; + drawMatches(img1, keypoints1, img2, keypoints2, 
matches, img_matches); + imshow("matches", img_matches); + + // extract points + vector pts1, pts2; + for (size_t ii=0; ii mytps = createThinPlateSplineShapeTransformer(25000); //TPS with a relaxed constraint + mytps->estimateTransformation(pts1, pts2, matches); + mytps->warpImage(img2, img2); + + imshow("Tranformed", img2); + waitKey(0); + + return 0; +} + +#else + +int main() +{ + std::cerr << "OpenCV was built without shape module" << std::endl; + return 0; +} + +#endif // HAVE_OPENCV_SHAPE diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/surf_matcher.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/surf_matcher.cpp new file mode 100644 index 00000000..5e3d5924 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/surf_matcher.cpp @@ -0,0 +1,225 @@ +#include +#include +#include "opencv2/core.hpp" +#include "opencv2/core/utility.hpp" +#include "opencv2/core/ocl.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/highgui.hpp" +#include "opencv2/features2d.hpp" +#include "opencv2/calib3d.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/xfeatures2d.hpp" + +using namespace cv; +using namespace cv::xfeatures2d; + +const int LOOP_NUM = 10; +const int GOOD_PTS_MAX = 50; +const float GOOD_PORTION = 0.15f; + +int64 work_begin = 0; +int64 work_end = 0; + +static void workBegin() +{ + work_begin = getTickCount(); +} + +static void workEnd() +{ + work_end = getTickCount() - work_begin; +} + +static double getTime() +{ + return work_end /((double)getTickFrequency() )* 1000.; +} + +struct SURFDetector +{ + Ptr surf; + SURFDetector(double hessian = 800.0) + { + surf = SURF::create(hessian); + } + template + void operator()(const T& in, const T& mask, std::vector& pts, T& descriptors, bool useProvided = false) + { + surf->detectAndCompute(in, mask, pts, descriptors, useProvided); + } +}; + +template +struct SURFMatcher +{ + KPMatcher matcher; + template + void match(const T& in1, const T& in2, 
std::vector& matches) + { + matcher.match(in1, in2, matches); + } +}; + +static Mat drawGoodMatches( + const Mat& img1, + const Mat& img2, + const std::vector& keypoints1, + const std::vector& keypoints2, + std::vector& matches, + std::vector& scene_corners_ + ) +{ + //-- Sort matches and preserve top 10% matches + std::sort(matches.begin(), matches.end()); + std::vector< DMatch > good_matches; + double minDist = matches.front().distance; + double maxDist = matches.back().distance; + + const int ptsPairs = std::min(GOOD_PTS_MAX, (int)(matches.size() * GOOD_PORTION)); + for( int i = 0; i < ptsPairs; i++ ) + { + good_matches.push_back( matches[i] ); + } + std::cout << "\nMax distance: " << maxDist << std::endl; + std::cout << "Min distance: " << minDist << std::endl; + + std::cout << "Calculating homography using " << ptsPairs << " point pairs." << std::endl; + + // drawing the results + Mat img_matches; + + drawMatches( img1, keypoints1, img2, keypoints2, + good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), + std::vector(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); + + //-- Localize the object + std::vector obj; + std::vector scene; + + for( size_t i = 0; i < good_matches.size(); i++ ) + { + //-- Get the keypoints from the good matches + obj.push_back( keypoints1[ good_matches[i].queryIdx ].pt ); + scene.push_back( keypoints2[ good_matches[i].trainIdx ].pt ); + } + //-- Get the corners from the image_1 ( the object to be "detected" ) + std::vector obj_corners(4); + obj_corners[0] = Point(0,0); + obj_corners[1] = Point( img1.cols, 0 ); + obj_corners[2] = Point( img1.cols, img1.rows ); + obj_corners[3] = Point( 0, img1.rows ); + std::vector scene_corners(4); + + Mat H = findHomography( obj, scene, RANSAC ); + perspectiveTransform( obj_corners, scene_corners, H); + + scene_corners_ = scene_corners; + + //-- Draw lines between the corners (the mapped object in the scene - image_2 ) + line( img_matches, + scene_corners[0] + Point2f( (float)img1.cols, 0), 
scene_corners[1] + Point2f( (float)img1.cols, 0), + Scalar( 0, 255, 0), 2, LINE_AA ); + line( img_matches, + scene_corners[1] + Point2f( (float)img1.cols, 0), scene_corners[2] + Point2f( (float)img1.cols, 0), + Scalar( 0, 255, 0), 2, LINE_AA ); + line( img_matches, + scene_corners[2] + Point2f( (float)img1.cols, 0), scene_corners[3] + Point2f( (float)img1.cols, 0), + Scalar( 0, 255, 0), 2, LINE_AA ); + line( img_matches, + scene_corners[3] + Point2f( (float)img1.cols, 0), scene_corners[0] + Point2f( (float)img1.cols, 0), + Scalar( 0, 255, 0), 2, LINE_AA ); + return img_matches; +} + +//////////////////////////////////////////////////// +// This program demonstrates the usage of SURF_OCL. +// use cpu findHomography interface to calculate the transformation matrix +int main(int argc, char* argv[]) +{ + const char* keys = + "{ h help | | print help message }" + "{ l left | box.png | specify left image }" + "{ r right | box_in_scene.png | specify right image }" + "{ o output | SURF_output.jpg | specify output save path }" + "{ m cpu_mode | | run without OpenCL }"; + + CommandLineParser cmd(argc, argv, keys); + if (cmd.has("help")) + { + std::cout << "Usage: surf_matcher [options]" << std::endl; + std::cout << "Available options:" << std::endl; + cmd.printMessage(); + return EXIT_SUCCESS; + } + if (cmd.has("cpu_mode")) + { + ocl::setUseOpenCL(false); + std::cout << "OpenCL was disabled" << std::endl; + } + + UMat img1, img2; + + std::string outpath = cmd.get("o"); + + std::string leftName = cmd.get("l"); + imread(leftName, IMREAD_GRAYSCALE).copyTo(img1); + if(img1.empty()) + { + std::cout << "Couldn't load " << leftName << std::endl; + cmd.printMessage(); + return EXIT_FAILURE; + } + + std::string rightName = cmd.get("r"); + imread(rightName, IMREAD_GRAYSCALE).copyTo(img2); + if(img2.empty()) + { + std::cout << "Couldn't load " << rightName << std::endl; + cmd.printMessage(); + return EXIT_FAILURE; + } + + double surf_time = 0.; + + //declare input/output + std::vector 
keypoints1, keypoints2; + std::vector matches; + + UMat _descriptors1, _descriptors2; + Mat descriptors1 = _descriptors1.getMat(ACCESS_RW), + descriptors2 = _descriptors2.getMat(ACCESS_RW); + + //instantiate detectors/matchers + SURFDetector surf; + + SURFMatcher matcher; + + //-- start of timing section + + for (int i = 0; i <= LOOP_NUM; i++) + { + if(i == 1) workBegin(); + surf(img1.getMat(ACCESS_READ), Mat(), keypoints1, descriptors1); + surf(img2.getMat(ACCESS_READ), Mat(), keypoints2, descriptors2); + matcher.match(descriptors1, descriptors2, matches); + } + workEnd(); + std::cout << "FOUND " << keypoints1.size() << " keypoints on first image" << std::endl; + std::cout << "FOUND " << keypoints2.size() << " keypoints on second image" << std::endl; + + surf_time = getTime(); + std::cout << "SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl<<"\n"; + + + std::vector corner; + Mat img_matches = drawGoodMatches(img1.getMat(ACCESS_READ), img2.getMat(ACCESS_READ), keypoints1, keypoints2, matches, corner); + + //-- Show detected matches + + namedWindow("surf matches", 0); + imshow("surf matches", img_matches); + imwrite(outpath, img_matches); + + waitKey(0); + return EXIT_SUCCESS; +} diff --git a/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/video_homography.cpp b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/video_homography.cpp new file mode 100644 index 00000000..709936e6 --- /dev/null +++ b/C++/opencv-4.7.0/opencv_contrib/modules/xfeatures2d/samples/video_homography.cpp @@ -0,0 +1,248 @@ +/* +* video_homography.cpp +* +* Created on: Oct 18, 2010 +* Author: erublee +*/ + +#include +#include "opencv2/opencv_modules.hpp" + +#ifdef HAVE_OPENCV_CALIB3D + +#include "opencv2/calib3d.hpp" +#include "opencv2/videoio.hpp" +#include "opencv2/highgui.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/features2d.hpp" +#include "opencv2/xfeatures2d.hpp" +#include +#include + +using namespace std; +using namespace cv; +using namespace 
cv::xfeatures2d; + +static void help(char **av) +{ + cout << "\nThis program demonstrated the use of features2d with the Fast corner detector and brief descriptors\n" + << "to track planar objects by computing their homography from the key (training) image to the query (test) image\n\n" << endl; + cout << "usage: " << av[0] << "