initial commit

This commit is contained in:
Carl Philipp Klemm 2025-07-23 18:07:23 +02:00
commit fe1f6c1a76
3 changed files with 854 additions and 0 deletions

14
CMakeLists.txt Normal file
View file

@ -0,0 +1,14 @@
# Build configuration for scannassembler, which assembles a regular grid of
# overlapping scan images into one large image (see main.cpp).
# NOTE(review): VERSION 2.4 is ancient; modern CMake deprecates anything
# below 3.5 — consider raising this.
cmake_minimum_required(VERSION 2.4)
project(scannassembler)
# OpenCV 4 installs its headers under a versioned prefix on most distributions
include_directories("/usr/include/opencv4")
add_executable(scannassembler main.cpp )
# NOTE(review): -lopencv_imgcodecs is listed twice; harmless but redundant.
target_link_libraries( scannassembler -lopencv_core -lopencv_imgcodecs -lopencv_highgui -lopencv_aruco -lopencv_bgsegm -lopencv_calib3d -lopencv_ccalib -lopencv_datasets -lopencv_dnn -lopencv_dpm -lopencv_features2d -lopencv_flann -lopencv_freetype -lopencv_fuzzy -lopencv_hdf -lopencv_imgcodecs -lopencv_imgproc -lopencv_video -lopencv_videoio -lopencv_stitching -lopencv_xfeatures2d -lomp)
set_target_properties( scannassembler PROPERTIES COMPILE_FLAGS -m64 LINK_FLAGS -m64)
# NOTE(review): add_definitions is intended for -D preprocessor defines;
# compiler options like -fopenmp / -std=c++11 / -Wall would normally be set
# via COMPILE_FLAGS or CMAKE_CXX_FLAGS.
add_definitions(" -fopenmp -s -std=c++11 -Wall")
set(EXECUTABLE_OUTPUT_PATH ./build)
install(TARGETS scannassembler RUNTIME DESTINATION bin)

656
main.cpp Normal file
View file

@ -0,0 +1,656 @@
#include <iostream>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/bgsegm.hpp>
#include <opencv2/ccalib.hpp>
#include <opencv2/stitching/detail/autocalib.hpp>
#include <opencv2/stitching/detail/blenders.hpp>
#include <opencv2/stitching/detail/timelapsers.hpp>
#include <opencv2/stitching/detail/camera.hpp>
#include <opencv2/stitching/detail/exposure_compensate.hpp>
#include <opencv2/stitching/detail/matchers.hpp>
#include <opencv2/stitching/detail/motion_estimators.hpp>
#include <opencv2/stitching/detail/seam_finders.hpp>
#include <opencv2/stitching/detail/warpers.hpp>
#include <opencv2/stitching/warpers.hpp>
#include <opencv2/stitching/detail/seam_finders.hpp>
#include <opencv2/core/ocl.hpp>
#include <math.h>
#include <unistd.h>
#include <vector>
#include <string>
#include <sstream>
#include <sys/stat.h>
#include <dirent.h>
#include <algorithm>
#include "strnatcmp.h"
// Global verbosity flag, set by the --verbose/-v command line switch.
bool verbose = false;
// Runtime configuration, populated from the command line by parseCmdArgs.
class Config
{
public:
    std::vector<std::string> inputImages; // input file names, or a single directory
    std::string outputImage = "out.png";  // output file name (-o/--output)
    int gridSizeX = 11;                   // number of grid columns (-x/--xSize)
    int gridSizeY = 10;                   // number of grid rows (-y/--ySize)
    int sampleSize = 2;                   // NOTE(review): appears unused — getSample is called with literal 2,2
    float confidence = 1.5f;              // feature match confidence (-c); NOTE(review): usage text claims default 1.2
    int startSearch = 0;                  // image index where the match search starts (--start)
    bool warp = false;                    // warp images to correct camera intrinsics (--warp/--no-warp)
    float deltaX = -1;                    // preset x spacing between images in pixels; -1 = determine automatically (-d)
    float deltaY = -1;                    // preset y spacing between images in pixels; -1 = determine automatically (-d)
    bool illum = true;                    // apply CLAHE brightness correction (--illum/--no-illum)
    bool seamAdjust = true;               // run Voronoi seam finding (--no-seam disables)
};
// Change the current working directory to the directory containing the
// executable, derived from argv[0].
// If argv[0] contains no path separator (e.g. the program was found via
// PATH) the working directory is left unchanged.
// Fixes over the previous version: the old loop started by reading
// path[path.length()] (one past the end), erase(ii, 100) only removed up
// to 100 characters, and with no separator present it chdir'ed to a
// mangled string; the chdir return value was also ignored.
void cd_to_exe_dir( char *argv[] )
{
    std::string path = argv[0];
    // keep everything up to the last path separator ('/' on POSIX,
    // '\\' tolerated for odd inputs)
    const std::string::size_type sep = path.find_last_of("/\\");
    if(sep == std::string::npos) return; // bare program name, nothing to do
    path.erase(sep);
    if(chdir(path.c_str()) != 0)
        std::cout<<"Warning: could not change directory to "<<path<<'\n';
}
// Print command-line usage information to stdout.
// Fixes: user-facing typos ("matiching", "naturaly", "Presett",
// "brigeness") and the documented confidence default, which is 1.5
// (Config::confidence = 1.5f), not 1.2.
static void printUsage()
{
    std::cout<<"Usage: scannassembler [OPTION]... [FILES/DIRECTORY]...\n"
    <<"This program assembles a regular grid of overlapping images into one large image. [FILES/DIRECTORY] is either a list of at least 4 files or a directory containing only images, with naturally sorted file names.\n\n"
    <<"Options:\n"
    <<" -h, --help Display this help.\n"
    <<" -o, --output Output image file name.\n"
    <<" -v, --verbose Make application verbose.\n"
    <<" -x, --xSize X size of grid.\n"
    <<" -y, --ySize Y size of grid.\n"
    <<" -c, --confidence Confidence of image feature matching, default 1.5.\n"
    <<" --start Image number where to start matching search\n"
    <<" --warp Attempt to warp the image to correct for camera intrinsics. Defaults to off\n"
    <<" --no-warp Disable warp.\n"
    <<" -d, --delta [x] [y] Preset distance between images in pixels instead of automatic recognition\n"
    <<" --no-seam blend only mode\n"
    <<" --illum correct for differences in image brightness. Defaults to on\n"
    <<" --no-illum Disable image brightness correction\n";
}
// Parse the command line into *config.
// Returns 0 on success; -1 when the program should exit immediately
// (no arguments, --help, or a flag missing its value argument). Any
// token that is not a recognized option is collected as an input
// file/directory name.
static int parseCmdArgs(int argc, char** argv, Config *config)
{
    if (argc == 1)
    {
        printUsage();
        return -1;
    }
    for (int i = 1; i < argc; i++)
    {
        const std::string token(argv[i]);
        // fetch the value argument following the current flag;
        // yields false when the command line ends prematurely
        auto nextValue = [&i, argc, argv](const char *&value) -> bool
        {
            ++i;
            if (i >= argc) return false;
            value = argv[i];
            return true;
        };
        const char *value = NULL;
        if (token == "--help" || token == "-h")
        {
            printUsage();
            return -1;
        }
        else if (token == "--output" || token == "-o")
        {
            if (!nextValue(value)) return -1;
            config->outputImage = value;
        }
        else if (token == "--verbose" || token == "-v")
        {
            verbose = true;
            std::cout<<"Verbose mode on\n";
        }
        else if (token == "--xSize" || token == "-x")
        {
            if (!nextValue(value)) return -1;
            config->gridSizeX = atoi(value);
        }
        else if (token == "--ySize" || token == "-y")
        {
            if (!nextValue(value)) return -1;
            config->gridSizeY = atoi(value);
        }
        else if (token == "--confidence" || token == "-c")
        {
            if (!nextValue(value)) return -1;
            config->confidence = atof(value);
        }
        else if (token == "--start")
        {
            if (!nextValue(value)) return -1;
            config->startSearch = atoi(value);
        }
        else if (token == "--delta" || token == "-d")
        {
            // consumes the two following tokens as x and y
            if (i + 2 >= argc) return -1;
            config->deltaX = atof(argv[i+1]);
            config->deltaY = atof(argv[i+2]);
            i += 2;
        }
        else if (token == "--warp")     config->warp = true;
        else if (token == "--no-warp")  config->warp = false;
        else if (token == "--illum")    config->illum = true;
        else if (token == "--no-illum") config->illum = false;
        else if (token == "--no-seam")  config->seamAdjust = false;
        else config->inputImages.push_back(token);
    }
    return 0;
}
// Display each image in sequence in a single "Viewer" window;
// any key press advances to the next image.
void showImages(std::vector<cv::Mat> images)
{
    cv::namedWindow( "Viewer", cv::WINDOW_AUTOSIZE );
    for(const cv::Mat& image : images)
    {
        cv::imshow( "Viewer", image );
        cv::waitKey(0);
    }
    cv::destroyWindow("Viewer");
}
// UMat overload: display each image in sequence in a single "Viewer"
// window; any key press advances to the next image.
void showImages(std::vector<cv::UMat> images)
{
    cv::namedWindow( "Viewer", cv::WINDOW_AUTOSIZE );
    for(const cv::UMat& image : images)
    {
        cv::imshow( "Viewer", image );
        cv::waitKey(0);
    }
    cv::destroyWindow("Viewer");
}
// Display a single image in a "Viewer" window and wait for a key press.
void showImages(cv::Mat image)
{
    cv::namedWindow( "Viewer", cv::WINDOW_AUTOSIZE );
    cv::imshow( "Viewer", image );
    cv::waitKey(0);
    cv::destroyWindow("Viewer");
}
std::vector<cv::Mat> getSample(Config config, std::vector<cv::Mat> images, const int centralIndex, const int gridSizeX = 2, const int gridSizeY = 2)
{
std::vector<cv::Mat> sampleImages;
if(images.size() < (uint)centralIndex+gridSizeX+gridSizeY*(config.gridSizeX)) return sampleImages;
for(int y = 0; y < gridSizeY; y++)
{
for(int x = 0; x < gridSizeX; x++)
{
sampleImages.push_back(images[centralIndex+x+y*(config.gridSizeX)]);
}
}
return sampleImages;
}
// Estimate per-image camera parameters from a set of overlapping sample
// images: detect KAZE features, match them pairwise, estimate
// homography-based camera parameters and bundle-adjust them.
// Returns false when any sample fails to match into a single connected
// component, or when estimation/adjustment fails. On success *cameras
// holds one CameraParams entry per sample image.
bool calculateCameraIntrinsics(std::vector<cv::Mat> sampleImages, std::vector<cv::detail::CameraParams>* cameras, float confidence = 1.2f)
{
    std::vector<cv::detail::ImageFeatures> features(sampleImages.size());
    cv::Ptr<cv::Feature2D> featurefinder = cv::KAZE::create();
    // detect and describe features in every sample image
    for(uint i = 0; i < sampleImages.size(); i++) featurefinder->detectAndCompute(sampleImages[i], cv::Mat(), features[i].keypoints, features[i].descriptors);
    for(uint i = 0; i<sampleImages.size(); i++)std::cout<<"Image "<<i<<" features: " << features[i].keypoints.size()<<'\n';
    cv::detail::BestOf2NearestMatcher matcher;
    std::vector<cv::detail::MatchesInfo> pairwiseMatches;
    matcher(features, pairwiseMatches);
    // keep only images that match into one connected component at the
    // given confidence; if any image was dropped the set is unusable
    std::vector<int> ids = cv::detail::leaveBiggestComponent(features, pairwiseMatches, confidence);
    if(ids.size() != sampleImages.size())
    {
        std::cout<<"Match failure only "<<ids.size()<<" matches\n";
        return false;
    }
    std::cout<<"Matched!\nCaluclating Camera Matrixes\n";
    cv::detail::HomographyBasedEstimator estimator;
    if(!(estimator(features, pairwiseMatches, *cameras)))
    {
        std::cout << "Homography estimation failed.\n";
        return false;
    }
    // convert rotation matrices to CV_32F before bundle adjustment
    // (same preparation as OpenCV's stitching_detailed sample)
    for (uint i = 0; i < cameras->size(); i++)
    {
        cv::Mat R;
        cameras->at(i).R.convertTo(R, CV_32F);
        cameras->at(i).R = R;
    }
    cv::detail::BundleAdjusterRay adjuster;
    adjuster.setRefinementMask(cv::Mat::ones(3, 3, CV_8U)); // refine every parameter
    if(!adjuster(features, pairwiseMatches, *cameras))
    {
        std::cout << "Camera parameters adjusting failed.\n";
        return false;
    }
    return true;
}
// Expand and normalize the list of input file names.
// Fewer than 4 entries is interpreted as "the first entry is a
// directory": it is scanned and every regular file found replaces the
// list. When sort is true the names are put into natural order
// (compareNat, e.g. img2 before img10).
// Fixes over the previous version: the DIR handle was never closed
// (resource leak), a directory name without a trailing '/' produced
// broken concatenated paths, and an empty input list indexed
// fileNames[0] out of bounds.
std::vector<std::string> processFileNames(std::vector<std::string> fileNames, bool sort = true)
{
    if(fileNames.empty()) return fileNames; // nothing to do
    if(fileNames.size() < 4)
    {
        if(verbose)std::cout<<"assuming directory\n";
        DIR *directory = opendir(fileNames[0].c_str());
        if(directory == NULL)
        {
            std::cout<<"Cant open directory "<<fileNames[0]<<'\n';
            return fileNames;
        }
        std::string dirName = fileNames[0];
        // ensure the directory name ends in a separator so the
        // concatenation below yields valid paths
        if(dirName[dirName.size()-1] != '/') dirName += '/';
        fileNames.clear();
        struct dirent *dirEntry;
        while ((dirEntry = readdir(directory)) != NULL)
        {
            if(dirEntry->d_type == DT_REG) fileNames.push_back(dirName + std::string(dirEntry->d_name));
        }
        closedir(directory); // previously leaked
    }
    if(sort) std::sort(fileNames.begin(),fileNames.end(), compareNat);
    if(verbose)for(unsigned i = 0; i < fileNames.size(); i++) std::cout<<fileNames[i]<<'\n';
    return fileNames;
}
std::vector<cv::Mat> loadImages(std::vector<std::string> fileNames)
{
std::vector<cv::Mat> images;
for(uint i = 0; i < fileNames.size(); i++)
{
cv::Mat tmpImage = cv::imread(fileNames[i]);
if(tmpImage.data)images.push_back(tmpImage);
else
{
std::cout<<"can not read image "<<i<<" from "<<fileNames[i]<<'\n';
}
}
return images;
}
// Equalize local brightness of a BGR image by running CLAHE on the
// lightness channel only. clipLmt is the CLAHE clip limit.
cv::Mat illumCorrection(cv::Mat bgr_image, int clipLmt = 2)
{
    // work in Lab so only lightness is touched, not color
    cv::Mat labImage;
    cv::cvtColor(bgr_image, labImage, cv::COLOR_BGR2Lab);
    // split out the channels; labPlanes[0] is the L (lightness) channel
    std::vector<cv::Mat> labPlanes(3);
    cv::split(labImage, labPlanes);
    // run CLAHE on lightness
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE();
    clahe->setClipLimit(clipLmt);
    cv::Mat equalized;
    clahe->apply(labPlanes[0], equalized);
    // reassemble the Lab image with the equalized L channel
    equalized.copyTo(labPlanes[0]);
    cv::merge(labPlanes, labImage);
    // and convert back to BGR
    cv::Mat result;
    cv::cvtColor(labImage, result, cv::COLOR_Lab2BGR);
    return result;
}
// Average a stack of equally sized images pixel-wise.
// The result is converted to the first image's type, or to the given
// OpenCV type when one is passed (type != -1).
cv::Mat accumulate( std::vector< cv::Mat > images, int type = -1 )
{
    // sum in double precision to avoid clipping/overflow
    cv::Mat sum = cv::Mat::zeros(images.at(0).size(), CV_64FC3);
    for(const cv::Mat& image : images)
    {
        cv::Mat asDouble;
        image.convertTo(asDouble, CV_64FC3);
        cv::add(asDouble, sum, sum);
    }
    sum = sum / images.size();
    if(type == -1) sum.convertTo(sum, images.at(0).type());
    else sum.convertTo(sum, type);
    return sum;
}
// Subtract the zero-centered average of all images from each image in
// place, removing the static background/illumination gradient that is
// common to the whole stack.
void removeMean(std::vector< cv::Mat >* images)
{
    cv::Mat background = accumulate(*images, CV_64FC3);
    background = background - cv::mean(background); // center around zero
    for(cv::Mat& image : *images)
    {
        cv::Mat asDouble;
        image.convertTo(asDouble, CV_64FC3);
        asDouble = asDouble - background;
        asDouble.convertTo(image, image.type());
    }
}
// Map every pixel of an image of size src_size through a spherical
// projection with camera matrix K, rotation R and scale flocal, and
// return the minimum (top-left) warped coordinate.
cv::Point2f projectTopLeftPoint(cv::Size src_size, cv::InputArray K, cv::InputArray R, float flocal)
{
    cv::detail::SphericalProjector projector;
    projector.scale = flocal;
    projector.setCameraParams(K,R);
    // track the smallest projected u/v over the whole image
    cv::Point2f topLeft((std::numeric_limits<float>::max)(), (std::numeric_limits<float>::max)());
    for (int row = 0; row < src_size.height; ++row)
    {
        for (int col = 0; col < src_size.width; ++col)
        {
            float u, v;
            projector.mapForward(static_cast<float>(col), static_cast<float>(row), u, v);
            topLeft.x = (std::min)(topLeft.x, u);
            topLeft.y = (std::min)(topLeft.y, v);
        }
    }
    return topLeft;
}
// Estimate the mean x/y translation between neighbouring grid images
// from the projected top-left corners of 2x2 sample sets.
// input[i] holds the four corners of sample i in row-major order
// {top-left, top-right, bottom-left, bottom-right}. A first mean is
// computed, deltas deviating more than 20% from it are discarded, and
// the mean is recomputed. Returns NaN components when nothing remains
// (the caller checks isnan).
// Fix: use fabs instead of abs — with only <math.h> included, abs can
// resolve to the integer overload and truncate the fractional part of
// the outlier test.
cv::Point2f wheigtedMean( std::vector< std::vector<cv::Point2f> > input )
{
    //findDeltas: horizontal spacing from the two rows, vertical spacing from the two columns
    std::vector<cv::Point2f> delta;
    for(uint i = 0; i < input.size(); i++)
    {
        cv::Point2f tmp;
        tmp.x = ((input[i][1].x-input[i][0].x)+(input[i][3].x-input[i][2].x))/2;
        tmp.y = ((input[i][2].y-input[i][0].y)+(input[i][3].y-input[i][1].y))/2;
        delta.push_back(tmp);
    }
    // mean of a delta list; yields NaN components for an empty list
    auto meanOf = [](const std::vector<cv::Point2f>& deltas)
    {
        cv::Point2f mean(0,0);
        for(uint i = 0; i < deltas.size(); i++)
        {
            mean.x = mean.x + deltas[i].x;
            mean.y = mean.y + deltas[i].y;
        }
        mean.x = mean.x/deltas.size();
        mean.y = mean.y/deltas.size();
        return mean;
    };
    cv::Point2f avDelta = meanOf(delta);
    std::cout<<"Mean Delta x: "<<avDelta.x<<" y: "<<avDelta.y<<std::endl;
    //refine mean: drop deltas more than 20% away from the first mean
    for(uint i = 0; i < delta.size(); i++)
    {
        if( fabs(delta[i].x-avDelta.x)/avDelta.x > 0.2 || fabs(delta[i].y-avDelta.y)/avDelta.y > 0.2 )
        {
            std::cout<<"removing x: "<<delta[i].x<<" y: "<<delta[i].y<<std::endl;
            delta.erase(delta.begin()+i);
            i--;
        }
    }
    //recompute the mean over the surviving deltas
    avDelta = meanOf(delta);
    std::cout<<"refined Mean Delta x: "<<avDelta.x<<" y: "<<avDelta.y<<std::endl;
    return avDelta;
}
// Entry point: parse options, load the image grid, determine the
// inter-image translation (from camera calibration, or from --delta),
// then optionally warp, seam-find and brightness-correct, and finally
// blend everything into one output image.
int main(int argc, char* argv[])
{
    cv::ocl::setUseOpenCL(false); // force CPU code paths
    std::cout<<"UVOS scann sticher v2.1\n";
    cd_to_exe_dir( argv );
    Config config;
    int parseReturn = parseCmdArgs(argc, argv, &config);
    if(parseReturn != 0) return parseReturn;
    // expand directories / natural-sort names; the grid must be fully populated
    std::vector<std::string> fileNames = processFileNames(config.inputImages);
    if(fileNames.size() != (uint)config.gridSizeX*config.gridSizeY)
    {
        // NOTE(review): the message prints config.inputImages.size(), but the
        // check is against fileNames.size() — these differ when a directory
        // was passed.
        std::cout<<"This grid needs "<<config.gridSizeX*config.gridSizeY<<" images. "<<config.inputImages.size()<<" given.\n";
        return -1;
    }
    std::vector<cv::Mat> images = loadImages(fileNames);
    if(images.size() != fileNames.size())
    {
        images.size() == 0 ? std::cout<<"Could not read any images.\n" : std::cout<<"Could not read all images.\n";
        return -1;
    }
    std::vector<cv::Size> sizes(images.size());
    for(uint i = 0; i < images.size(); i++) sizes[i] = images[i].size();
    cv::Point2f avDelta; // mean translation between neighbouring grid images, in pixels
    std::vector<cv::Mat> masks(images.size());
    std::vector<cv::Mat> imagesWarped(images.size());
    if(config.deltaX == -1 || config.deltaY == -1)
    {
        // no preset delta: derive it from camera calibration of 2x2 samples
        // NOTE(review): avFocal starts at 1, so the mean below is biased by
        // 1/N — this probably should start at 0.
        double avFocal = 1;
        //Caluclate camera Intrinsics
        std::vector< std::vector<cv::detail::CameraParams> > cameras;
        {
            std::vector<cv::Mat> samples;
            int bottomLeftSampleImageIndex = config.startSearch;
            int subsequentMatches = 0;
            do
            {
                std::cout<<"trying set "<<bottomLeftSampleImageIndex<<'\n';
                samples = getSample(config, images, bottomLeftSampleImageIndex,2,2);
                // step size grows quadratically with consecutive successes
                bottomLeftSampleImageIndex+=(subsequentMatches*subsequentMatches+1)*2;
                std::vector<cv::detail::CameraParams> tmpCameras;
                if(samples.size() != 0 && calculateCameraIntrinsics(samples, &tmpCameras, config.confidence))
                {
                    cameras.push_back(tmpCameras);
                    subsequentMatches++;
                }
                else subsequentMatches = 0;
            }while( samples.size() != 0 ); // getSample returns empty past the end of the grid
        }
        if(cameras.size() == 0 )
        {
            std::cout<<"Can not find match.";
            return -1;
        }
        //Caluclate mean focal length
        for(uint i = 0; i < cameras.size(); i++)
        {
            for(uint u = 0; u < cameras[i].size(); u++)
            {
                avFocal += cameras[i][u].focal;
            }
        }
        avFocal = avFocal/(cameras.size()*cameras[0].size());
        //Caluclate Master Matrixes: element-wise mean of all K and R matrices
        cv::Mat_<double> avCameraMatrix = cv::Mat::zeros(3,3,CV_64F);
        cv::Mat_<double> avRotationMatrix = cv::Mat::zeros(3,3,CV_64F);
        for(uint i = 0; i< cameras.size(); i++)
        {
            for(uint u = 0; u < cameras[i].size(); u++)
            {
                cv::add(avCameraMatrix, cameras[i][u].K(), avCameraMatrix);
                cv::add(avRotationMatrix, cameras[i][u].R, avRotationMatrix);
            }
        }
        avCameraMatrix = avCameraMatrix/(cameras.size()*cameras[0].size());
        avRotationMatrix = avRotationMatrix/(cameras.size()*cameras[0].size());
        std::cout<<"Master Camera Intrinsics :\nK:\n" << avCameraMatrix << "\nR:\n" <<avRotationMatrix<<'\n'<<"Focal: "<<avFocal<<'\n';
        // Caluclate smaple corners: project each sample's top-left corner so
        // wheigtedMean can derive the grid spacing from corner differences
        std::vector< std::vector<cv::Point2f> > sampleCorners(cameras.size());
        for(uint i = 0; i < cameras.size(); i++)
        {
            sampleCorners[i] = std::vector<cv::Point2f>(cameras[i].size());
            for(uint u = 0; u < cameras[i].size(); u++)
            {
                cv::Mat_<float> K;
                cameras[i][u].K().convertTo(K, CV_32F);
                cv::Mat_<float> R;
                cameras[i][u].R.convertTo(R, CV_32F);
                cv::Mat uxmap, uymap; // NOTE(review): unused
                sampleCorners[i][u] = projectTopLeftPoint(images[0].size(), K, R, avFocal);
                std::cout<<"corner of set "<<i<<" sample "<<u<<" : x: "<<sampleCorners[i][u].x<<" y: "<<sampleCorners[i][u].y<<'\n';
            }
        }
        avDelta = wheigtedMean(sampleCorners);
        if(isnan(avDelta.x) || isnan(avDelta.y))
        {
            std::cout<<"Deltas not convergant.\n";
            return 1;
        }
        if(config.warp)
        {
            // warp all images (and their masks) with the averaged intrinsics
            std::cout<<"Warping Images\n";
            cv::Ptr<cv::WarperCreator> warperCreator = cv::makePtr<cv::SphericalWarper>();
            cv::Ptr<cv::detail::RotationWarper> warper = warperCreator->create(avFocal);
            cv::Mat_<float> avCameraMatrix32f;
            cv::Mat_<float> avRotationMatrix32f;
            avCameraMatrix.convertTo(avCameraMatrix32f, CV_32F);
            avRotationMatrix.convertTo(avRotationMatrix32f, CV_32F);
            for (uint i = 0; i < images.size(); i++)
            {
                // NOTE(review): the inner if(config.warp) is redundant, we are
                // already inside that branch
                if(config.warp) warper->warp(images[i], avCameraMatrix32f, avRotationMatrix32f, cv::INTER_LINEAR, cv::BORDER_REFLECT, imagesWarped[i]);
                images[i].release();
            }
            //warp masks
            for (uint i = 0; i < imagesWarped.size(); i++)
            {
                masks[i].create(sizes[i], CV_8U);
                masks[i].setTo(cv::Scalar::all(255));
                warper->warp(masks[i], avCameraMatrix32f, avRotationMatrix32f, cv::INTER_LINEAR, cv::BORDER_REFLECT, masks[i]);
            }
        }
        else
        {
            // no warp: pass images through and use full opaque masks
            for (uint i = 0; i < images.size(); i++)
            {
                masks[i].create(sizes[i], CV_8U);
                masks[i].setTo(cv::Scalar::all(255));
                imagesWarped[i] = images[i];
                images[i].release();
            }
        }
    }
    else
    {
        // preset delta given on the command line: skip calibration entirely
        avDelta.x = config.deltaX;
        avDelta.y = config.deltaY;
        for (uint i = 0; i < images.size(); i++)
        {
            imagesWarped[i] = images[i];
            masks[i].create(images[i].size(), CV_8U);
            masks[i].setTo(cv::Scalar::all(255));
            images[i].release();
        }
    }
    //calculate Corners of all images: place each image on the output canvas
    //according to its grid position times the mean delta
    std::vector<cv::Point> corners(imagesWarped.size());
    for(uint i = 0; i < imagesWarped.size(); i++)
    {
        cv::Point2i postionOfCurrentImage;
        postionOfCurrentImage.x = i % (config.gridSizeX);
        postionOfCurrentImage.y = (i - i % (config.gridSizeX))/config.gridSizeX;
        // NOTE(review): config.inputImages may hold only a directory name
        // (see processFileNames); indexing it with i can read out of bounds
        // in verbose mode — fileNames[i] would be the safe choice.
        if(verbose)std::cout<<"postition of image "<<i<<" file name: "<<config.inputImages[i]<<" x: "<<postionOfCurrentImage.x<<" y: "<<postionOfCurrentImage.y<<'\n';
        corners[i].x = postionOfCurrentImage.x*avDelta.x;
        corners[i].y = -avDelta.y*config.gridSizeY + postionOfCurrentImage.y*avDelta.y;
    }
    if(config.seamAdjust)
    {
        // find seams between overlapping images; the seam finder works on
        // 32 bit float UMats and updates the masks in place
        std::cout<<"Adjusting seams\n";
        std::vector<cv::UMat> imagesWarped32f(imagesWarped.size());
        std::vector<cv::UMat> masksUmat(imagesWarped.size());
        for (uint i = 0; i < imagesWarped.size(); i++)
        {
            imagesWarped[i].convertTo(imagesWarped32f[i], CV_32F);
            masks[i].copyTo(masksUmat[i]);
        }
        cv::detail::VoronoiSeamFinder seamFinder;
        seamFinder.find(imagesWarped32f, corners, masksUmat);
        // copy the adjusted masks back and free the temporaries
        for (uint i = 0; i < imagesWarped.size(); i++)
        {
            masksUmat[i].copyTo(masks[i]);
            masksUmat[i].release();
            imagesWarped32f[i].release();
        }
        imagesWarped32f.clear();
        masksUmat.clear();
    }
    if(config.illum)
    {
        std::cout<<"Adjusting Illumination\n";
        for(uint i = 0; i < imagesWarped.size(); i++) imagesWarped[i] = illumCorrection(imagesWarped[i], 3);
    }
    std::cout<<"Removeing Mean\n";
    removeMean(&imagesWarped);
    //showImages(accumulate(images));
    std::cout<<"Composing\n";
    //compose: feed every image with its mask and corner into a 5 band
    //multi-band blender and write the blended result
    cv::Ptr<cv::detail::Blender> blender;
    blender = cv::detail::Blender::createDefault(cv::detail::Blender::Blender::MULTI_BAND, false);
    cv::detail::MultiBandBlender* mb = dynamic_cast<cv::detail::MultiBandBlender*>(blender.get());
    mb->setNumBands(5);
    blender->prepare(corners, sizes);
    for (uint i = 0; i < imagesWarped.size(); i++)
    {
        blender->feed(imagesWarped[i], masks[i], corners[i]);
    }
    cv::Mat result;
    cv::Mat result_mask;
    blender->blend(result, result_mask);
    result.convertTo(result, CV_8UC3);
    cv::imwrite(config.outputImage, result);
    return 0;
}

184
strnatcmp.h Normal file
View file

@ -0,0 +1,184 @@
/* -*- mode: c; c-file-style: "k&r" -*-
strnatcmp.c -- Perform 'natural order' comparisons of strings in C.
Copyright (C) 2000, 2004 by Martin Pool <mbp sourcefrog net>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
/***********************************************************************
* Now let's make some changes:
*
* 1. std::string support
* 2. usable with std::sort and std::vectors
*
* ********************************************************************/
#ifndef __STRNATCMP_HPP
#define __STRNATCMP_HPP
#include <cstddef> /* size_t */
#include <cctype>
#include <string>
typedef char nat_char;
int strnatcmp(nat_char const *a, nat_char const *b);
int strnatcasecmp(nat_char const *a, nat_char const *b);
// Locale-safe isdigit wrapper: the cast avoids UB for negative chars.
static inline int nat_isdigit(nat_char a){
    return isdigit(static_cast<unsigned char>(a));
}
// Locale-safe isspace wrapper: the cast avoids UB for negative chars.
static inline int nat_isspace(nat_char a){
    return isspace(static_cast<unsigned char>(a));
}
// Locale-safe toupper wrapper: the cast avoids UB for negative chars.
static inline nat_char nat_toupper(nat_char a){
    return static_cast<nat_char>(toupper(static_cast<unsigned char>(a)));
}
// Compare two right-aligned (no leading zeros) digit runs.
// The longer run of digits wins; for runs of equal length the first
// differing digit decides — that outcome is remembered in bias until
// we know both runs have the same magnitude.
static int compare_right(nat_char const *a, nat_char const *b){
    int bias = 0;
    while(true){
        const bool aDigit = nat_isdigit(*a) != 0;
        const bool bDigit = nat_isdigit(*b) != 0;
        if(!aDigit && !bDigit)
            return bias;           // both runs ended together: magnitudes equal
        if(!aDigit)
            return -1;             // a's run is shorter, so a is smaller
        if(!bDigit)
            return +1;             // b's run is shorter, so b is smaller
        if(*a != *b){
            if(bias == 0)
                bias = (*a < *b) ? -1 : +1;
        }
        else if(!*a && !*b)
            return bias;
        ++a; ++b;
    }
}
// Compare two left-aligned numbers (fractional parts, i.e. runs with
// leading zeros): the first position with a differing value wins.
static int compare_left(nat_char const *a, nat_char const *b){
    while(nat_isdigit(*a) || nat_isdigit(*b)){
        if(!nat_isdigit(*a))
            return -1;
        if(!nat_isdigit(*b))
            return +1;
        if(*a != *b)
            return (*a < *b) ? -1 : +1;
        ++a; ++b;
    }
    return 0;
}
// Core natural-order comparison: walks both strings in lock step,
// comparing runs of digits numerically and everything else character
// by character. fold_case selects case-insensitive comparison.
// Returns <0, 0 or >0 like strcmp.
static int strnatcmp0(nat_char const *a, nat_char const *b, int fold_case){
    int ai, bi;
    nat_char ca, cb;
    int fractional, result;
    ai = bi = 0;
    while (1) {
        ca = a[ai]; cb = b[bi];
        /* skip over leading spaces */
        while (nat_isspace(ca))
            ca = a[++ai];
        while (nat_isspace(cb))
            cb = b[++bi];
        /* process run of digits: a leading zero marks a fractional part,
           which compares digit-by-digit (compare_left); otherwise the
           longer/greater run wins (compare_right) */
        if (nat_isdigit(ca) && nat_isdigit(cb)) {
            fractional = (ca == '0' || cb == '0');
            if (fractional) {
                if ((result = compare_left(a+ai, b+bi)) != 0)
                    return result;
            } else {
                if ((result = compare_right(a+ai, b+bi)) != 0)
                    return result;
            }
        }
        if (!ca && !cb) {
            /* The strings compare the same. Perhaps the caller
               will want to call strcmp to break the tie. */
            return 0;
        }
        if (fold_case) {
            ca = nat_toupper(ca);
            cb = nat_toupper(cb);
        }
        if (ca < cb)
            return -1;
        if (ca > cb)
            return +1;
        ++ai; ++bi;
    }
}
/* Case-sensitive natural-order comparison (the previous comment here
   claimed the opposite; fold_case is 0, so case IS significant). */
int strnatcmp(nat_char const *a, nat_char const *b) {
    return strnatcmp0(a, b, 0);
}
/* Case-insensitive natural-order comparison: recognizes numeric runs
   and treats lower- and upper-case letters as equal. */
int strnatcasecmp(nat_char const *a, nat_char const *b) {
    return strnatcmp0(a, b, 1);
}
//You can use this function with std::sort and vector
bool compareNat(const std::string& a,const std::string& b){
if(strnatcasecmp(a.c_str(),b.c_str())==-1) return true;
else return false;
}
//You can use this function with qsort
int compareNatq(const void *a, const void *b){
char const *pa = *(char const **)a, *pb = *(char const **)b;
return strnatcasecmp(pa, pb);
}
#endif