code
stringlengths 1
1.05M
| repo_name
stringlengths 6
83
| path
stringlengths 3
242
| language
stringclasses 222
values | license
stringclasses 20
values | size
int64 1
1.05M
|
|---|---|---|---|---|---|
/******************************************************************************
** Filename: float2int.h
** Purpose: Routines for converting float features to int features
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef FLOAT2INT_H
#define FLOAT2INT_H
/*-----------------------------------------------------------------------------
Include Files and Type Defines
-----------------------------------------------------------------------------*/
#include "intmatcher.h"
#include "ocrfeatures.h"
// Number of discrete values an integer feature dimension can take: [0, 255].
#define INT_FEAT_RANGE 256
// Vertical shift applied when converting baseline-relative y values.
// NOTE(review): presumably a fraction of the normalized cell height —
// confirm at the use sites in the float->int conversion routines.
#define BASELINE_Y_SHIFT (0.25)
#endif
|
2301_81045437/tesseract
|
src/classify/float2int.h
|
C
|
apache-2.0
| 1,258
|
/******************************************************************************
** Filename: fpoint.cpp
** Purpose: Abstract data type for a 2D point (floating point coords)
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------*/
#define _USE_MATH_DEFINES // for M_PI
#include "fpoint.h"
#include <cmath> // for M_PI
#include <cstdio>
/*----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------*/
/**
 * Compute the Euclidean distance between two points.
 * @param A first point
 * @param B second point
 * @return distance between A and B (non-negative)
 */
float DistanceBetween(FPOINT A, FPOINT B) {
  const double xd = XDelta(A, B);
  const double yd = YDelta(A, B);
  // xd and yd are already doubles, so the previous static_cast<double> of the
  // sum was redundant; also use the std-qualified sqrt in this C++ file.
  return std::sqrt(xd * xd + yd * yd);
}
/**
 * Compute the angle from Point1 to Point2, rescaled so that a full
 * revolution (2*pi radians) corresponds to FullScale, and clamped into
 * the half-open range [0, FullScale).
 * @param Point1 start point of the direction vector
 * @param Point2 end point of the direction vector
 * @param FullScale value to associate with 2*pi (e.g. 360 for degrees)
 * @return normalized angle in [0, FullScale)
 */
float NormalizedAngleFrom(FPOINT *Point1, FPOINT *Point2, float FullScale) {
  const float kTwoPi = 2.0 * M_PI;
  float angle = AngleFrom(*Point1, *Point2);
  // atan2 yields values in (-pi, pi]; wrap negatives into [0, 2*pi).
  if (angle < 0.0) {
    angle += kTwoPi;
  }
  // Rescale so that a full circle maps onto FullScale.
  angle *= FullScale / kTwoPi;
  // Guard against floating-point rounding pushing us out of range.
  if (angle < 0.0 || angle >= FullScale) {
    angle = 0.0;
  }
  return angle;
}
|
2301_81045437/tesseract
|
src/classify/fpoint.cpp
|
C++
|
apache-2.0
| 2,220
|
/******************************************************************************
** Filename: fpoint.h
** Purpose: Abstract data type for 2D points (floating point coords)
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef FPOINT_H
#define FPOINT_H
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include <cmath>
#include <cstdio>
/* define data structure to hold 2D points or vectors using floating point */
struct FPOINT {
  float x, y;
};
// A vector has the same representation as a point.
using FVECTOR = FPOINT;
/**----------------------------------------------------------------------------
Macros
----------------------------------------------------------------------------**/
/* macros for computing miscellaneous functions of 2 points */
// Signed x displacement from A to B.
#define XDelta(A, B) ((B).x - (A).x)
// Signed y displacement from A to B.
#define YDelta(A, B) ((B).y - (A).y)
// Slope of the line A->B; divides by zero when A.x == B.x.
#define SlopeFrom(A, B) (YDelta(A, B) / XDelta(A, B))
// Angle of the vector A->B in radians, in (-pi, pi], via atan2.
#define AngleFrom(A, B) (atan2((double)YDelta(A, B), (double)XDelta(A, B)))
// y-value of the (non-vertical) line through A and B at abscissa X.
#define XIntersectionOf(A, B, X) (SlopeFrom(A, B) * ((X)-A.x) + A.y)
/*-------------------------------------------------------------------------
Public Function Prototypes
---------------------------------------------------------------------------*/
float DistanceBetween(FPOINT A, FPOINT B);
float NormalizedAngleFrom(FPOINT *Point1, FPOINT *Point2, float FullScale);
#endif
|
2301_81045437/tesseract
|
src/classify/fpoint.h
|
C
|
apache-2.0
| 2,190
|
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: intfeaturespace.cpp
// Description: Indexed feature space based on INT_FEATURE_STRUCT.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#define _USE_MATH_DEFINES // for M_PI
#include "intfeaturespace.h"
#include <cmath> // for M_PI
#include "intfx.h"
namespace tesseract {
// Constructs an empty (zero-sized) feature space; Init() must be called
// before use to give each dimension a non-zero bucket count.
IntFeatureSpace::IntFeatureSpace() : x_buckets_(0), y_buckets_(0), theta_buckets_(0) {}
// Sets up the quantization of the feature space: the number of buckets
// along each of the x, y and theta dimensions.
void IntFeatureSpace::Init(uint8_t xbuckets, uint8_t ybuckets, uint8_t thetabuckets) {
  x_buckets_ = xbuckets;
  y_buckets_ = ybuckets;
  theta_buckets_ = thetabuckets;
}
// Serializes the feature space definition to the given file.
// Returns false on error.
bool IntFeatureSpace::Serialize(FILE *fp) const {
  // Write the three bucket counts in order; short-circuiting stops at the
  // first failed write, exactly like the early-return form.
  return fwrite(&x_buckets_, sizeof(x_buckets_), 1, fp) == 1 &&
         fwrite(&y_buckets_, sizeof(y_buckets_), 1, fp) == 1 &&
         fwrite(&theta_buckets_, sizeof(theta_buckets_), 1, fp) == 1;
}
// Returns an INT_FEATURE_STRUCT corresponding to the given index.
// This is the inverse of the Index member.
INT_FEATURE_STRUCT IntFeatureSpace::PositionFromIndex(int index) const {
  // Decompose the flat index back into per-dimension bucket coordinates;
  // theta is the least-significant dimension, x the most significant.
  const int theta_bucket = index % theta_buckets_;
  const int y_bucket = (index / theta_buckets_) % y_buckets_;
  const int x_bucket = index / (y_buckets_ * theta_buckets_);
  return PositionFromBuckets(x_bucket, y_bucket, theta_bucket);
}
// Bulk calls to Index. Maps the given array of features to a vector of
// int32_t indices in the same order as the input.
void IntFeatureSpace::IndexFeatures(const INT_FEATURE_STRUCT *features, int num_features,
                                    std::vector<int> *mapped_features) const {
  mapped_features->clear();
  // Reserve up front: we know the final size, so avoid repeated reallocation
  // during the push_back loop.
  mapped_features->reserve(num_features);
  for (int f = 0; f < num_features; ++f) {
    mapped_features->push_back(Index(features[f]));
  }
}
// Bulk calls to Index. Maps the given array of features to a vector of
// sorted int32_t indices.
void IntFeatureSpace::IndexAndSortFeatures(const INT_FEATURE_STRUCT *features, int num_features,
                                           std::vector<int> *sorted_features) const {
  sorted_features->clear();
  // Reserve up front: the final size is known, so avoid reallocations.
  sorted_features->reserve(num_features);
  for (int f = 0; f < num_features; ++f) {
    sorted_features->push_back(Index(features[f]));
  }
  std::sort(sorted_features->begin(), sorted_features->end());
}
// Returns a feature space index for the given x,y position in a display
// window, or -1 if the feature is a miss.
// Also prints diagnostic information about the mapping via tprintf.
int IntFeatureSpace::XYToFeatureIndex(int x, int y) const {
  // Round the x,y position to a feature. Search for a valid theta.
  INT_FEATURE_STRUCT feature(x, y, 0);
  int index = -1;
  // Try every theta in [0, 255] until Index() yields a usable index.
  for (int theta = 0; theta <= UINT8_MAX && index < 0; ++theta) {
    feature.Theta = theta;
    index = Index(feature);
  }
  if (index < 0) {
    tprintf("(%d,%d) does not exist in feature space!\n", x, y);
    return -1;
  }
  // Snap to the bucket-center feature the found index represents.
  feature = PositionFromIndex(index);
  tprintf("Click at (%d, %d) ->(%d, %d), ->(%d, %d)\n", x, y, feature.X, feature.Y, x - feature.X,
          y - feature.Y);
  // Get the relative position of x,y from the rounded feature.
  x -= feature.X;
  y -= feature.Y;
  if (x != 0 || y != 0) {
    // Derive theta from the offset direction, mapping (-pi, pi] onto
    // [0, kIntFeatureExtent) with rounding.
    double angle = atan2(static_cast<double>(y), static_cast<double>(x)) + M_PI;
    angle *= kIntFeatureExtent / (2.0 * M_PI);
    feature.Theta = static_cast<uint8_t>(angle + 0.5);
    index = Index(feature);
    if (index < 0) {
      tprintf("Feature failed to map to a valid index:");
      feature.print();
      return -1;
    }
    feature = PositionFromIndex(index);
  }
  feature.print();
  return index;
}
// Returns an INT_FEATURE_STRUCT corresponding to the given bucket coords.
INT_FEATURE_STRUCT IntFeatureSpace::PositionFromBuckets(int x, int y, int theta) const {
  // Map each bucket back to (approximately) the center of its range in the
  // full [0, kIntFeatureExtent) feature space.
  const int pos_x = (x * kIntFeatureExtent + kIntFeatureExtent / 2) / x_buckets_;
  const int pos_y = (y * kIntFeatureExtent + kIntFeatureExtent / 2) / y_buckets_;
  const int pos_theta = DivRounded(theta * kIntFeatureExtent, theta_buckets_);
  return INT_FEATURE_STRUCT(pos_x, pos_y, pos_theta);
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/classify/intfeaturespace.cpp
|
C++
|
apache-2.0
| 4,645
|
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: intfeaturespace.h
// Description: Indexed feature space based on INT_FEATURE_STRUCT.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CLASSIFY_INTFEATURESPACE_H_
#define TESSERACT_CLASSIFY_INTFEATURESPACE_H_
#include "intproto.h"
// Extent of x,y,theta in the input feature space. [0,255].
const int kIntFeatureExtent = 256;
// Extent of x,y,theta dimensions in the quantized feature space.
// NOTE(review): "Boost" presumably refers to a boosted-classifier
// configuration — confirm at the use sites.
const int kBoostXYBuckets = 16;
const int kBoostDirBuckets = 16;
namespace tesseract {
class IndexMap;
// Down-sampling quantization of the INT_FEATURE_STRUCT feature space and
// conversion to a single scalar index value, used as a binary feature space.
class TESS_API IntFeatureSpace {
public:
  IntFeatureSpace();
  // Default copy constructors and assignment OK!
  // Setup the feature space with the given dimensions (bucket counts per
  // x, y and theta dimension).
  void Init(uint8_t xbuckets, uint8_t ybuckets, uint8_t thetabuckets);
  // Serializes the feature space definition to the given file.
  // Returns false on error.
  bool Serialize(FILE *fp) const;
  // Returns the total size of the feature space.
  // The cast widens to int before multiplying so the product is computed
  // in int, not uint8_t.
  int Size() const {
    return static_cast<int>(x_buckets_) * y_buckets_ * theta_buckets_;
  }
  // Returns an INT_FEATURE_STRUCT corresponding to the given index.
  // This is the inverse of the Index member.
  INT_FEATURE_STRUCT PositionFromIndex(int index) const;
  // Returns a 1-dimensional index corresponding to the given feature value.
  // Range is [0, Size()-1]. Inverse of PositionFromIndex member.
  // Layout: x is the most significant dimension, then y, then theta.
  int Index(const INT_FEATURE_STRUCT &f) const {
    return (XBucket(f.X) * y_buckets_ + YBucket(f.Y)) * theta_buckets_ + ThetaBucket(f.Theta);
  }
  // Bulk calls to Index. Maps the given array of features to a vector of
  // int32_t indices in the same order as the input.
  void IndexFeatures(const INT_FEATURE_STRUCT *features, int num_features,
                     std::vector<int> *mapped_features) const;
  // Bulk calls to Index. Maps the given array of features to a vector of
  // sorted int32_t indices.
  void IndexAndSortFeatures(const INT_FEATURE_STRUCT *features, int num_features,
                            std::vector<int> *sorted_features) const;
  // Returns a feature space index for the given x,y position in a display
  // window, or -1 if the feature is a miss.
  int XYToFeatureIndex(int x, int y) const;

protected:
  // Converters to generate indices for individual feature dimensions.
  // Linear quantization of x, clipped into [0, x_buckets_ - 1].
  int XBucket(int x) const {
    int bucket = x * x_buckets_ / kIntFeatureExtent;
    return ClipToRange(bucket, 0, static_cast<int>(x_buckets_) - 1);
  }
  // Linear quantization of y, clipped into [0, y_buckets_ - 1].
  int YBucket(int y) const {
    int bucket = y * y_buckets_ / kIntFeatureExtent;
    return ClipToRange(bucket, 0, static_cast<int>(y_buckets_) - 1);
  }
  // Use DivRounded for theta so that exactly vertical and horizontal are in
  // the middle of a bucket. The Modulo takes care of the wrap-around.
  int ThetaBucket(int theta) const {
    int bucket = DivRounded(theta * theta_buckets_, kIntFeatureExtent);
    return Modulo(bucket, theta_buckets_);
  }
  // Returns an INT_FEATURE_STRUCT corresponding to the given buckets.
  INT_FEATURE_STRUCT PositionFromBuckets(int x, int y, int theta) const;
  // Feature space definition - serialized.
  uint8_t x_buckets_;
  uint8_t y_buckets_;
  uint8_t theta_buckets_;
};
} // namespace tesseract.
#endif // TESSERACT_CLASSIFY_INTFEATURESPACE_H_
|
2301_81045437/tesseract
|
src/classify/intfeaturespace.h
|
C++
|
apache-2.0
| 4,120
|
/******************************************************************************
** Filename: intfx.c
** Purpose: Integer character normalization & feature extraction
** Author: Robert Moss, rays@google.com (Ray Smith)
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*****************************************************************************/
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#define _USE_MATH_DEFINES // for M_PI
#include "intfx.h"
#include "classify.h"
#include "intmatcher.h"
#include "linlsq.h"
#include "normalis.h"
#include "statistc.h"
#include "trainingsample.h"
#include "helpers.h"
#include <allheaders.h>
#include <cmath> // for M_PI
#include <mutex> // for std::mutex
namespace tesseract {
/**----------------------------------------------------------------------------
Global Data Definitions and Declarations
----------------------------------------------------------------------------**/
// Look up table for cos and sin to turn the intfx feature angle to a vector.
// Written only by InitIntegerFX(), which serializes the one-time
// initialization; read-only thereafter (see FeatureDirection).
// The entries are in binary degrees where a full circle is 256 binary degrees.
static float cos_table[INT_CHAR_NORM_RANGE];
static float sin_table[INT_CHAR_NORM_RANGE];
/**----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------**/
/**
 * Populates the cos/sin lookup tables used by FeatureDirection().
 * Thread-safe and idempotent: the tables are built exactly once, on the
 * first call, no matter how many threads call concurrently.
 */
void InitIntegerFX() {
  // std::call_once replaces the previous hand-rolled mutex + bool flag and
  // provides the same run-exactly-once guarantee with less machinery
  // (<mutex> is already included by this file).
  static std::once_flag atan_table_once;
  std::call_once(atan_table_once, []() {
    for (int i = 0; i < INT_CHAR_NORM_RANGE; ++i) {
      // Entry i is the unit vector for i binary degrees (256 per circle),
      // with the pi offset the feature code expects (note the + M_PI).
      cos_table[i] = cos(i * 2 * M_PI / INT_CHAR_NORM_RANGE + M_PI);
      sin_table[i] = sin(i * 2 * M_PI / INT_CHAR_NORM_RANGE + M_PI);
    }
  });
}
// Returns a vector representing the direction of a feature with the given
// theta direction in an INT_FEATURE_STRUCT.
// Reads the file-static lookup tables, so InitIntegerFX() must have been
// called first to populate them.
FCOORD FeatureDirection(uint8_t theta) {
  return FCOORD(cos_table[theta], sin_table[theta]);
}
// Generates a TrainingSample from a TBLOB. Extracts features and sets
// the bounding box, so classifiers that operate on the image can work.
// Returns nullptr when no CN features were extracted.
// TODO(rays) Make BlobToTrainingSample a member of Classify now that
// the FlexFx and FeatureDescription code have been removed and LearnBlob
// is now a member of Classify.
TrainingSample *BlobToTrainingSample(const TBLOB &blob, bool nonlinear_norm,
                                     INT_FX_RESULT_STRUCT *fx_info,
                                     std::vector<INT_FEATURE_STRUCT> *bl_features) {
  std::vector<INT_FEATURE_STRUCT> cn_features;
  Classify::ExtractFeatures(blob, nonlinear_norm, bl_features, &cn_features, fx_info, nullptr);
  // TODO(rays) Use blob->PreciseBoundingBox() instead.
  TBOX box = blob.bounding_box();
  TrainingSample *sample = nullptr;
  int num_features = fx_info->NumCN;
  // Only build a sample if feature extraction produced CN features.
  if (num_features > 0) {
    sample = TrainingSample::CopyFromFeatures(*fx_info, box, &cn_features[0], num_features);
  }
  if (sample != nullptr) {
    // Set the bounding box (in original image coordinates) in the sample.
    TPOINT topleft, botright;
    topleft.x = box.left();
    topleft.y = box.top();
    botright.x = box.right();
    botright.y = box.bottom();
    // Map the two corners back through the denorm chain to image space.
    TPOINT original_topleft, original_botright;
    blob.denorm().DenormTransform(nullptr, topleft, &original_topleft);
    blob.denorm().DenormTransform(nullptr, botright, &original_botright);
    sample->set_bounding_box(
        TBOX(original_topleft.x, original_botright.y, original_botright.x, original_topleft.y));
  }
  return sample;
}
// Computes the DENORMS for bl(baseline) and cn(character) normalization
// during feature extraction. The input denorm describes the current state
// of the blob, which is usually a baseline-normalized word.
// The Transforms setup are as follows:
// Baseline Normalized (bl) Output:
//   We center the grapheme by aligning the x-coordinate of its centroid with
//   x=128 and leaving the already-baseline-normalized y as-is.
//
// Character Normalized (cn) Output:
//   We align the grapheme's centroid at the origin and scale it
//   asymmetrically in x and y so that the 2nd moments are a standard value
//   (51.2) ie the result is vaguely square.
//   If classify_nonlinear_norm is true:
//     A non-linear normalization is setup that attempts to evenly distribute
//     edges across x and y.
//
// Some of the fields of fx_info are also setup:
//   Length: Total length of outline.
//   Rx: Rounded y second moment. (Reversed by convention.)
//   Ry: rounded x second moment.
//   Xmean: Rounded x center of mass of the blob.
//   Ymean: Rounded y center of mass of the blob.
void Classify::SetupBLCNDenorms(const TBLOB &blob, bool nonlinear_norm, DENORM *bl_denorm,
                                DENORM *cn_denorm, INT_FX_RESULT_STRUCT *fx_info) {
  // Compute 1st and 2nd moments of the original outline.
  // Bug fix: "&center" had been corrupted to "¢er" (an "&cent;" HTML-entity
  // mangling artifact), which does not compile.
  FCOORD center, second_moments;
  int length = blob.ComputeMoments(&center, &second_moments);
  if (fx_info != nullptr) {
    fx_info->Length = length;
    // Note the deliberate x/y swap for Rx/Ry — see the header comment.
    fx_info->Rx = IntCastRounded(second_moments.y());
    fx_info->Ry = IntCastRounded(second_moments.x());
    fx_info->Xmean = IntCastRounded(center.x());
    fx_info->Ymean = IntCastRounded(center.y());
  }
  // Setup the denorm for Baseline normalization.
  bl_denorm->SetupNormalization(nullptr, nullptr, &blob.denorm(), center.x(), 128.0f, 1.0f, 1.0f,
                                128.0f, 128.0f);
  // Setup the denorm for character normalization.
  if (nonlinear_norm) {
    std::vector<std::vector<int>> x_coords;
    std::vector<std::vector<int>> y_coords;
    TBOX box;
    blob.GetPreciseBoundingBox(&box);
    box.pad(1, 1);
    blob.GetEdgeCoords(box, x_coords, y_coords);
    cn_denorm->SetupNonLinear(&blob.denorm(), box, UINT8_MAX, UINT8_MAX, 0.0f, 0.0f, x_coords,
                              y_coords);
  } else {
    // Scale so the second moments become 51.2, placing the centroid at
    // (128, 128) in the normalized space.
    // NOTE(review): assumes non-zero second moments; a degenerate blob would
    // divide by zero here — confirm upstream guarantees before hardening.
    cn_denorm->SetupNormalization(nullptr, nullptr, &blob.denorm(), center.x(), center.y(),
                                  51.2f / second_moments.x(), 51.2f / second_moments.y(), 128.0f,
                                  128.0f);
  }
}
// Helper normalizes the direction, assuming that it is at the given
// unnormed_pos, using the given denorm, starting at the root_denorm.
static uint8_t NormalizeDirection(uint8_t dir, const FCOORD &unnormed_pos, const DENORM &denorm,
const DENORM *root_denorm) {
// Convert direction to a vector.
FCOORD unnormed_end;
unnormed_end.from_direction(dir);
unnormed_end += unnormed_pos;
FCOORD normed_pos, normed_end;
denorm.NormTransform(root_denorm, unnormed_pos, &normed_pos);
denorm.NormTransform(root_denorm, unnormed_end, &normed_end);
normed_end -= normed_pos;
return normed_end.to_direction();
}
// Helper returns the mean direction vector from the given stats. Use the
// mean direction from dirs if there is information available, otherwise, use
// the fit_vector from point_diffs.
static FCOORD MeanDirectionVector(const LLSQ &point_diffs, const LLSQ &dirs, const FCOORD &start_pt,
                                  const FCOORD &end_pt) {
  FCOORD fit_vector;
  if (dirs.count() > 0) {
    // There were directions, so use them. To avoid wrap-around problems, we
    // have 2 accumulators in dirs: x for normal directions and y for
    // directions offset by 128. We will use the one with the least variance.
    FCOORD mean_pt = dirs.mean_point();
    double mean_dir = 0.0;
    if (dirs.x_variance() <= dirs.y_variance()) {
      mean_dir = mean_pt.x();
    } else {
      // Undo the +128 offset applied when accumulating; modulo 256 below,
      // +128 is equivalent to -128.
      mean_dir = mean_pt.y() + 128;
    }
    fit_vector.from_direction(Modulo(IntCastRounded(mean_dir), 256));
  } else {
    // There were no directions, so we rely on the vector_fit to the points.
    // Since the vector_fit is 180 degrees ambiguous, we align with the
    // supplied feature_dir by making the scalar product non-negative.
    FCOORD feature_dir(end_pt - start_pt);
    fit_vector = point_diffs.vector_fit();
    if (fit_vector.x() == 0.0f && fit_vector.y() == 0.0f) {
      // There was only a single point. Use feature_dir directly.
      fit_vector = feature_dir;
    } else {
      // Sometimes the least mean squares fit is wrong, due to the small sample
      // of points and scaling. Use a 90 degree rotated vector if that matches
      // feature_dir better.
      FCOORD fit_vector2 = !fit_vector;  // 90-degree rotation (see above).
      // The fit_vector is 180 degrees ambiguous, so resolve the ambiguity by
      // insisting that the scalar product with the feature_dir should be +ve.
      if (fit_vector % feature_dir < 0.0) {
        fit_vector = -fit_vector;
      }
      if (fit_vector2 % feature_dir < 0.0) {
        fit_vector2 = -fit_vector2;
      }
      // Even though fit_vector2 has a higher mean squared error, it might be
      // a better fit, so use it if the dot product with feature_dir is bigger.
      if (fit_vector2 % feature_dir > fit_vector % feature_dir) {
        fit_vector = fit_vector2;
      }
    }
  }
  return fit_vector;
}
// Helper computes one or more features corresponding to the given points.
// Emitted features are on the line defined by:
// start_pt + lambda * (end_pt - start_pt) for scalar lambda.
// Features are spaced at feature_length intervals.
static int ComputeFeatures(const FCOORD &start_pt, const FCOORD &end_pt, double feature_length,
std::vector<INT_FEATURE_STRUCT> *features) {
FCOORD feature_vector(end_pt - start_pt);
if (feature_vector.x() == 0.0f && feature_vector.y() == 0.0f) {
return 0;
}
// Compute theta for the feature based on its direction.
uint8_t theta = feature_vector.to_direction();
// Compute the number of features and lambda_step.
double target_length = feature_vector.length();
int num_features = IntCastRounded(target_length / feature_length);
if (num_features == 0) {
return 0;
}
// Divide the length evenly into num_features pieces.
double lambda_step = 1.0 / num_features;
double lambda = lambda_step / 2.0;
for (int f = 0; f < num_features; ++f, lambda += lambda_step) {
FCOORD feature_pt(start_pt);
feature_pt += feature_vector * lambda;
INT_FEATURE_STRUCT feature(feature_pt, theta);
features->push_back(feature);
}
return num_features;
}
// Gathers outline points and their directions from start_index into dirs by
// stepping along the outline and normalizing the coordinates until the
// required feature_length has been collected or end_index is reached.
// On input pos must point to the position corresponding to start_index and on
// return pos is updated to the current raw position, and pos_normed is set to
// the normed version of pos.
// Since directions wrap-around, they need special treatment to get the mean.
// Provided the cluster of directions doesn't straddle the wrap-around point,
// the simple mean works. If they do, then, unless the directions are wildly
// varying, the cluster rotated by 180 degrees will not straddle the wrap-
// around point, so mean(dir + 180 degrees) - 180 degrees will work. Since
// LLSQ conveniently stores the mean of 2 variables, we use it to store
// dir and dir+128 (128 is 180 degrees) and then use the resulting mean
// with the least variance.
// Returns the index at which collection stopped (the caller resumes there).
static int GatherPoints(const C_OUTLINE *outline, double feature_length, const DENORM &denorm,
                        const DENORM *root_denorm, int start_index, int end_index, ICOORD *pos,
                        FCOORD *pos_normed, LLSQ *points, LLSQ *dirs) {
  int step_length = outline->pathlength();
  ICOORD step = outline->step(start_index % step_length);
  // Prev_normed is the start point of this collection and will be set on the
  // first iteration, and on later iterations used to determine the length
  // that has been collected.
  FCOORD prev_normed;
  points->clear();
  dirs->clear();
  int num_points = 0;
  int index;
  // end_index may exceed step_length due to wrap-around, hence the
  // % step_length on every outline access below.
  for (index = start_index; index <= end_index; ++index, *pos += step) {
    step = outline->step(index % step_length);
    int edge_weight = outline->edge_strength_at_index(index % step_length);
    if (edge_weight == 0) {
      // This point has conflicting gradient and step direction, so ignore it.
      continue;
    }
    // Get the sub-pixel precise location and normalize.
    FCOORD f_pos = outline->sub_pixel_pos_at_index(*pos, index % step_length);
    denorm.NormTransform(root_denorm, f_pos, pos_normed);
    if (num_points == 0) {
      // The start of this segment.
      prev_normed = *pos_normed;
    } else {
      FCOORD offset = *pos_normed - prev_normed;
      float length = offset.length();
      if (length > feature_length) {
        // We have gone far enough from the start. We will use this point in
        // the next set so return what we have so far.
        return index;
      }
    }
    // Accumulate the point, weighted by its edge strength.
    points->add(pos_normed->x(), pos_normed->y(), edge_weight);
    int direction = outline->direction_at_index(index % step_length);
    if (direction >= 0) {
      direction = NormalizeDirection(direction, f_pos, denorm, root_denorm);
      // Use both the direction and direction +128 so we are not trying to
      // take the mean of something straddling the wrap-around point.
      dirs->add(direction, Modulo(direction + 128, 256));
    }
    ++num_points;
  }
  return index;
}
// Extracts Tesseract features and appends them to the features vector.
// Startpt to lastpt, inclusive, MUST have the same src_outline member,
// which may be nullptr. The vector from lastpt to its next is included in
// the feature extraction. Hidden edges should be excluded by the caller.
// If force_poly is true, the features will be extracted from the polygonal
// approximation even if more accurate data is available.
static void ExtractFeaturesFromRun(const EDGEPT *startpt, const EDGEPT *lastpt,
                                   const DENORM &denorm, double feature_length, bool force_poly,
                                   std::vector<INT_FEATURE_STRUCT> *features) {
  const EDGEPT *endpt = lastpt->next;
  const C_OUTLINE *outline = startpt->src_outline;
  if (outline != nullptr && !force_poly) {
    // Detailed information is available. We have to normalize only from
    // the root_denorm to denorm.
    const DENORM *root_denorm = denorm.RootDenorm();
    int total_features = 0;
    // Get the features from the outline.
    int step_length = outline->pathlength();
    int start_index = startpt->start_step;
    // pos is the integer coordinates of the binary image steps.
    ICOORD pos = outline->position_at_index(start_index);
    // We use an end_index that allows us to use a positive increment, but
    // that may be beyond the bounds of the outline steps due to wrap-around,
    // so we use % step_length everywhere, except for start_index.
    int end_index = lastpt->start_step + lastpt->step_count;
    if (end_index <= start_index) {
      end_index += step_length;
    }
    // Sliding window of three point/direction accumulators: prev, current,
    // next. Features are fitted to their sum.
    LLSQ prev_points;
    LLSQ prev_dirs;
    FCOORD prev_normed_pos = outline->sub_pixel_pos_at_index(pos, start_index);
    denorm.NormTransform(root_denorm, prev_normed_pos, &prev_normed_pos);
    LLSQ points;
    LLSQ dirs;
    FCOORD normed_pos(0.0f, 0.0f);
    int index = GatherPoints(outline, feature_length, denorm, root_denorm, start_index, end_index,
                             &pos, &normed_pos, &points, &dirs);
    while (index <= end_index) {
      // At each iteration we nominally have 3 accumulated sets of points and
      // dirs: prev_points/dirs, points/dirs, next_points/dirs and sum them
      // into sum_points/dirs, but we don't necessarily get any features out,
      // so if that is the case, we keep accumulating instead of rotating the
      // accumulators.
      LLSQ next_points;
      LLSQ next_dirs;
      FCOORD next_normed_pos(0.0f, 0.0f);
      index = GatherPoints(outline, feature_length, denorm, root_denorm, index, end_index, &pos,
                           &next_normed_pos, &next_points, &next_dirs);
      LLSQ sum_points(prev_points);
      // TODO(rays) find out why it is better to use just dirs and next_dirs
      // in sum_dirs, instead of using prev_dirs as well.
      LLSQ sum_dirs(dirs);
      sum_points.add(points);
      sum_points.add(next_points);
      sum_dirs.add(next_dirs);
      bool made_features = false;
      // If we have some points, we can try making some features.
      if (sum_points.count() > 0) {
        // We have gone far enough from the start. Make a feature and restart.
        FCOORD fit_pt = sum_points.mean_point();
        FCOORD fit_vector = MeanDirectionVector(sum_points, sum_dirs, prev_normed_pos, normed_pos);
        // The segment to which we fit features is the line passing through
        // fit_pt in direction of fit_vector that starts nearest to
        // prev_normed_pos and ends nearest to normed_pos.
        FCOORD start_pos = prev_normed_pos.nearest_pt_on_line(fit_pt, fit_vector);
        FCOORD end_pos = normed_pos.nearest_pt_on_line(fit_pt, fit_vector);
        // Possible correction to match the adjacent polygon segment.
        if (total_features == 0 && startpt != endpt) {
          FCOORD poly_pos(startpt->pos.x, startpt->pos.y);
          denorm.LocalNormTransform(poly_pos, &start_pos);
        }
        if (index > end_index && startpt != endpt) {
          FCOORD poly_pos(endpt->pos.x, endpt->pos.y);
          denorm.LocalNormTransform(poly_pos, &end_pos);
        }
        int num_features = ComputeFeatures(start_pos, end_pos, feature_length, features);
        if (num_features > 0) {
          // We made some features so shuffle the accumulators.
          prev_points = points;
          prev_dirs = dirs;
          prev_normed_pos = normed_pos;
          points = next_points;
          dirs = next_dirs;
          made_features = true;
          total_features += num_features;
        }
        // The end of the next set becomes the end next time around.
        normed_pos = next_normed_pos;
      }
      if (!made_features) {
        // We didn't make any features, so keep the prev accumulators and
        // add the next ones into the current.
        points.add(next_points);
        dirs.add(next_dirs);
      }
    }
  } else {
    // There is no outline, so we are forced to use the polygonal approximation.
    const EDGEPT *pt = startpt;
    do {
      FCOORD start_pos(pt->pos.x, pt->pos.y);
      FCOORD end_pos(pt->next->pos.x, pt->next->pos.y);
      denorm.LocalNormTransform(start_pos, &start_pos);
      denorm.LocalNormTransform(end_pos, &end_pos);
      ComputeFeatures(start_pos, end_pos, feature_length, features);
    } while ((pt = pt->next) != endpt);
  }
}
// Extracts sets of 3-D features of length kStandardFeatureLength (=12.8), as
// (x,y) position and angle as measured counterclockwise from the vector
// <-1, 0>, from blob using two normalizations defined by bl_denorm and
// cn_denorm. See SetupBLCNDenorms for definitions.
// If outline_cn_counts is not nullptr, on return it contains the cumulative
// number of cn features generated for each outline in the blob (in order).
// Thus after the first outline, there were (*outline_cn_counts)[0] features,
// after the second outline, there were (*outline_cn_counts)[1] features etc.
void Classify::ExtractFeatures(const TBLOB &blob, bool nonlinear_norm,
                               std::vector<INT_FEATURE_STRUCT> *bl_features,
                               std::vector<INT_FEATURE_STRUCT> *cn_features,
                               INT_FX_RESULT_STRUCT *results,
                               std::vector<int> *outline_cn_counts) {
  DENORM bl_denorm, cn_denorm;
  // Build the baseline and character normalizations and fill the blob
  // statistics (moments, bounding data) into results.
  tesseract::Classify::SetupBLCNDenorms(blob, nonlinear_norm, &bl_denorm, &cn_denorm, results);
  if (outline_cn_counts != nullptr) {
    outline_cn_counts->clear();
  }
  // Iterate the outlines.
  for (TESSLINE *ol = blob.outlines; ol != nullptr; ol = ol->next) {
    // Iterate the polygon.
    EDGEPT *loop_pt = ol->FindBestStartPt();
    EDGEPT *pt = loop_pt;
    if (pt == nullptr) {
      continue;
    }
    do {
      // Hidden edges contribute no features; the loop condition below still
      // advances pt, so this continue skips just this point.
      if (pt->IsHidden()) {
        continue;
      }
      // Find a run of equal src_outline.
      // The run [pt, last_pt] covers consecutive visible polygon points that
      // came from the same source outline.
      EDGEPT *last_pt = pt;
      do {
        last_pt = last_pt->next;
      } while (last_pt != loop_pt && !last_pt->IsHidden() &&
               last_pt->src_outline == pt->src_outline);
      last_pt = last_pt->prev;
      // Until the adaptive classifier can be weaned off polygon segments,
      // we have to force extraction from the polygon for the bl_features.
      ExtractFeaturesFromRun(pt, last_pt, bl_denorm, kStandardFeatureLength, true, bl_features);
      ExtractFeaturesFromRun(pt, last_pt, cn_denorm, kStandardFeatureLength, false, cn_features);
      // Resume iteration after the run (loop condition advances past last_pt).
      pt = last_pt;
    } while ((pt = pt->next) != loop_pt);
    if (outline_cn_counts != nullptr) {
      // Record the cumulative cn feature count after each outline.
      outline_cn_counts->push_back(cn_features->size());
    }
  }
  results->NumBL = bl_features->size();
  results->NumCN = cn_features->size();
  results->YBottom = blob.bounding_box().bottom();
  results->YTop = blob.bounding_box().top();
  results->Width = blob.bounding_box().width();
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/intfx.cpp
|
C++
|
apache-2.0
| 21,924
|
/******************************************************************************
** Filename: intfx.h
** Purpose: Interface to high level integer feature extractor.
** Author: Robert Moss
** History: Tue May 21 15:51:57 MDT 1991, RWM, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef INTFX_H
#define INTFX_H
#include "blobs.h"
#include "intproto.h"
#include "normalis.h"
#include <cmath>
namespace tesseract {
class DENORM;
class TrainingSample;
struct INT_FX_RESULT_STRUCT {
  // Summary statistics produced by the integer feature extractor
  // (filled in by Classify::ExtractFeatures / SetupBLCNDenorms).
  int32_t Length;       // total length of all outlines
  int16_t Xmean, Ymean; // center of mass of all outlines
  int16_t Rx, Ry;       // radius of gyration
  int16_t NumBL, NumCN; // number of baseline- and character-normalized
                        // features extracted, respectively
  int16_t Width;        // Width of blob in BLN coords.
  uint8_t YBottom;      // Bottom of blob in BLN coords.
  uint8_t YTop;         // Top of blob in BLN coords.
};
// The standard feature length
const double kStandardFeatureLength = 64.0 / 5;
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
TESS_API
void InitIntegerFX();
// Returns a vector representing the direction of a feature with the given
// theta direction in an INT_FEATURE_STRUCT.
TESS_API
FCOORD FeatureDirection(uint8_t theta);
// Generates a TrainingSample from a TBLOB. Extracts features and sets
// the bounding box, so classifiers that operate on the image can work.
// TODO(rays) BlobToTrainingSample must remain a global function until
// the FlexFx and FeatureDescription code can be removed and LearnBlob
// made a member of Classify.
TrainingSample *BlobToTrainingSample(const TBLOB &blob, bool nonlinear_norm,
INT_FX_RESULT_STRUCT *fx_info,
std::vector<INT_FEATURE_STRUCT> *bl_features);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/intfx.h
|
C++
|
apache-2.0
| 2,606
|
/******************************************************************************
** Filename: intmatcher.cpp
** Purpose: Generic high level classification routines.
** Author: Robert Moss
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "intmatcher.h"

#include "classify.h"
#include "float2int.h"
#include "fontinfo.h"
#include "helpers.h"
#include "intproto.h"
#include "scrollview.h"
#include "shapetable.h"

#include <cassert>
#include <cmath>
#include <memory>
#include <vector>
namespace tesseract {
/*----------------------------------------------------------------------------
Global Data Definitions and Declarations
----------------------------------------------------------------------------*/
// Parameters of the sigmoid used to convert similarity to evidence in the
// similarity_evidence_table_ that is used to convert distance metric to an
// 8 bit evidence value in the secondary matcher. (See IntMatcher::Init).
const float IntegerMatcher::kSEExponentialMultiplier = 0.0f;
const float IntegerMatcher::kSimilarityCenter = 0.0075f;
// offset_table[b] is the bit index (0..7) of the lowest set bit in the byte
// b; offset_table[0] is a 255 sentinel meaning "no bit set". Used to find
// the next matching proto in a pruner word one set bit at a time.
static const uint8_t offset_table[] = {
    255, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,
    0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0,
    1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1,
    0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0,
    2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
    0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0,
    1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1,
    0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0,
    3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0};
// next_table[b] is b with its lowest set bit cleared (i.e. b & (b - 1)),
// so iterating offset_table/next_table walks the set bits of a byte in
// ascending order.
static const uint8_t next_table[] = {
    0, 0, 0, 0x2, 0, 0x4, 0x4, 0x6, 0, 0x8, 0x8, 0x0a, 0x08, 0x0c, 0x0c, 0x0e,
    0, 0x10, 0x10, 0x12, 0x10, 0x14, 0x14, 0x16, 0x10, 0x18, 0x18, 0x1a, 0x18, 0x1c, 0x1c, 0x1e,
    0, 0x20, 0x20, 0x22, 0x20, 0x24, 0x24, 0x26, 0x20, 0x28, 0x28, 0x2a, 0x28, 0x2c, 0x2c, 0x2e,
    0x20, 0x30, 0x30, 0x32, 0x30, 0x34, 0x34, 0x36, 0x30, 0x38, 0x38, 0x3a, 0x38, 0x3c, 0x3c, 0x3e,
    0, 0x40, 0x40, 0x42, 0x40, 0x44, 0x44, 0x46, 0x40, 0x48, 0x48, 0x4a, 0x48, 0x4c, 0x4c, 0x4e,
    0x40, 0x50, 0x50, 0x52, 0x50, 0x54, 0x54, 0x56, 0x50, 0x58, 0x58, 0x5a, 0x58, 0x5c, 0x5c, 0x5e,
    0x40, 0x60, 0x60, 0x62, 0x60, 0x64, 0x64, 0x66, 0x60, 0x68, 0x68, 0x6a, 0x68, 0x6c, 0x6c, 0x6e,
    0x60, 0x70, 0x70, 0x72, 0x70, 0x74, 0x74, 0x76, 0x70, 0x78, 0x78, 0x7a, 0x78, 0x7c, 0x7c, 0x7e,
    0, 0x80, 0x80, 0x82, 0x80, 0x84, 0x84, 0x86, 0x80, 0x88, 0x88, 0x8a, 0x88, 0x8c, 0x8c, 0x8e,
    0x80, 0x90, 0x90, 0x92, 0x90, 0x94, 0x94, 0x96, 0x90, 0x98, 0x98, 0x9a, 0x98, 0x9c, 0x9c, 0x9e,
    0x80, 0xa0, 0xa0, 0xa2, 0xa0, 0xa4, 0xa4, 0xa6, 0xa0, 0xa8, 0xa8, 0xaa, 0xa8, 0xac, 0xac, 0xae,
    0xa0, 0xb0, 0xb0, 0xb2, 0xb0, 0xb4, 0xb4, 0xb6, 0xb0, 0xb8, 0xb8, 0xba, 0xb8, 0xbc, 0xbc, 0xbe,
    0x80, 0xc0, 0xc0, 0xc2, 0xc0, 0xc4, 0xc4, 0xc6, 0xc0, 0xc8, 0xc8, 0xca, 0xc8, 0xcc, 0xcc, 0xce,
    0xc0, 0xd0, 0xd0, 0xd2, 0xd0, 0xd4, 0xd4, 0xd6, 0xd0, 0xd8, 0xd8, 0xda, 0xd8, 0xdc, 0xdc, 0xde,
    0xc0, 0xe0, 0xe0, 0xe2, 0xe0, 0xe4, 0xe4, 0xe6, 0xe0, 0xe8, 0xe8, 0xea, 0xe8, 0xec, 0xec, 0xee,
    0xe0, 0xf0, 0xf0, 0xf2, 0xf0, 0xf4, 0xf4, 0xf6, 0xf0, 0xf8, 0xf8, 0xfa, 0xf8, 0xfc, 0xfc, 0xfe};
// See http://b/19318793 (#6) for a complete discussion.
/**
* Sort Key array in ascending order using heap sort
* algorithm. Also sort Index array that is tied to
* the key array.
* @param n Number of elements to sort
* @param ra Key array [1..n]
* @param rb Index array [1..n]
*/
static void HeapSort(int n, int ra[], int rb[]) {
  // Classic in-place heapsort over the 1-based arrays ra[1..n] / rb[1..n],
  // written as an explicit build phase plus an extraction phase.
  // sift_down restores the max-heap property for the subtree rooted at
  // 'root' within [1..limit], placing (key, idx) at its final position.
  auto sift_down = [&ra, &rb](int root, int limit, int key, int idx) {
    int parent = root;
    int child = parent << 1;
    while (child <= limit) {
      // Step to the larger of the two children.
      if (child < limit && ra[child] < ra[child + 1]) {
        ++child;
      }
      if (key >= ra[child]) {
        break;
      }
      ra[parent] = ra[child];
      rb[parent] = rb[child];
      parent = child;
      child = parent << 1;
    }
    ra[parent] = key;
    rb[parent] = idx;
  };
  // Phase 1: build a max-heap over ra[1..n], bottom-up.
  for (int root = n >> 1; root >= 1; --root) {
    sift_down(root, n, ra[root], rb[root]);
  }
  // Phase 2: repeatedly move the current maximum to the end of the
  // shrinking range and re-heapify what remains.
  for (int last = n; last > 1; --last) {
    int key = ra[last];
    int idx = rb[last];
    ra[last] = ra[1];
    rb[last] = rb[1];
    sift_down(1, last - 1, key, idx);
  }
}
// Encapsulation of the intermediate data and computations made by the class
// pruner. The class pruner implements a simple linear classifier on binary
// features by heavily quantizing the feature space, and applying
// NUM_BITS_PER_CLASS (2)-bit weights to the features. Lack of resolution in
// weights is compensated by a non-constant bias that is dependent on the
// number of features present.
class ClassPruner {
public:
  // max_classes is the number of classes covered by the templates.
  // The arrays are owned by std::vector (Rule of Zero): this fixes the
  // previous raw new[]/delete[] ownership, which leaked on exception and
  // would have double-freed if the class were ever copied.
  explicit ClassPruner(int max_classes) {
    // The unrolled loop in ComputeScores means that the array sizes need to
    // be rounded up so that the array is big enough to accommodate the extra
    // entries accessed by the unrolling. Each pruner word is of size
    // BITS_PER_WERD and each entry is NUM_BITS_PER_CLASS, so there are
    // BITS_PER_WERD / NUM_BITS_PER_CLASS entries.
    // See ComputeScores.
    max_classes_ = max_classes;
    rounded_classes_ =
        RoundUp(max_classes, WERDS_PER_CP_VECTOR * BITS_PER_WERD / NUM_BITS_PER_CLASS);
    class_count_.assign(rounded_classes_, 0);
    norm_count_.assign(rounded_classes_, 0);
    // sort_key_/sort_index_ are 1-indexed by HeapSort, hence the +1.
    sort_key_.assign(rounded_classes_ + 1, 0);
    sort_index_.assign(rounded_classes_ + 1, 0);
    pruning_threshold_ = 0;
    num_features_ = 0;
    num_classes_ = 0;
  }

  /// Computes the scores for every class in the character set, by summing the
  /// weights for each feature and stores the sums internally in class_count_.
  void ComputeScores(const INT_TEMPLATES_STRUCT *int_templates, int num_features,
                     const INT_FEATURE_STRUCT *features) {
    num_features_ = num_features;
    auto num_pruners = int_templates->NumClassPruners;
    for (int f = 0; f < num_features; ++f) {
      const INT_FEATURE_STRUCT *feature = &features[f];
      // Quantize the feature to NUM_CP_BUCKETS*NUM_CP_BUCKETS*NUM_CP_BUCKETS.
      int x = feature->X * NUM_CP_BUCKETS >> 8;
      int y = feature->Y * NUM_CP_BUCKETS >> 8;
      int theta = feature->Theta * NUM_CP_BUCKETS >> 8;
      int class_id = 0;
      // Each CLASS_PRUNER_STRUCT only covers CLASSES_PER_CP(32) classes, so
      // we need a collection of them, indexed by pruner_set.
      for (unsigned pruner_set = 0; pruner_set < num_pruners; ++pruner_set) {
        // Look up quantized feature in a 3-D array, an array of weights for
        // each class.
        const uint32_t *pruner_word_ptr = int_templates->ClassPruners[pruner_set]->p[x][y][theta];
        for (int word = 0; word < WERDS_PER_CP_VECTOR; ++word) {
          uint32_t pruner_word = *pruner_word_ptr++;
          // This inner loop is unrolled to speed up the ClassPruner.
          // Currently gcc would not unroll it unless it is set to O3
          // level of optimization or -funroll-loops is specified.
          /*
          uint32_t class_mask = (1 << NUM_BITS_PER_CLASS) - 1;
          for (int bit = 0; bit < BITS_PER_WERD/NUM_BITS_PER_CLASS; bit++) {
            class_count_[class_id++] += pruner_word & class_mask;
            pruner_word >>= NUM_BITS_PER_CLASS;
          }
          */
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
          pruner_word >>= NUM_BITS_PER_CLASS;
          class_count_[class_id++] += pruner_word & CLASS_PRUNER_CLASS_MASK;
        }
      }
    }
  }

  /// Adjusts the scores according to the number of expected features. Used
  /// in lieu of a constant bias, this penalizes classes that expect more
  /// features than there are present. Thus an actual c will score higher for c
  /// than e, even though almost all the features match e as well as c, because
  /// e expects more features to be present.
  void AdjustForExpectedNumFeatures(const uint16_t *expected_num_features, int cutoff_strength) {
    for (int class_id = 0; class_id < max_classes_; ++class_id) {
      if (num_features_ < expected_num_features[class_id]) {
        int deficit = expected_num_features[class_id] - num_features_;
        class_count_[class_id] -=
            class_count_[class_id] * deficit / (num_features_ * cutoff_strength + deficit);
      }
    }
  }

  /// Zeros the scores for classes disabled in the unicharset.
  /// Implements the black-list to recognize a subset of the character set.
  void DisableDisabledClasses(const UNICHARSET &unicharset) {
    for (int class_id = 0; class_id < max_classes_; ++class_id) {
      if (!unicharset.get_enabled(class_id)) {
        class_count_[class_id] = 0; // This char is disabled!
      }
    }
  }

  /** Zeros the scores of fragments. */
  void DisableFragments(const UNICHARSET &unicharset) {
    for (int class_id = 0; class_id < max_classes_; ++class_id) {
      // Do not include character fragments in the class pruner
      // results if disable_character_fragments is true.
      if (unicharset.get_fragment(class_id)) {
        class_count_[class_id] = 0;
      }
    }
  }

  /// Normalizes the counts for xheight, putting the normalized result in
  /// norm_count_. Applies a simple subtractive penalty for incorrect vertical
  /// position provided by the normalization_factors array, indexed by
  /// character class, and scaled by the norm_multiplier.
  void NormalizeForXheight(int norm_multiplier, const uint8_t *normalization_factors) {
    for (int class_id = 0; class_id < max_classes_; class_id++) {
      norm_count_[class_id] =
          class_count_[class_id] - ((norm_multiplier * normalization_factors[class_id]) >> 8);
    }
  }

  /** The nop normalization copies the class_count_ array to norm_count_. */
  void NoNormalization() {
    for (int class_id = 0; class_id < max_classes_; class_id++) {
      norm_count_[class_id] = class_count_[class_id];
    }
  }

  /// Prunes the classes using <the maximum count> * pruning_factor/256 as a
  /// threshold for keeping classes. If max_of_non_fragments, then ignore
  /// fragments in computing the maximum count.
  void PruneAndSort(int pruning_factor, int keep_this, bool max_of_non_fragments,
                    const UNICHARSET &unicharset) {
    int max_count = 0;
    for (int c = 0; c < max_classes_; ++c) {
      if (norm_count_[c] > max_count &&
          // This additional check is added in order to ensure that
          // the classifier will return at least one non-fragmented
          // character match.
          // TODO(daria): verify that this helps accuracy and does not
          // hurt performance.
          (!max_of_non_fragments || !unicharset.get_fragment(c))) {
        max_count = norm_count_[c];
      }
    }
    // Prune Classes.
    pruning_threshold_ = (max_count * pruning_factor) >> 8;
    // Select Classes.
    if (pruning_threshold_ < 1) {
      pruning_threshold_ = 1;
    }
    num_classes_ = 0;
    for (int class_id = 0; class_id < max_classes_; class_id++) {
      if (norm_count_[class_id] >= pruning_threshold_ || class_id == keep_this) {
        ++num_classes_;
        sort_index_[num_classes_] = class_id;
        sort_key_[num_classes_] = norm_count_[class_id];
      }
    }
    // Sort Classes using Heapsort Algorithm (1-based arrays).
    if (num_classes_ > 1) {
      HeapSort(num_classes_, sort_key_.data(), sort_index_.data());
    }
  }

  /** Prints debug info on the class pruner matches for the pruned classes only.
   */
  void DebugMatch(const Classify &classify, const INT_TEMPLATES_STRUCT *int_templates,
                  const INT_FEATURE_STRUCT *features) const {
    int num_pruners = int_templates->NumClassPruners;
    int max_num_classes = int_templates->NumClasses;
    for (int f = 0; f < num_features_; ++f) {
      const INT_FEATURE_STRUCT *feature = &features[f];
      tprintf("F=%3d(%d,%d,%d),", f, feature->X, feature->Y, feature->Theta);
      // Quantize the feature to NUM_CP_BUCKETS*NUM_CP_BUCKETS*NUM_CP_BUCKETS.
      int x = feature->X * NUM_CP_BUCKETS >> 8;
      int y = feature->Y * NUM_CP_BUCKETS >> 8;
      int theta = feature->Theta * NUM_CP_BUCKETS >> 8;
      int class_id = 0;
      for (int pruner_set = 0; pruner_set < num_pruners; ++pruner_set) {
        // Look up quantized feature in a 3-D array, an array of weights for
        // each class.
        const uint32_t *pruner_word_ptr = int_templates->ClassPruners[pruner_set]->p[x][y][theta];
        for (int word = 0; word < WERDS_PER_CP_VECTOR; ++word) {
          uint32_t pruner_word = *pruner_word_ptr++;
          for (int word_class = 0; word_class < 16 && class_id < max_num_classes;
               ++word_class, ++class_id) {
            // Only report classes that survived pruning.
            if (norm_count_[class_id] >= pruning_threshold_) {
              tprintf(" %s=%d,", classify.ClassIDToDebugStr(int_templates, class_id, 0).c_str(),
                      pruner_word & CLASS_PRUNER_CLASS_MASK);
            }
            pruner_word >>= NUM_BITS_PER_CLASS;
          }
        }
        tprintf("\n");
      }
    }
  }

  /** Prints a summary of the pruner result. */
  void SummarizeResult(const Classify &classify, const INT_TEMPLATES_STRUCT *int_templates,
                       const uint16_t *expected_num_features, int norm_multiplier,
                       const uint8_t *normalization_factors) const {
    tprintf("CP:%d classes, %d features:\n", num_classes_, num_features_);
    for (int i = 0; i < num_classes_; ++i) {
      int class_id = sort_index_[num_classes_ - i];
      std::string class_string = classify.ClassIDToDebugStr(int_templates, class_id, 0);
      tprintf(
          "%s:Initial=%d, E=%d, Xht-adj=%d, N=%d, Rat=%.2f\n", class_string.c_str(),
          class_count_[class_id], expected_num_features[class_id],
          (norm_multiplier * normalization_factors[class_id]) >> 8, sort_key_[num_classes_ - i],
          100.0 - 100.0 * sort_key_[num_classes_ - i] / (CLASS_PRUNER_CLASS_MASK * num_features_));
    }
  }

  /// Copies the pruned, sorted classes into the output results and returns
  /// the number of classes.
  int SetupResults(std::vector<CP_RESULT_STRUCT> *results) const {
    results->clear();
    results->resize(num_classes_);
    for (int c = 0; c < num_classes_; ++c) {
      // sort order is ascending, so walk it backwards for best-first output.
      (*results)[c].Class = sort_index_[num_classes_ - c];
      (*results)[c].Rating =
          1.0f - sort_key_[num_classes_ - c] /
                     (static_cast<float>(CLASS_PRUNER_CLASS_MASK) * num_features_);
    }
    return num_classes_;
  }

private:
  /** Array[rounded_classes_] of initial counts for each class. */
  std::vector<int> class_count_;
  /// Array[rounded_classes_] of modified counts for each class after
  /// normalizing for expected number of features, disabled classes, fragments,
  /// and xheights.
  std::vector<int> norm_count_;
  /** Array[rounded_classes_ + 1] of pruned counts that gets sorted */
  std::vector<int> sort_key_;
  /** Array[rounded_classes_ + 1] of classes corresponding to sort_key_. */
  std::vector<int> sort_index_;
  /** Number of classes in this class pruner. */
  int max_classes_;
  /** Rounded up number of classes used for array sizes. */
  int rounded_classes_;
  /** Threshold count applied to prune classes. */
  int pruning_threshold_;
  /** The number of features used to compute the scores. */
  int num_features_;
  /** Final number of pruned classes. */
  int num_classes_;
};
/*----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------*/
/**
* Runs the class pruner from int_templates on the given features, returning
* the number of classes output in results.
* @param int_templates Class pruner tables
* @param num_features Number of features in blob
* @param features Array of features
* @param normalization_factors Array of fudge factors from blob
* normalization process (by CLASS_INDEX)
* @param expected_num_features Array of expected number of features
* for each class (by CLASS_INDEX)
* @param results Sorted Array of pruned classes. Must be an
* array of size at least
* int_templates->NumClasses.
* @param keep_this
*/
int Classify::PruneClasses(const INT_TEMPLATES_STRUCT *int_templates, int num_features,
int keep_this, const INT_FEATURE_STRUCT *features,
const uint8_t *normalization_factors,
const uint16_t *expected_num_features,
std::vector<CP_RESULT_STRUCT> *results) {
ClassPruner pruner(int_templates->NumClasses);
// Compute initial match scores for all classes.
pruner.ComputeScores(int_templates, num_features, features);
// Adjust match scores for number of expected features.
pruner.AdjustForExpectedNumFeatures(expected_num_features, classify_cp_cutoff_strength);
// Apply disabled classes in unicharset - only works without a shape_table.
if (shape_table_ == nullptr) {
pruner.DisableDisabledClasses(unicharset);
}
// If fragments are disabled, remove them, also only without a shape table.
if (disable_character_fragments && shape_table_ == nullptr) {
pruner.DisableFragments(unicharset);
}
// If we have good x-heights, apply the given normalization factors.
if (normalization_factors != nullptr) {
pruner.NormalizeForXheight(classify_class_pruner_multiplier, normalization_factors);
} else {
pruner.NoNormalization();
}
// Do the actual pruning and sort the short-list.
pruner.PruneAndSort(classify_class_pruner_threshold, keep_this, shape_table_ == nullptr,
unicharset);
if (classify_debug_level > 2) {
pruner.DebugMatch(*this, int_templates, features);
}
if (classify_debug_level > 1) {
pruner.SummarizeResult(*this, int_templates, expected_num_features,
classify_class_pruner_multiplier, normalization_factors);
}
// Convert to the expected output format.
return pruner.SetupResults(results);
}
/**
* IntegerMatcher returns the best configuration and rating
* for a single class. The class matched against is determined
* by the uniqueness of the ClassTemplate parameter. The
* best rating and its associated configuration are returned.
*
* Globals:
* - local_matcher_multiplier_ Normalization factor multiplier
* param ClassTemplate Prototypes & tables for a class
* param NumFeatures Number of features in blob
* param Features Array of features
* param NormalizationFactor Fudge factor from blob normalization process
* param Result Class rating & configuration: (0.0 -> 1.0), 0=bad, 1=good
* param Debug Debugger flag: 1=debugger on
*/
void IntegerMatcher::Match(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ProtoMask, BIT_VECTOR ConfigMask,
                           int16_t NumFeatures, const INT_FEATURE_STRUCT *Features,
                           UnicharRating *Result, int AdaptFeatureThreshold, int Debug,
                           bool SeparateDebugWindows) {
  // Keep the scratch tables on the heap (ScratchEvidence holds per-proto
  // evidence arrays, presumably large), but own them with unique_ptr so
  // they are released on every exit path — the previous raw new/delete
  // leaked if anything in between threw.
  auto tables = std::make_unique<ScratchEvidence>();
  if (MatchDebuggingOn(Debug)) {
    tprintf("Integer Matcher -------------------------------------------\n");
  }
  tables->Clear(ClassTemplate);
  Result->feature_misses = 0;
  // Accumulate evidence for each feature in turn.
  for (int Feature = 0; Feature < NumFeatures; Feature++) {
    int csum = UpdateTablesForFeature(ClassTemplate, ProtoMask, ConfigMask, Feature,
                                      &Features[Feature], tables.get(), Debug);
    // Count features that were missed over all configs.
    if (csum == 0) {
      ++Result->feature_misses;
    }
  }
#ifndef GRAPHICS_DISABLED
  if (PrintProtoMatchesOn(Debug) || PrintMatchSummaryOn(Debug)) {
    DebugFeatureProtoError(ClassTemplate, ProtoMask, ConfigMask, *tables, NumFeatures, Debug);
  }
  if (DisplayProtoMatchesOn(Debug)) {
    DisplayProtoDebugInfo(ClassTemplate, ConfigMask, *tables, SeparateDebugWindows);
  }
  if (DisplayFeatureMatchesOn(Debug)) {
    DisplayFeatureDebugInfo(ClassTemplate, ProtoMask, ConfigMask, NumFeatures, Features,
                            AdaptFeatureThreshold, Debug, SeparateDebugWindows);
  }
#endif
  // Fold the per-feature evidence into per-config sums and pick the winner.
  tables->UpdateSumOfProtoEvidences(ClassTemplate, ConfigMask);
  tables->NormalizeSums(ClassTemplate, NumFeatures);
  FindBestMatch(ClassTemplate, *tables, Result);
#ifndef GRAPHICS_DISABLED
  if (PrintMatchSummaryOn(Debug)) {
    Result->Print();
  }
  if (MatchDebuggingOn(Debug)) {
    tprintf("Match Complete --------------------------------------------\n");
  }
#endif
}
/**
* FindGoodProtos finds all protos whose normalized proto-evidence
* exceed AdaptProtoThreshold. The list is ordered by increasing
* proto id number.
*
* Globals:
* - local_matcher_multiplier_ Normalization factor multiplier
* param ClassTemplate Prototypes & tables for a class
* param ProtoMask AND Mask for proto word
* param ConfigMask AND Mask for config word
* param NumFeatures Number of features in blob
* param Features Array of features
* param ProtoArray Array of good protos
* param AdaptProtoThreshold Threshold for good protos
* param Debug Debugger flag: 1=debugger on
* @return Number of good protos in ProtoArray.
*/
int IntegerMatcher::FindGoodProtos(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ProtoMask,
                                   BIT_VECTOR ConfigMask, int16_t NumFeatures,
                                   INT_FEATURE_ARRAY Features, PROTO_ID *ProtoArray,
                                   int AdaptProtoThreshold, int Debug) {
  // unique_ptr replaces the raw new/delete pair so the scratch tables are
  // released even if a callee throws.
  auto tables = std::make_unique<ScratchEvidence>();
  int NumGoodProtos = 0;
  /* DEBUG opening heading */
  if (MatchDebuggingOn(Debug)) {
    tprintf("Find Good Protos -------------------------------------------\n");
  }
  tables->Clear(ClassTemplate);
  // Accumulate per-proto evidence over all features.
  for (int Feature = 0; Feature < NumFeatures; Feature++) {
    UpdateTablesForFeature(ClassTemplate, ProtoMask, ConfigMask, Feature, &(Features[Feature]),
                           tables.get(), Debug);
  }
#ifndef GRAPHICS_DISABLED
  if (PrintProtoMatchesOn(Debug) || PrintMatchSummaryOn(Debug)) {
    DebugFeatureProtoError(ClassTemplate, ProtoMask, ConfigMask, *tables, NumFeatures, Debug);
  }
#endif
  /* Average Proto Evidences & Find Good Protos */
  for (int proto = 0; proto < ClassTemplate->NumProtos; proto++) {
    /* Compute Average for Actual Proto */
    int Temp = 0;
    for (uint8_t i = 0; i < MAX_PROTO_INDEX && i < ClassTemplate->ProtoLengths[proto]; i++) {
      Temp += tables->proto_evidence_[proto][i];
    }
    Temp /= ClassTemplate->ProtoLengths[proto];
    /* Find Good Protos: append those whose average meets the threshold. */
    if (Temp >= AdaptProtoThreshold) {
      *ProtoArray = proto;
      ProtoArray++;
      NumGoodProtos++;
    }
  }
  if (MatchDebuggingOn(Debug)) {
    tprintf("Match Complete --------------------------------------------\n");
  }
  return NumGoodProtos;
}
/**
* FindBadFeatures finds all features with maximum feature-evidence <
* AdaptFeatureThresh. The list is ordered by increasing feature number.
* @param ClassTemplate Prototypes & tables for a class
* @param ProtoMask AND Mask for proto word
* @param ConfigMask AND Mask for config word
* @param NumFeatures Number of features in blob
* @param Features Array of features
* @param FeatureArray Array of bad features
* @param AdaptFeatureThreshold Threshold for bad features
* @param Debug Debugger flag: 1=debugger on
* @return Number of bad features in FeatureArray.
*/
int IntegerMatcher::FindBadFeatures(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ProtoMask,
                                    BIT_VECTOR ConfigMask, int16_t NumFeatures,
                                    INT_FEATURE_ARRAY Features, FEATURE_ID *FeatureArray,
                                    int AdaptFeatureThreshold, int Debug) {
  // unique_ptr replaces the raw new/delete pair so the scratch tables are
  // released even if a callee throws.
  auto tables = std::make_unique<ScratchEvidence>();
  int NumBadFeatures = 0;
  /* DEBUG opening heading */
  if (MatchDebuggingOn(Debug)) {
    tprintf("Find Bad Features -------------------------------------------\n");
  }
  tables->Clear(ClassTemplate);
  for (int Feature = 0; Feature < NumFeatures; Feature++) {
    UpdateTablesForFeature(ClassTemplate, ProtoMask, ConfigMask, Feature, &Features[Feature],
                           tables.get(), Debug);
    /* Find Best Evidence for Current Feature */
    int best = 0;
    assert(ClassTemplate->NumConfigs < MAX_NUM_CONFIGS);
    for (int i = 0; i < MAX_NUM_CONFIGS && i < ClassTemplate->NumConfigs; i++) {
      if (tables->feature_evidence_[i] > best) {
        best = tables->feature_evidence_[i];
      }
    }
    /* Find Bad Features: no config gave this feature enough evidence. */
    if (best < AdaptFeatureThreshold) {
      *FeatureArray = Feature;
      FeatureArray++;
      NumBadFeatures++;
    }
  }
#ifndef GRAPHICS_DISABLED
  if (PrintProtoMatchesOn(Debug) || PrintMatchSummaryOn(Debug)) {
    DebugFeatureProtoError(ClassTemplate, ProtoMask, ConfigMask, *tables, NumFeatures, Debug);
  }
#endif
  if (MatchDebuggingOn(Debug)) {
    tprintf("Match Complete --------------------------------------------\n");
  }
  return NumBadFeatures;
}
// Builds the similarity-to-evidence lookup table and the shift/mask
// constants used by the integer matcher's evidence computation.
IntegerMatcher::IntegerMatcher(tesseract::IntParam *classify_debug_level)
    : classify_debug_level_(classify_debug_level) {
  /* Initialize table for evidence to similarity lookup */
  for (int i = 0; i < SE_TABLE_SIZE; i++) {
    uint32_t IntSimilarity = i << (27 - SE_TABLE_BITS);
    double Similarity = (static_cast<double>(IntSimilarity)) / 65536.0 / 65536.0;
    // Sigmoid centered at kSimilarityCenter maps similarity to 0..255.
    double evidence = Similarity / kSimilarityCenter;
    evidence = 255.0 / (evidence * evidence + 1.0);
    // Dead with the current kSEExponentialMultiplier == 0.0f; kept for
    // experimentation with an exponential falloff.
    if (kSEExponentialMultiplier > 0.0) {
      // Use std::exp consistently (the second call previously used the
      // unqualified C exp).
      double scale =
          1.0 - std::exp(-kSEExponentialMultiplier) *
                    std::exp(kSEExponentialMultiplier * (static_cast<double>(i) / SE_TABLE_SIZE));
      evidence *= ClipToRange(scale, 0.0, 1.0);
    }
    similarity_evidence_table_[i] = static_cast<uint8_t>(evidence + 0.5);
  }
  /* Initialize evidence computation variables */
  evidence_table_mask_ = ((1 << kEvidenceTableBits) - 1) << (9 - kEvidenceTableBits);
  mult_trunc_shift_bits_ = (14 - kIntEvidenceTruncBits);
  table_trunc_shift_bits_ = (27 - SE_TABLE_BITS - (mult_trunc_shift_bits_ << 1));
  evidence_mult_mask_ = ((1 << kIntEvidenceTruncBits) - 1);
}
/*----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------*/
// Zeros the per-config feature-evidence sums and the per-proto evidence
// rows for the given class template (proto_evidence_[i] is a row, so one
// memset clears NumProtos whole rows).
void ScratchEvidence::Clear(const INT_CLASS_STRUCT *class_template) {
  memset(sum_feature_evidence_, 0, class_template->NumConfigs * sizeof(sum_feature_evidence_[0]));
  memset(proto_evidence_, 0, class_template->NumProtos * sizeof(proto_evidence_[0]));
}
// Zeros only the per-config evidence for the current feature; called once
// per feature before evidence is accumulated (see UpdateTablesForFeature).
void ScratchEvidence::ClearFeatureEvidence(const INT_CLASS_STRUCT *class_template) {
  memset(feature_evidence_, 0, class_template->NumConfigs * sizeof(feature_evidence_[0]));
}
/**
* Print debugging information for Configurations
*/
static void IMDebugConfiguration(int FeatureNum, uint16_t ActualProtoNum, uint8_t Evidence,
uint32_t ConfigWord) {
tprintf("F = %3d, P = %3d, E = %3d, Configs = ", FeatureNum, static_cast<int>(ActualProtoNum),
static_cast<int>(Evidence));
while (ConfigWord) {
if (ConfigWord & 1) {
tprintf("1");
} else {
tprintf("0");
}
ConfigWord >>= 1;
}
tprintf("\n");
}
/**
* Print debugging information for Configurations
*/
static void IMDebugConfigurationSum(int FeatureNum, uint8_t *FeatureEvidence, int32_t ConfigCount) {
tprintf("F=%3d, C=", FeatureNum);
for (int ConfigNum = 0; ConfigNum < ConfigCount; ConfigNum++) {
tprintf("%4d", FeatureEvidence[ConfigNum]);
}
tprintf("\n");
}
/**
* For the given feature: prune protos, compute evidence,
* update Feature Evidence, Proto Evidence, and Sum of Feature
* Evidence tables.
* @param ClassTemplate Prototypes & tables for a class
* @param FeatureNum Current feature number (for DEBUG only)
* @param Feature Pointer to a feature struct
* @param tables Evidence tables
* @param Debug Debugger flag: 1=debugger on
* @return sum of feature evidence tables
*/
int IntegerMatcher::UpdateTablesForFeature(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ProtoMask,
                                           BIT_VECTOR ConfigMask, int FeatureNum,
                                           const INT_FEATURE_STRUCT *Feature,
                                           ScratchEvidence *tables, int Debug) {
  uint32_t ConfigWord;
  uint32_t ProtoWord;
  uint32_t ProtoNum;
  uint32_t ActualProtoNum;
  uint8_t proto_byte;
  int32_t proto_word_offset;
  int32_t proto_offset;
  PROTO_SET_STRUCT *ProtoSet;
  uint32_t *ProtoPrunerPtr;
  INT_PROTO_STRUCT *Proto;
  int ProtoSetIndex;
  uint8_t Evidence;
  uint32_t XFeatureAddress;
  uint32_t YFeatureAddress;
  uint32_t ThetaFeatureAddress;
  // Per-feature scratch is reset here; the evidence sums persist across
  // features and are only updated at the bottom of this function.
  tables->ClearFeatureEvidence(ClassTemplate);
  /* Precompute Feature Address offset for Proto Pruning */
  // Each pruner axis has NUM_PP_BUCKETS buckets of 2 words each; the
  // feature coordinate (0..255) selects a bucket via >> 2.
  XFeatureAddress = ((Feature->X >> 2) << 1);
  YFeatureAddress = (NUM_PP_BUCKETS << 1) + ((Feature->Y >> 2) << 1);
  ThetaFeatureAddress = (NUM_PP_BUCKETS << 2) + ((Feature->Theta >> 2) << 1);
  for (ProtoSetIndex = 0, ActualProtoNum = 0; ProtoSetIndex < ClassTemplate->NumProtoSets;
       ProtoSetIndex++) {
    ProtoSet = ClassTemplate->ProtoSets[ProtoSetIndex];
    ProtoPrunerPtr = reinterpret_cast<uint32_t *>((*ProtoSet).ProtoPruner);
    // Each pruner word covers half a proto set, one bit per proto.
    for (ProtoNum = 0; ProtoNum < PROTOS_PER_PROTO_SET; ProtoNum += (PROTOS_PER_PROTO_SET >> 1),
        ActualProtoNum += (PROTOS_PER_PROTO_SET >> 1), ProtoMask++, ProtoPrunerPtr++) {
      /* Prune Protos of current Proto Set */
      // A proto survives only if its bit is set in the X, Y and Theta
      // pruner buckets for this feature AND it is enabled in ProtoMask.
      ProtoWord = *(ProtoPrunerPtr + XFeatureAddress);
      ProtoWord &= *(ProtoPrunerPtr + YFeatureAddress);
      ProtoWord &= *(ProtoPrunerPtr + ThetaFeatureAddress);
      ProtoWord &= *ProtoMask;
      if (ProtoWord != 0) {
        proto_byte = ProtoWord & 0xff;
        ProtoWord >>= 8;
        proto_word_offset = 0;
        // Walk the surviving protos one set bit at a time using the
        // offset_table / next_table byte lookups.
        while (ProtoWord != 0 || proto_byte != 0) {
          while (proto_byte == 0) {
            proto_byte = ProtoWord & 0xff;
            ProtoWord >>= 8;
            proto_word_offset += 8;
          }
          proto_offset = offset_table[proto_byte] + proto_word_offset;
          proto_byte = next_table[proto_byte];
          Proto = &(ProtoSet->Protos[ProtoNum + proto_offset]);
          ConfigWord = Proto->Configs[0];
          // Fixed-point positional mismatch of the feature against the
          // proto's line parameters (A, B, C).
          int32_t A3 = (((Proto->A * (Feature->X - 128)) * 2) - (Proto->B * (Feature->Y - 128)) +
                        (Proto->C * 512));
          // Angular mismatch, scaled by the theta fudge factor.
          int32_t M3 = ((static_cast<int8_t>(Feature->Theta - Proto->Angle)) * kIntThetaFudge) * 2;
          // Cheap absolute value: ~x == -x - 1; the off-by-one is washed
          // out by the truncation shifts below.
          if (A3 < 0) {
            A3 = ~A3;
          }
          if (M3 < 0) {
            M3 = ~M3;
          }
          A3 >>= mult_trunc_shift_bits_;
          M3 >>= mult_trunc_shift_bits_;
          // Saturate both terms before squaring so A4 cannot overflow.
          if (static_cast<uint32_t>(A3) > evidence_mult_mask_) {
            A3 = evidence_mult_mask_;
          }
          if (static_cast<uint32_t>(M3) > evidence_mult_mask_) {
            M3 = evidence_mult_mask_;
          }
          // Squared distance is mapped to evidence (0..255) via the
          // precomputed similarity table; too-distant protos score 0.
          uint32_t A4 = (A3 * A3) + (M3 * M3);
          A4 >>= table_trunc_shift_bits_;
          if (A4 > evidence_table_mask_) {
            Evidence = 0;
          } else {
            Evidence = similarity_evidence_table_[A4];
          }
          if (PrintFeatureMatchesOn(Debug)) {
            IMDebugConfiguration(FeatureNum, ActualProtoNum + proto_offset, Evidence, ConfigWord);
          }
          // Keep the best evidence per config for this feature.
          ConfigWord &= *ConfigMask;
          uint8_t feature_evidence_index = 0;
          uint8_t config_byte = 0;
          while (ConfigWord != 0 || config_byte != 0) {
            while (config_byte == 0) {
              config_byte = ConfigWord & 0xff;
              ConfigWord >>= 8;
              feature_evidence_index += 8;
            }
            const uint8_t config_offset = offset_table[config_byte] + feature_evidence_index - 8;
            config_byte = next_table[config_byte];
            if (Evidence > tables->feature_evidence_[config_offset]) {
              tables->feature_evidence_[config_offset] = Evidence;
            }
          }
          uint8_t ProtoIndex = ClassTemplate->ProtoLengths[ActualProtoNum + proto_offset];
          if (ProtoIndex > MAX_PROTO_INDEX) {
            // Avoid buffer overflow.
            // TODO: A better fix is still open.
            ProtoIndex = MAX_PROTO_INDEX;
          }
          // Insertion-sort Evidence into this proto's descending evidence
          // list; the smallest value falls off the end when full.
          uint8_t *UINT8Pointer = &(tables->proto_evidence_[ActualProtoNum + proto_offset][0]);
          for (; Evidence > 0 && ProtoIndex > 0; ProtoIndex--, UINT8Pointer++) {
            if (Evidence > *UINT8Pointer) {
              uint8_t Temp = *UINT8Pointer;
              *UINT8Pointer = Evidence;
              Evidence = Temp;
            }
          }
        }
      }
    }
  }
  if (PrintFeatureMatchesOn(Debug)) {
    IMDebugConfigurationSum(FeatureNum, tables->feature_evidence_, ClassTemplate->NumConfigs);
  }
  // Fold this feature's per-config evidence into the running sums and
  // return the total over all configs.
  int *IntPointer = tables->sum_feature_evidence_;
  uint8_t *UINT8Pointer = tables->feature_evidence_;
  int SumOverConfigs = 0;
  for (int ConfigNum = ClassTemplate->NumConfigs; ConfigNum > 0; ConfigNum--) {
    int evidence = *UINT8Pointer++;
    SumOverConfigs += evidence;
    *IntPointer++ += evidence;
  }
  return SumOverConfigs;
}
/**
 * Print debugging information about feature and proto error
 * for each configuration of a class.
 */
#ifndef GRAPHICS_DISABLED
void IntegerMatcher::DebugFeatureProtoError(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ProtoMask,
                                            BIT_VECTOR ConfigMask, const ScratchEvidence &tables,
                                            int16_t NumFeatures, int Debug) {
  float ProtoConfigs[MAX_NUM_CONFIGS];
  int ConfigNum;
  uint32_t ConfigWord;
  int ProtoSetIndex;
  uint16_t ProtoNum;
  uint8_t ProtoWordNum;
  PROTO_SET_STRUCT *ProtoSet;
  if (PrintMatchSummaryOn(Debug)) {
    // Per-config feature error: evidence maxes at 256 per feature, so
    // 1 - sum/(NumFeatures*256) is the fractional error, shown as percent.
    tprintf("Configuration Mask:\n");
    for (ConfigNum = 0; ConfigNum < ClassTemplate->NumConfigs; ConfigNum++) {
      tprintf("%1d", (((*ConfigMask) >> ConfigNum) & 1));
    }
    tprintf("\n");
    tprintf("Feature Error for Configurations:\n");
    for (ConfigNum = 0; ConfigNum < ClassTemplate->NumConfigs; ConfigNum++) {
      tprintf(" %5.1f", 100.0 * (1.0 - static_cast<float>(tables.sum_feature_evidence_[ConfigNum]) /
                                           NumFeatures / 256.0));
    }
    tprintf("\n\n\n");
  }
  if (PrintMatchSummaryOn(Debug)) {
    // Dump the proto mask, one pruner word (half a proto set) per line.
    tprintf("Proto Mask:\n");
    for (ProtoSetIndex = 0; ProtoSetIndex < ClassTemplate->NumProtoSets; ProtoSetIndex++) {
      for (ProtoWordNum = 0; ProtoWordNum < 2; ProtoWordNum++, ProtoMask++) {
        uint16_t ActualProtoNum = (ProtoSetIndex * PROTOS_PER_PROTO_SET);
        for (ProtoNum = 0; ((ProtoNum < (PROTOS_PER_PROTO_SET >> 1)) &&
                            (ActualProtoNum < ClassTemplate->NumProtos));
             ProtoNum++, ActualProtoNum++) {
          tprintf("%1d", (((*ProtoMask) >> ProtoNum) & 1));
        }
        tprintf("\n");
      }
    }
    tprintf("\n");
  }
  for (int i = 0; i < ClassTemplate->NumConfigs; i++) {
    ProtoConfigs[i] = 0;
  }
  if (PrintProtoMatchesOn(Debug)) {
    // For each proto: its recorded evidence values, its average match
    // percentage, and its contribution to every config it belongs to.
    tprintf("Proto Evidence:\n");
    for (ProtoSetIndex = 0; ProtoSetIndex < ClassTemplate->NumProtoSets; ProtoSetIndex++) {
      ProtoSet = ClassTemplate->ProtoSets[ProtoSetIndex];
      uint16_t ActualProtoNum = (ProtoSetIndex * PROTOS_PER_PROTO_SET);
      for (ProtoNum = 0;
           ((ProtoNum < PROTOS_PER_PROTO_SET) && (ActualProtoNum < ClassTemplate->NumProtos));
           ProtoNum++, ActualProtoNum++) {
        tprintf("P %3d =", ActualProtoNum);
        int temp = 0;
        for (uint8_t j = 0; j < ClassTemplate->ProtoLengths[ActualProtoNum]; j++) {
          uint8_t data = tables.proto_evidence_[ActualProtoNum][j];
          tprintf(" %d", data);
          temp += data;
        }
        tprintf(" = %6.4f%%\n", temp / 256.0 / ClassTemplate->ProtoLengths[ActualProtoNum]);
        // Credit this proto's total to every config whose bit it carries.
        ConfigWord = ProtoSet->Protos[ProtoNum].Configs[0];
        ConfigNum = 0;
        while (ConfigWord) {
          tprintf("%5d", ConfigWord & 1 ? temp : 0);
          if (ConfigWord & 1) {
            ProtoConfigs[ConfigNum] += temp;
          }
          ConfigNum++;
          ConfigWord >>= 1;
        }
        tprintf("\n");
      }
    }
  }
  if (PrintMatchSummaryOn(Debug)) {
    tprintf("Proto Error for Configurations:\n");
    for (ConfigNum = 0; ConfigNum < ClassTemplate->NumConfigs; ConfigNum++) {
      tprintf(" %5.1f", 100.0 * (1.0 - ProtoConfigs[ConfigNum] /
                                           ClassTemplate->ConfigLengths[ConfigNum] / 256.0));
    }
    tprintf("\n\n");
  }
  if (PrintProtoMatchesOn(Debug)) {
    tprintf("Proto Sum for Configurations:\n");
    for (ConfigNum = 0; ConfigNum < ClassTemplate->NumConfigs; ConfigNum++) {
      tprintf(" %4.1f", ProtoConfigs[ConfigNum] / 256.0);
    }
    tprintf("\n\n");
    tprintf("Proto Length for Configurations:\n");
    for (ConfigNum = 0; ConfigNum < ClassTemplate->NumConfigs; ConfigNum++) {
      tprintf(" %4.1f", static_cast<float>(ClassTemplate->ConfigLengths[ConfigNum]));
    }
    tprintf("\n\n");
  }
}
// Draws every proto of ClassTemplate that belongs to a config enabled in
// ConfigMask, with display intensity proportional to its average evidence.
void IntegerMatcher::DisplayProtoDebugInfo(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ConfigMask,
                                           const ScratchEvidence &tables,
                                           bool SeparateDebugWindows) {
  uint16_t ProtoNum;
  PROTO_SET_STRUCT *ProtoSet;
  int ProtoSetIndex;
  InitIntMatchWindowIfReqd();
  if (SeparateDebugWindows) {
    InitFeatureDisplayWindowIfReqd();
    InitProtoDisplayWindowIfReqd();
  }
  for (ProtoSetIndex = 0; ProtoSetIndex < ClassTemplate->NumProtoSets; ProtoSetIndex++) {
    ProtoSet = ClassTemplate->ProtoSets[ProtoSetIndex];
    uint16_t ActualProtoNum = ProtoSetIndex * PROTOS_PER_PROTO_SET;
    for (ProtoNum = 0;
         ((ProtoNum < PROTOS_PER_PROTO_SET) && (ActualProtoNum < ClassTemplate->NumProtos));
         ProtoNum++, ActualProtoNum++) {
      /* Compute Average for Actual Proto */
      int temp = 0;
      for (uint8_t i = 0; i < ClassTemplate->ProtoLengths[ActualProtoNum]; i++) {
        temp += tables.proto_evidence_[ActualProtoNum][i];
      }
      temp /= ClassTemplate->ProtoLengths[ActualProtoNum];
      // Average evidence (0..255) maps to a 0..1 display intensity.
      if ((ProtoSet->Protos[ProtoNum]).Configs[0] & (*ConfigMask)) {
        DisplayIntProto(ClassTemplate, ActualProtoNum, temp / 255.0);
      }
    }
  }
}
// Re-runs the per-feature evidence computation for each feature and draws
// the feature with an intensity reflecting its best config evidence.
void IntegerMatcher::DisplayFeatureDebugInfo(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ProtoMask,
                                             BIT_VECTOR ConfigMask, int16_t NumFeatures,
                                             const INT_FEATURE_STRUCT *Features,
                                             int AdaptFeatureThreshold, int Debug,
                                             bool SeparateDebugWindows) {
  // Heap-allocated: ScratchEvidence holds large per-proto evidence arrays.
  auto *tables = new ScratchEvidence();
  tables->Clear(ClassTemplate);
  InitIntMatchWindowIfReqd();
  if (SeparateDebugWindows) {
    InitFeatureDisplayWindowIfReqd();
    InitProtoDisplayWindowIfReqd();
  }
  for (int Feature = 0; Feature < NumFeatures; Feature++) {
    UpdateTablesForFeature(ClassTemplate, ProtoMask, ConfigMask, Feature, &Features[Feature],
                           tables, 0);
    /* Find Best Evidence for Current Feature */
    int best = 0;
    assert(ClassTemplate->NumConfigs < MAX_NUM_CONFIGS);
    for (int i = 0; i < MAX_NUM_CONFIGS && i < ClassTemplate->NumConfigs; i++) {
      if (tables->feature_evidence_[i] > best) {
        best = tables->feature_evidence_[i];
      }
    }
    /* Update display for current feature */
    if (ClipMatchEvidenceOn(Debug)) {
      // Binary display: full intensity iff the feature clears the
      // adaptation threshold.
      if (best < AdaptFeatureThreshold) {
        DisplayIntFeature(&Features[Feature], 0.0);
      } else {
        DisplayIntFeature(&Features[Feature], 1.0);
      }
    } else {
      // Continuous display: intensity proportional to best evidence.
      DisplayIntFeature(&Features[Feature], best / 255.0);
    }
  }
  delete tables;
}
#endif
/**
* Add sum of Proto Evidences into Sum Of Feature Evidence Array
*/
void ScratchEvidence::UpdateSumOfProtoEvidences(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ConfigMask) {
  int *IntPointer;
  uint32_t ConfigWord;
  int ProtoSetIndex;
  uint16_t ProtoNum;
  PROTO_SET_STRUCT *ProtoSet;
  int NumProtos;
  NumProtos = ClassTemplate->NumProtos;
  for (ProtoSetIndex = 0; ProtoSetIndex < ClassTemplate->NumProtoSets; ProtoSetIndex++) {
    ProtoSet = ClassTemplate->ProtoSets[ProtoSetIndex];
    uint16_t ActualProtoNum = (ProtoSetIndex * PROTOS_PER_PROTO_SET);
    for (ProtoNum = 0; ((ProtoNum < PROTOS_PER_PROTO_SET) && (ActualProtoNum < NumProtos));
         ProtoNum++, ActualProtoNum++) {
      // Total this proto's recorded evidence values; the MAX_PROTO_INDEX
      // bound mirrors the clamp in UpdateTablesForFeature.
      int temp = 0;
      for (uint8_t i = 0; i < MAX_PROTO_INDEX && i < ClassTemplate->ProtoLengths[ActualProtoNum];
           i++) {
        temp += proto_evidence_[ActualProtoNum][i];
      }
      // Credit the total to each config (allowed by ConfigMask) whose bit
      // this proto carries; bit i corresponds to sum_feature_evidence_[i].
      ConfigWord = ProtoSet->Protos[ProtoNum].Configs[0];
      ConfigWord &= *ConfigMask;
      IntPointer = sum_feature_evidence_;
      while (ConfigWord) {
        if (ConfigWord & 1) {
          *IntPointer += temp;
        }
        IntPointer++;
        ConfigWord >>= 1;
      }
    }
  }
}
/**
* Normalize Sum of Proto and Feature Evidence by dividing by the sum of
* the Feature Lengths and the Proto Lengths for each configuration.
*/
// Rescales each config's evidence sum by 256 / (feature count + config
// length), so short and long configs compare fairly.
void ScratchEvidence::NormalizeSums(INT_CLASS_STRUCT *ClassTemplate, int16_t NumFeatures) {
  // NumConfigs may exceed MAX_NUM_CONFIGS; never index past the arrays.
  for (int config = 0; config < ClassTemplate->NumConfigs && config < MAX_NUM_CONFIGS; ++config) {
    const int denominator = NumFeatures + ClassTemplate->ConfigLengths[config];
    sum_feature_evidence_[config] = (sum_feature_evidence_[config] << 8) / denominator;
  }
}
/**
* Find the best match for the current class and update the Result
* with the configuration and match rating.
* @return The best normalized sum of evidences
*/
// Scans the per-config evidence sums, records every (config, rating) pair
// as a font hypothesis, and leaves the best config and its probability-
// scaled rating in result.  Returns the best raw evidence sum.
int IntegerMatcher::FindBestMatch(INT_CLASS_STRUCT *class_template, const ScratchEvidence &tables,
                                  UnicharRating *result) {
  result->config = 0;
  result->fonts.clear();
  result->fonts.reserve(class_template->NumConfigs);
  int top_rating = 0;
  // NumConfigs can exceed MAX_NUM_CONFIGS; stay within the scratch arrays.
  for (int config = 0; config < class_template->NumConfigs && config < MAX_NUM_CONFIGS; ++config) {
    const int rating = tables.sum_feature_evidence_[config];
    if (*classify_debug_level_ > 2) {
      tprintf("Config %d, rating=%d\n", config, rating);
    }
    if (rating > top_rating) {
      top_rating = rating;
      result->config = config;
    }
    result->fonts.emplace_back(config, rating);
  }
  // Map the best evidence sum onto a 0..1 probability-like scale.
  result->rating = top_rating / 65536.0f;
  return top_rating;
}
/**
* Applies the CN normalization factor to the given rating and returns
* the modified rating.
*/
// Blends the raw rating with the character-normalization factor, weighting
// the rating by blob_length and the normalization term by the matcher
// multiplier.  Returns 1.0 when the combined weight is zero.
float IntegerMatcher::ApplyCNCorrection(float rating, int blob_length, int normalization_factor,
                                        int matcher_multiplier) {
  const int divisor = blob_length + matcher_multiplier;
  if (divisor == 0) {
    return 1.0f;
  }
  const float weighted_sum =
      rating * blob_length + matcher_multiplier * normalization_factor / 256.0f;
  return weighted_sum / divisor;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/intmatcher.cpp
|
C++
|
apache-2.0
| 46,087
|
/******************************************************************************
** Filename: intmatcher.h
** Purpose: Interface to high level generic classifier routines.
** Author: Robert Moss
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef INTMATCHER_H
#define INTMATCHER_H
#include "intproto.h"
#include "params.h"
namespace tesseract {
// Character fragments could be present in the trained templates
// but turned on/off on the language-by-language basis or depending
// on particular properties of the corpus (e.g. when we expect the
// images to have low exposure).
extern BOOL_VAR_H(disable_character_fragments);
extern INT_VAR_H(classify_integer_matcher_multiplier);
struct UnicharRating;
// One class-pruner result: a candidate class id and its pruner rating.
struct CP_RESULT_STRUCT {
  CP_RESULT_STRUCT() : Rating(0.0f), Class(0) {}
  float Rating;   // pruner score for the candidate class
  CLASS_ID Class; // id of the candidate class
};
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
#define SE_TABLE_BITS 9
#define SE_TABLE_SIZE 512
// Scratch space used by IntegerMatcher while matching one class.
struct ScratchEvidence {
  // Best evidence found per config for the feature currently being matched.
  uint8_t feature_evidence_[MAX_NUM_CONFIGS];
  // Evidence per config accumulated over all features (and proto sums).
  int sum_feature_evidence_[MAX_NUM_CONFIGS];
  // Per-proto evidence values, maintained in descending order.
  uint8_t proto_evidence_[MAX_NUM_PROTOS][MAX_PROTO_INDEX];
  // Zeros the config sums and proto evidence for the given class.
  void Clear(const INT_CLASS_STRUCT *class_template);
  // Zeros only the per-feature config evidence.
  void ClearFeatureEvidence(const INT_CLASS_STRUCT *class_template);
  // Rescales each config sum by 256/(NumFeatures + config length).
  void NormalizeSums(INT_CLASS_STRUCT *ClassTemplate, int16_t NumFeatures);
  // Adds each proto's total evidence into the sums of its configs.
  void UpdateSumOfProtoEvidences(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ConfigMask);
};
class IntegerMatcher {
public:
  // Integer Matcher Theta Fudge (0-255).
  static const int kIntThetaFudge = 128;
  // Bits in Similarity to Evidence Lookup (8-9).
  static const int kEvidenceTableBits = 9;
  // Integer Evidence Truncation Bits (8-14).
  static const int kIntEvidenceTruncBits = 14;
  // Similarity to Evidence Table Exponential Multiplier.
  static const float kSEExponentialMultiplier;
  // Center of Similarity Curve.
  static const float kSimilarityCenter;
  // Builds the similarity-to-evidence table and the derived shift/mask
  // values used by the fixed-point evidence computation.
  IntegerMatcher(tesseract::IntParam *classify_debug_level);
  // Matches NumFeatures Features against ClassTemplate, restricted to the
  // protos/configs enabled in ProtoMask/ConfigMask, and fills Result.
  void Match(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ProtoMask, BIT_VECTOR ConfigMask,
             int16_t NumFeatures, const INT_FEATURE_STRUCT *Features,
             tesseract::UnicharRating *Result, int AdaptFeatureThreshold, int Debug,
             bool SeparateDebugWindows);
  // Applies the CN normalization factor to the given rating and returns
  // the modified rating.
  float ApplyCNCorrection(float rating, int blob_length, int normalization_factor,
                          int matcher_multiplier);
  // Presumably fills ProtoArray with protos meeting AdaptProtoThreshold —
  // TODO(review): confirm return semantics against the implementation.
  int FindGoodProtos(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ProtoMask, BIT_VECTOR ConfigMask,
                     int16_t NumFeatures, INT_FEATURE_ARRAY Features, PROTO_ID *ProtoArray,
                     int AdaptProtoThreshold, int Debug);
  // Presumably fills FeatureArray with features scoring below
  // AdaptFeatureThreshold — TODO(review): confirm against implementation.
  int FindBadFeatures(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ProtoMask, BIT_VECTOR ConfigMask,
                      int16_t NumFeatures, INT_FEATURE_ARRAY Features, FEATURE_ID *FeatureArray,
                      int AdaptFeatureThreshold, int Debug);
private:
  // Prunes protos against one feature and folds the resulting evidence
  // into the scratch tables; returns the feature's evidence sum.
  int UpdateTablesForFeature(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ProtoMask, BIT_VECTOR ConfigMask,
                             int FeatureNum, const INT_FEATURE_STRUCT *Feature,
                             ScratchEvidence *evidence, int Debug);
  // Picks the config with the highest evidence sum and fills Result.
  int FindBestMatch(INT_CLASS_STRUCT *ClassTemplate, const ScratchEvidence &tables,
                    tesseract::UnicharRating *Result);
#ifndef GRAPHICS_DISABLED
  void DebugFeatureProtoError(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ProtoMask, BIT_VECTOR ConfigMask,
                              const ScratchEvidence &tables, int16_t NumFeatures, int Debug);
  void DisplayProtoDebugInfo(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ConfigMask,
                             const ScratchEvidence &tables, bool SeparateDebugWindows);
  void DisplayFeatureDebugInfo(INT_CLASS_STRUCT *ClassTemplate, BIT_VECTOR ProtoMask, BIT_VECTOR ConfigMask,
                               int16_t NumFeatures, const INT_FEATURE_STRUCT *Features,
                               int AdaptFeatureThreshold, int Debug, bool SeparateDebugWindows);
#endif
private:
  tesseract::IntParam *classify_debug_level_;
  // Maps truncated squared distance to evidence (0..255).
  uint8_t similarity_evidence_table_[SE_TABLE_SIZE];
  // Masks/shifts derived from kEvidenceTableBits and kIntEvidenceTruncBits
  // in the constructor.
  uint32_t evidence_table_mask_;
  uint32_t mult_trunc_shift_bits_;
  uint32_t table_trunc_shift_bits_;
  uint32_t evidence_mult_mask_;
};
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/intmatcher.h
|
C++
|
apache-2.0
| 5,163
|
/******************************************************************************
** Filename: intproto.c
** Purpose: Definition of data structures for integer protos.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*-----------------------------------------------------------------------------
Include Files and Type Defines
-----------------------------------------------------------------------------*/
#define _USE_MATH_DEFINES // for M_PI
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "intproto.h"
#include "classify.h"
#include "fontinfo.h"
#include "mfoutline.h"
#include "picofeat.h"
#include "points.h"
#include "shapetable.h"
#ifndef GRAPHICS_DISABLED
#include "svmnode.h"
#endif
#include "helpers.h"
#include <algorithm>
#include <cassert>
#include <cmath> // for M_PI, std::floor
#include <cstdio>
namespace tesseract {
/* match debug display constants*/
#define PROTO_PRUNER_SCALE (4.0)
#define INT_DESCENDER (0.0 * INT_CHAR_NORM_RANGE)
#define INT_BASELINE (0.25 * INT_CHAR_NORM_RANGE)
#define INT_XHEIGHT (0.75 * INT_CHAR_NORM_RANGE)
#define INT_CAPHEIGHT (1.0 * INT_CHAR_NORM_RANGE)
#define INT_XCENTER (0.5 * INT_CHAR_NORM_RANGE)
#define INT_YCENTER (0.5 * INT_CHAR_NORM_RANGE)
#define INT_XRADIUS (0.2 * INT_CHAR_NORM_RANGE)
#define INT_YRADIUS (0.2 * INT_CHAR_NORM_RANGE)
#define INT_MIN_X 0
#define INT_MIN_Y 0
#define INT_MAX_X INT_CHAR_NORM_RANGE
#define INT_MAX_Y INT_CHAR_NORM_RANGE
/** define pad used to snap near horiz/vertical protos to horiz/vertical */
#define HV_TOLERANCE (0.0025) /* approx 0.9 degrees */
typedef enum { StartSwitch, EndSwitch, LastSwitch } SWITCH_TYPE;
#define MAX_NUM_SWITCHES 3
// One entry in a TABLE_FILLER's switch list.  Presumably marks a point
// during the class-pruner fill where the Y range being filled changes
// (start edge, end edge, or last) — TODO(review): confirm against
// InitTableFiller/GetNextFill.
struct FILL_SWITCH {
  SWITCH_TYPE Type;
  int8_t X, Y;
  int16_t YInit;
  int16_t Delta;
};
// Iteration state for sweeping a proto's footprint across the class
// pruner table; consumed by FillerDone/GetNextFill to emit FILL_SPECs
// (see AddProtoToClassPruner).
struct TABLE_FILLER {
  uint8_t NextSwitch; // index of the next FILL_SWITCH to apply
  uint8_t AngleStart, AngleEnd;
  int8_t X;
  int16_t YStart, YEnd;
  int16_t StartDelta, EndDelta;
  FILL_SWITCH Switch[MAX_NUM_SWITCHES];
};
// A single fill request: the X column plus Y and angle ranges of class
// pruner cells to update (passed to DoFill).
struct FILL_SPEC {
  int8_t X;
  int8_t YStart, YEnd;
  uint8_t AngleStart, AngleEnd;
};
/* constants for conversion from old inttemp format */
#define OLD_MAX_NUM_CONFIGS 32
#define OLD_WERDS_PER_CONFIG_VEC ((OLD_MAX_NUM_CONFIGS + BITS_PER_WERD - 1) / BITS_PER_WERD)
/*-----------------------------------------------------------------------------
Macros
-----------------------------------------------------------------------------*/
/** macro for performing circular increments of bucket indices */
#define CircularIncrement(i, r) (((i) < (r)-1) ? ((i)++) : ((i) = 0))
/** macro for mapping floats to ints without bounds checking */
#define MapParam(P, O, N) (std::floor(((P) + (O)) * (N)))
/*---------------------------------------------------------------------------
Private Function Prototypes
----------------------------------------------------------------------------*/
float BucketStart(int Bucket, float Offset, int NumBuckets);
float BucketEnd(int Bucket, float Offset, int NumBuckets);
void DoFill(FILL_SPEC *FillSpec, CLASS_PRUNER_STRUCT *Pruner, uint32_t ClassMask,
uint32_t ClassCount, uint32_t WordIndex);
bool FillerDone(TABLE_FILLER *Filler);
void FillPPCircularBits(uint32_t ParamTable[NUM_PP_BUCKETS][WERDS_PER_PP_VECTOR], int Bit,
float Center, float Spread, bool debug);
void FillPPLinearBits(uint32_t ParamTable[NUM_PP_BUCKETS][WERDS_PER_PP_VECTOR], int Bit,
float Center, float Spread, bool debug);
void GetCPPadsForLevel(int Level, float *EndPad, float *SidePad, float *AnglePad);
ScrollView::Color GetMatchColorFor(float Evidence);
void GetNextFill(TABLE_FILLER *Filler, FILL_SPEC *Fill);
void InitTableFiller(float EndPad, float SidePad, float AnglePad, PROTO_STRUCT *Proto,
TABLE_FILLER *Filler);
#ifndef GRAPHICS_DISABLED
void RenderIntFeature(ScrollView *window, const INT_FEATURE_STRUCT *Feature,
ScrollView::Color color);
void RenderIntProto(ScrollView *window, INT_CLASS_STRUCT *Class, PROTO_ID ProtoId, ScrollView::Color color);
#endif // !GRAPHICS_DISABLED
/*-----------------------------------------------------------------------------
Global Data Definitions and Declarations
-----------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
/* global display lists used to display proto and feature match information*/
static ScrollView *IntMatchWindow = nullptr;
static ScrollView *FeatureDisplayWindow = nullptr;
static ScrollView *ProtoDisplayWindow = nullptr;
#endif
/*-----------------------------------------------------------------------------
Variables
-----------------------------------------------------------------------------*/
/* control knobs */
static INT_VAR(classify_num_cp_levels, 3, "Number of Class Pruner Levels");
static double_VAR(classify_cp_angle_pad_loose, 45.0, "Class Pruner Angle Pad Loose");
static double_VAR(classify_cp_angle_pad_medium, 20.0, "Class Pruner Angle Pad Medium");
static double_VAR(classify_cp_angle_pad_tight, 10.0, "CLass Pruner Angle Pad Tight");
static double_VAR(classify_cp_end_pad_loose, 0.5, "Class Pruner End Pad Loose");
static double_VAR(classify_cp_end_pad_medium, 0.5, "Class Pruner End Pad Medium");
static double_VAR(classify_cp_end_pad_tight, 0.5, "Class Pruner End Pad Tight");
static double_VAR(classify_cp_side_pad_loose, 2.5, "Class Pruner Side Pad Loose");
static double_VAR(classify_cp_side_pad_medium, 1.2, "Class Pruner Side Pad Medium");
static double_VAR(classify_cp_side_pad_tight, 0.6, "Class Pruner Side Pad Tight");
static double_VAR(classify_pp_angle_pad, 45.0, "Proto Pruner Angle Pad");
static double_VAR(classify_pp_end_pad, 0.5, "Proto Prune End Pad");
static double_VAR(classify_pp_side_pad, 2.5, "Proto Pruner Side Pad");
/**
* This routine truncates Param to lie within the range
* of Min-Max inclusive.
*
* @param Param parameter value to be truncated
* @param Min, Max parameter limits (inclusive)
*
* @return Truncated parameter.
*/
// Clamps Param to [Min, Max]; an in-range value is floored to an integer,
// an out-of-range value is replaced by the nearest limit.
static int TruncateParam(float Param, int Min, int Max) {
  if (Param < Min) {
    return Min;
  }
  if (Param > Max) {
    return Max;
  }
  return static_cast<int>(std::floor(Param));
}
/*-----------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
/// Builds a feature from an FCOORD for position with all the necessary
/// clipping and rounding.
INT_FEATURE_STRUCT::INT_FEATURE_STRUCT(const FCOORD &pos, uint8_t theta)
    // +0.5 rounds to nearest before truncation; result clipped to 0..255.
    : X(ClipToRange<int16_t>(static_cast<int16_t>(pos.x() + 0.5), 0, 255))
    , Y(ClipToRange<int16_t>(static_cast<int16_t>(pos.y() + 0.5), 0, 255))
    , Theta(theta)
    , CP_misses(0) {}
/** Builds a feature from ints with all the necessary clipping and casting. */
INT_FEATURE_STRUCT::INT_FEATURE_STRUCT(int x, int y, int theta)
    // Each coordinate is clipped into the byte range used by int features.
    : X(static_cast<uint8_t>(ClipToRange<int>(x, 0, UINT8_MAX)))
    , Y(static_cast<uint8_t>(ClipToRange<int>(y, 0, UINT8_MAX)))
    , Theta(static_cast<uint8_t>(ClipToRange<int>(theta, 0, UINT8_MAX)))
    , CP_misses(0) {}
/**
* This routine adds a new class structure to a set of
* templates. Classes have to be added to Templates in
* the order of increasing ClassIds.
*
* @param Templates templates to add new class to
* @param ClassId class id to associate new class with
* @param Class class data structure to add to templates
*
* Globals: none
*/
void AddIntClass(INT_TEMPLATES_STRUCT *Templates, CLASS_ID ClassId, INT_CLASS_STRUCT *Class) {
  int Pruner;
  assert(LegalClassId(ClassId));
  // Classes must arrive in increasing ClassId order so ClassId always
  // equals the index of the next free slot.
  if (static_cast<unsigned>(ClassId) != Templates->NumClasses) {
    fprintf(stderr,
            "Please make sure that classes are added to templates"
            " in increasing order of ClassIds\n");
    exit(1);
  }
  ClassForClassId(Templates, ClassId) = Class;
  Templates->NumClasses++;
  // Allocate a fresh zeroed class pruner whenever the class count outruns
  // the capacity of the pruners allocated so far.
  if (Templates->NumClasses > MaxNumClassesIn(Templates)) {
    Pruner = Templates->NumClassPruners++;
    Templates->ClassPruners[Pruner] = new CLASS_PRUNER_STRUCT;
    memset(Templates->ClassPruners[Pruner], 0, sizeof(CLASS_PRUNER_STRUCT));
  }
} /* AddIntClass */
/**
* This routine returns the index of the next free config
* in Class.
*
* @param Class class to add new configuration to
*
* Globals: none
*
* @return Index of next free config.
*/
// Allocates the next free configuration slot in Class, initializes its
// length to zero, and returns its index.
int AddIntConfig(INT_CLASS_STRUCT *Class) {
  assert(Class->NumConfigs < MAX_NUM_CONFIGS);
  const int new_index = Class->NumConfigs++;
  Class->ConfigLengths[new_index] = 0;
  return new_index;
} /* AddIntConfig */
/**
* This routine allocates the next free proto in Class and
* returns its index.
*
* @param Class class to add new proto to
*
* Globals: none
*
* @return Proto index of new proto.
*/
// Allocates the next free proto in Class, growing the proto-set storage
// when necessary, and returns its index (NO_PROTO if the class is full).
int AddIntProto(INT_CLASS_STRUCT *Class) {
  if (Class->NumProtos >= MAX_NUM_PROTOS) {
    return (NO_PROTO);
  }
  const int new_index = Class->NumProtos++;
  if (Class->NumProtos > MaxNumIntProtosIn(Class)) {
    // Capacity exhausted: add a zeroed proto set and widen the length table.
    const int new_set_id = Class->NumProtoSets++;
    auto *new_set = new PROTO_SET_STRUCT;
    memset(new_set, 0, sizeof(*new_set));
    Class->ProtoSets[new_set_id] = new_set;
    /* reallocate space for the proto lengths and install in class */
    Class->ProtoLengths.resize(MaxNumIntProtosIn(Class));
  }
  /* initialize proto so its length is zero and it isn't in any configs */
  Class->ProtoLengths[new_index] = 0;
  auto *new_proto = ProtoForProtoId(Class, new_index);
  for (int word = 0; word < WERDS_PER_CONFIG_VEC; ++word) {
    new_proto->Configs[word] = 0;
  }
  return (new_index);
}
/**
* This routine adds Proto to the class pruning tables
* for the specified class in Templates.
*
* Globals:
* - classify_num_cp_levels number of levels used in the class pruner
* @param Proto floating-pt proto to add to class pruner
* @param ClassId class id corresponding to Proto
* @param Templates set of templates containing class pruner
*/
void AddProtoToClassPruner(PROTO_STRUCT *Proto, CLASS_ID ClassId, INT_TEMPLATES_STRUCT *Templates)
#define MAX_LEVEL 2
{
  CLASS_PRUNER_STRUCT *Pruner;
  uint32_t ClassMask;
  uint32_t ClassCount;
  uint32_t WordIndex;
  int Level;
  float EndPad, SidePad, AnglePad;
  TABLE_FILLER TableFiller;
  FILL_SPEC FillSpec;
  Pruner = CPrunerFor(Templates, ClassId);
  WordIndex = CPrunerWordIndexFor(ClassId);
  ClassMask = CPrunerMaskFor(MAX_LEVEL, ClassId);
  // For every pruning level, sweep the proto's padded footprint across
  // the pruner table and fill each covered cell.
  for (Level = classify_num_cp_levels - 1; Level >= 0; Level--) {
    GetCPPadsForLevel(Level, &EndPad, &SidePad, &AnglePad);
    // NOTE(review): despite its name, ClassCount holds the pruner mask
    // for this level (CPrunerMaskFor) — confirm before renaming.
    ClassCount = CPrunerMaskFor(Level, ClassId);
    InitTableFiller(EndPad, SidePad, AnglePad, Proto, &TableFiller);
    while (!FillerDone(&TableFiller)) {
      GetNextFill(&TableFiller, &FillSpec);
      DoFill(&FillSpec, Pruner, ClassMask, ClassCount, WordIndex);
    }
  }
} /* AddProtoToClassPruner */
/**
* This routine updates the proto pruner lookup tables
* for Class to include a new proto identified by ProtoId
* and described by Proto.
* @param Proto floating-pt proto to be added to proto pruner
* @param ProtoId id of proto
* @param Class integer class that contains desired proto pruner
* @param debug debug flag
* @note Globals: none
*/
void AddProtoToProtoPruner(PROTO_STRUCT *Proto, int ProtoId, INT_CLASS_STRUCT *Class, bool debug) {
  float X, Y, Length;
  float Pad;
  if (ProtoId >= Class->NumProtos) {
    tprintf("AddProtoToProtoPruner:assert failed: %d < %d", ProtoId, Class->NumProtos);
  }
  assert(ProtoId < Class->NumProtos);
  int Index = IndexForProto(ProtoId);
  auto ProtoSet = Class->ProtoSets[SetForProto(ProtoId)];
  float Angle = Proto->Angle;
#ifndef _WIN32
  assert(!std::isnan(Angle));
#endif
  // Angle is circular, so its pruner bits wrap around the table.
  FillPPCircularBits(ProtoSet->ProtoPruner[PRUNER_ANGLE], Index, Angle + ANGLE_SHIFT,
                     classify_pp_angle_pad / 360.0, debug);
  // Convert the normalized angle to radians for the projections below.
  Angle *= 2.0 * M_PI;
  Length = Proto->Length;
  // The X/Y pads project the proto's end and side pads onto each axis
  // using the proto's orientation.
  X = Proto->X + X_SHIFT;
  Pad = std::max(fabs(std::cos(Angle)) * (Length / 2.0 + classify_pp_end_pad * GetPicoFeatureLength()),
                 fabs(std::sin(Angle)) * (classify_pp_side_pad * GetPicoFeatureLength()));
  FillPPLinearBits(ProtoSet->ProtoPruner[PRUNER_X], Index, X, Pad, debug);
  Y = Proto->Y + Y_SHIFT;
  Pad = std::max(fabs(std::sin(Angle)) * (Length / 2.0 + classify_pp_end_pad * GetPicoFeatureLength()),
                 fabs(std::cos(Angle)) * (classify_pp_side_pad * GetPicoFeatureLength()));
  FillPPLinearBits(ProtoSet->ProtoPruner[PRUNER_Y], Index, Y, Pad, debug);
} /* AddProtoToProtoPruner */
/**
* Returns a quantized bucket for the given param shifted by offset,
* notionally (param + offset) * num_buckets, but clipped and casted to the
* appropriate type.
*/
// Quantizes (param + offset) * num_buckets to a bucket index, clipped to
// [0, num_buckets - 1] and narrowed to 8 bits.
uint8_t Bucket8For(float param, float offset, int num_buckets) {
  const int raw_bucket = IntCastRounded(MapParam(param, offset, num_buckets));
  return static_cast<uint8_t>(ClipToRange<int>(raw_bucket, 0, num_buckets - 1));
}
// 16-bit variant of Bucket8For: same quantization and clipping, wider
// result type.
uint16_t Bucket16For(float param, float offset, int num_buckets) {
  const int raw_bucket = IntCastRounded(MapParam(param, offset, num_buckets));
  return static_cast<uint16_t>(ClipToRange<int>(raw_bucket, 0, num_buckets - 1));
}
/**
* Returns a quantized bucket for the given circular param shifted by offset,
* notionally (param + offset) * num_buckets, but modded and casted to the
* appropriate type.
*/
// Circular variant of Bucket8For: the raw bucket is wrapped with a
// non-negative modulo instead of clipped, for periodic parameters.
uint8_t CircBucketFor(float param, float offset, int num_buckets) {
  const int raw_bucket = IntCastRounded(MapParam(param, offset, num_buckets));
  return static_cast<uint8_t>(Modulo(raw_bucket, num_buckets));
} /* CircBucketFor */
#ifndef GRAPHICS_DISABLED
/**
 * This routine redraws the global match display
 * window, if one exists, so that features and protos
 * rendered since the last update become visible.
 *
 * Globals:
 * - IntMatchWindow the integer match display window
 */
// Redraws the match display window, if one has been created.
void UpdateMatchDisplay() {
  if (IntMatchWindow == nullptr) {
    return;
  }
  IntMatchWindow->Update();
} /* UpdateMatchDisplay */
#endif
/**
* This operation updates the config vectors of all protos
* in Class to indicate that the protos with 1's in Config
* belong to a new configuration identified by ConfigId.
* It is assumed that the length of the Config bit vector is
* equal to the number of protos in Class.
* @param Config config to be added to class
* @param ConfigId id to be used for new config
* @param Class class to add new config to
*/
void ConvertConfig(BIT_VECTOR Config, int ConfigId, INT_CLASS_STRUCT *Class) {
  int ProtoId;
  INT_PROTO_STRUCT *Proto;
  int TotalLength;
  // Set ConfigId's bit in every proto named by Config, accumulating the
  // total length of the participating protos as the config's length.
  for (ProtoId = 0, TotalLength = 0; ProtoId < Class->NumProtos; ProtoId++) {
    if (test_bit(Config, ProtoId)) {
      Proto = ProtoForProtoId(Class, ProtoId);
      SET_BIT(Proto->Configs, ConfigId);
      TotalLength += Class->ProtoLengths[ProtoId];
    }
  }
  Class->ConfigLengths[ConfigId] = TotalLength;
} /* ConvertConfig */
/**
 * This routine converts Proto to integer format and
 * installs it as ProtoId in Class. The line parameters A, B, C and
 * the angle are scaled into small fixed-point integers; the proto
 * length is quantized into pico-feature units.
 * @param Proto floating-pt proto to be converted to integer format
 * @param ProtoId id of proto
 * @param Class integer class to add converted proto to
 */
void Classify::ConvertProto(PROTO_STRUCT *Proto, int ProtoId, INT_CLASS_STRUCT *Class) {
  assert(ProtoId < Class->NumProtos);
  INT_PROTO_STRUCT *P = ProtoForProtoId(Class, ProtoId);
  // Scale A into a signed 8-bit fixed-point value.
  float Param = Proto->A * 128;
  P->A = TruncateParam(Param, -128, 127);
  // B is negated and scaled into an unsigned 8-bit value.
  Param = -Proto->B * 256;
  P->B = TruncateParam(Param, 0, 255);
  Param = Proto->C * 128;
  P->C = TruncateParam(Param, -128, 127);
  // Angle is circular: out-of-range values collapse to 0 rather than clip.
  Param = Proto->Angle * 256;
  if (Param < 0 || Param >= 256) {
    P->Angle = 0;
  } else {
    P->Angle = static_cast<uint8_t>(Param);
  }
  /* round proto length to nearest integer number of pico-features */
  Param = (Proto->Length / GetPicoFeatureLength()) + 0.5;
  Class->ProtoLengths[ProtoId] = TruncateParam(Param, 1, 255);
  if (classify_learning_debug_level >= 2) {
    tprintf("Converted ffeat to (A=%d,B=%d,C=%d,L=%d)", P->A, P->B, P->C,
            Class->ProtoLengths[ProtoId]);
  }
} /* ConvertProto */
/**
 * This routine converts from the old floating point format
 * to the new integer format. Each unichar in target_unicharset
 * gets a corresponding integer class built from its float protos
 * and configs, and the protos are also installed in the proto and
 * class pruners.
 * @param FloatProtos prototypes in old floating pt format
 * @param target_unicharset the UNICHARSET to use
 * @return New set of training templates in integer format.
 * @note Globals: none
 */
INT_TEMPLATES_STRUCT *Classify::CreateIntTemplates(CLASSES FloatProtos,
                                                   const UNICHARSET &target_unicharset) {
  CLASS_TYPE FClass;
  INT_CLASS_STRUCT *IClass;
  int ProtoId;
  int ConfigId;
  auto IntTemplates = new INT_TEMPLATES_STRUCT;
  for (unsigned ClassId = 0; ClassId < target_unicharset.size(); ClassId++) {
    FClass = &(FloatProtos[ClassId]);
    // Warn about empty classes, except the space character which is
    // legitimately empty.
    if (FClass->NumProtos == 0 && FClass->NumConfigs == 0 &&
        strcmp(target_unicharset.id_to_unichar(ClassId), " ") != 0) {
      tprintf("Warning: no protos/configs for %s in CreateIntTemplates()\n",
              target_unicharset.id_to_unichar(ClassId));
    }
    assert(UnusedClassIdIn(IntTemplates, ClassId));
    IClass = new INT_CLASS_STRUCT(FClass->NumProtos, FClass->NumConfigs);
    // Copy the class's font set into the shared fontset table and keep the id.
    unsigned fs_size = FClass->font_set.size();
    FontSet fs;
    fs.reserve(fs_size);
    for (unsigned i = 0; i < fs_size; ++i) {
      fs.push_back(FClass->font_set[i]);
    }
    IClass->font_set_id = this->fontset_table_.push_back(fs);
    AddIntClass(IntTemplates, ClassId, IClass);
    // Convert each proto and register it with both pruners.
    for (ProtoId = 0; ProtoId < FClass->NumProtos; ProtoId++) {
      AddIntProto(IClass);
      ConvertProto(ProtoIn(FClass, ProtoId), ProtoId, IClass);
      AddProtoToProtoPruner(ProtoIn(FClass, ProtoId), ProtoId, IClass,
                            classify_learning_debug_level >= 2);
      AddProtoToClassPruner(ProtoIn(FClass, ProtoId), ClassId, IntTemplates);
    }
    for (ConfigId = 0; ConfigId < FClass->NumConfigs; ConfigId++) {
      AddIntConfig(IClass);
      ConvertConfig(FClass->Configurations[ConfigId], ConfigId, IClass);
    }
  }
  return (IntTemplates);
} /* CreateIntTemplates */
#ifndef GRAPHICS_DISABLED
/**
* This routine renders the specified feature into a
* global display list.
*
* Globals:
* - FeatureShapes global display list for features
* @param Feature pico-feature to be displayed
* @param Evidence best evidence for this feature (0-1)
*/
void DisplayIntFeature(const INT_FEATURE_STRUCT *Feature, float Evidence) {
ScrollView::Color color = GetMatchColorFor(Evidence);
RenderIntFeature(IntMatchWindow, Feature, color);
if (FeatureDisplayWindow) {
RenderIntFeature(FeatureDisplayWindow, Feature, color);
}
} /* DisplayIntFeature */
/**
 * Renders one proto of Class into the match debugger window(s),
 * colored according to its total evidence.
 * @param Class class to take proto from
 * @param ProtoId id of proto in Class to be displayed
 * @param Evidence total evidence for proto (0-1)
 */
void DisplayIntProto(INT_CLASS_STRUCT *Class, PROTO_ID ProtoId, float Evidence) {
  const ScrollView::Color match_color = GetMatchColorFor(Evidence);
  RenderIntProto(IntMatchWindow, Class, ProtoId, match_color);
  if (ProtoDisplayWindow != nullptr) {
    RenderIntProto(ProtoDisplayWindow, Class, ProtoId, match_color);
  }
} /* DisplayIntProto */
#endif
/// This constructor creates a new integer class data structure
/// and returns it. Sufficient space is allocated
/// to handle the specified number of protos and configs.
/// @param MaxNumProtos number of protos to allocate space for
/// @param MaxNumConfigs number of configs to allocate space for
INT_CLASS_STRUCT::INT_CLASS_STRUCT(int MaxNumProtos, int MaxNumConfigs) :
    NumProtos(0),
    // Round up so that a partially-filled last proto set still gets storage.
    NumProtoSets((MaxNumProtos + PROTOS_PER_PROTO_SET - 1) / PROTOS_PER_PROTO_SET),
    NumConfigs(0),
    // One length entry per allocated proto slot; MaxNumIntProtosIn reads
    // NumProtoSets, which is already initialized above (declaration order).
    ProtoLengths(MaxNumIntProtosIn(this))
{
  assert(MaxNumConfigs <= MAX_NUM_CONFIGS);
  assert(NumProtoSets <= MAX_NUM_PROTO_SETS);
  for (int i = 0; i < NumProtoSets; i++) {
    /* allocate space for a proto set, install in class, and initialize */
    auto ProtoSet = new PROTO_SET_STRUCT;
    memset(ProtoSet, 0, sizeof(*ProtoSet));
    ProtoSets[i] = ProtoSet;
  }
  memset(ConfigLengths, 0, sizeof(ConfigLengths));
}
/// Releases the proto sets owned by this class.
INT_CLASS_STRUCT::~INT_CLASS_STRUCT() {
  for (int set_index = 0; set_index < NumProtoSets; ++set_index) {
    delete ProtoSets[set_index];
  }
}
/// This constructor allocates a new set of integer templates
/// initialized to hold 0 classes.
INT_TEMPLATES_STRUCT::INT_TEMPLATES_STRUCT() {
  NumClasses = 0;
  NumClassPruners = 0;
  // Start with every class slot empty.
  for (int class_id = 0; class_id < MAX_NUM_CLASSES; ++class_id) {
    ClassForClassId(this, class_id) = nullptr;
  }
}
/// Frees all classes and class pruners owned by the templates.
INT_TEMPLATES_STRUCT::~INT_TEMPLATES_STRUCT() {
  for (unsigned class_index = 0; class_index < NumClasses; ++class_index) {
    delete Class[class_index];
  }
  for (unsigned pruner_index = 0; pruner_index < NumClassPruners; ++pruner_index) {
    delete ClassPruners[pruner_index];
  }
}
/**
 * This routine reads a set of integer templates from
 * File. File must already be open and must be in the
 * correct binary format. Handles all historical versions of the
 * inttemp format (see version_id handling below); old-format data
 * is converted in place to the current layout.
 * @param fp open file to read templates from
 * @return Pointer to integer templates read from File.
 * @note Globals: none
 */
INT_TEMPLATES_STRUCT *Classify::ReadIntTemplates(TFile *fp) {
  int j, w, x, y, z;
  INT_TEMPLATES_STRUCT *Templates;
  CLASS_PRUNER_STRUCT *Pruner;
  INT_CLASS_STRUCT *Class;
  /* variables for conversion from older inttemp formats */
  int b, bit_number, last_cp_bit_number, new_b, new_i, new_w;
  CLASS_ID class_id, max_class_id;
  std::vector<CLASS_ID> ClassIdFor(MAX_NUM_CLASSES);
  std::vector<CLASS_PRUNER_STRUCT *> TempClassPruner(MAX_NUM_CLASS_PRUNERS);
  uint32_t SetBitsForMask = // word with NUM_BITS_PER_CLASS
      (1 << NUM_BITS_PER_CLASS) - 1; // set starting at bit 0
  uint32_t Mask, NewMask, ClassBits;
  unsigned MaxNumConfigs = MAX_NUM_CONFIGS;
  unsigned WerdsPerConfigVec = WERDS_PER_CONFIG_VEC;
  /* first read the high level template struct */
  Templates = new INT_TEMPLATES_STRUCT;
  // Read Templates in parts for 64 bit compatibility.
  uint32_t unicharset_size;
  if (fp->FReadEndian(&unicharset_size, sizeof(unicharset_size), 1) != 1) {
    tprintf("Bad read of inttemp!\n");
  }
  // Newer files store a negated version id in this field; very old files
  // stored the class count here instead (hence the sign check below).
  int32_t version_id = 0;
  if (fp->FReadEndian(&version_id, sizeof(version_id), 1) != 1 ||
      fp->FReadEndian(&Templates->NumClassPruners, sizeof(Templates->NumClassPruners), 1) != 1) {
    tprintf("Bad read of inttemp!\n");
  }
  if (version_id < 0) {
    // This file has a version id!
    version_id = -version_id;
    if (fp->FReadEndian(&Templates->NumClasses, sizeof(Templates->NumClasses), 1) != 1) {
      tprintf("Bad read of inttemp!\n");
    }
  } else {
    Templates->NumClasses = version_id;
  }
  // Versions before 3 used larger fixed-size config arrays.
  if (version_id < 3) {
    MaxNumConfigs = OLD_MAX_NUM_CONFIGS;
    WerdsPerConfigVec = OLD_WERDS_PER_CONFIG_VEC;
  }
  // Versions before 2 stored classes by index with a separate
  // index-to-class-id mapping table.
  if (version_id < 2) {
    std::vector<int16_t> IndexFor(MAX_NUM_CLASSES);
    if (fp->FReadEndian(&IndexFor[0], sizeof(IndexFor[0]), unicharset_size) != unicharset_size) {
      tprintf("Bad read of inttemp!\n");
    }
    if (fp->FReadEndian(&ClassIdFor[0], sizeof(ClassIdFor[0]), Templates->NumClasses) !=
        Templates->NumClasses) {
      tprintf("Bad read of inttemp!\n");
    }
  }
  /* then read in the class pruners */
  const unsigned kNumBuckets = NUM_CP_BUCKETS * NUM_CP_BUCKETS * NUM_CP_BUCKETS * WERDS_PER_CP_VECTOR;
  for (unsigned i = 0; i < Templates->NumClassPruners; i++) {
    Pruner = new CLASS_PRUNER_STRUCT;
    if (fp->FReadEndian(Pruner, sizeof(Pruner->p[0][0][0][0]), kNumBuckets) != kNumBuckets) {
      tprintf("Bad read of inttemp!\n");
    }
    if (version_id < 2) {
      // Keep aside for index-to-id remapping below.
      TempClassPruner[i] = Pruner;
    } else {
      Templates->ClassPruners[i] = Pruner;
    }
  }
  /* fix class pruners if they came from an old version of inttemp */
  if (version_id < 2) {
    // Allocate enough class pruners to cover all the class ids.
    max_class_id = 0;
    for (unsigned i = 0; i < Templates->NumClasses; i++) {
      if (ClassIdFor[i] > max_class_id) {
        max_class_id = ClassIdFor[i];
      }
    }
    for (int i = 0; i <= CPrunerIdFor(max_class_id); i++) {
      Templates->ClassPruners[i] = new CLASS_PRUNER_STRUCT;
      memset(Templates->ClassPruners[i], 0, sizeof(CLASS_PRUNER_STRUCT));
    }
    // Convert class pruners from the old format (indexed by class index)
    // to the new format (indexed by class id).
    last_cp_bit_number = NUM_BITS_PER_CLASS * Templates->NumClasses - 1;
    for (unsigned i = 0; i < Templates->NumClassPruners; i++) {
      for (x = 0; x < NUM_CP_BUCKETS; x++) {
        for (y = 0; y < NUM_CP_BUCKETS; y++) {
          for (z = 0; z < NUM_CP_BUCKETS; z++) {
            for (w = 0; w < WERDS_PER_CP_VECTOR; w++) {
              if (TempClassPruner[i]->p[x][y][z][w] == 0) {
                continue;
              }
              for (b = 0; b < BITS_PER_WERD; b += NUM_BITS_PER_CLASS) {
                bit_number = i * BITS_PER_CP_VECTOR + w * BITS_PER_WERD + b;
                if (bit_number > last_cp_bit_number) {
                  break; // the rest of the bits in this word are not used
                }
                class_id = ClassIdFor[bit_number / NUM_BITS_PER_CLASS];
                // Single out NUM_BITS_PER_CLASS bits relating to class_id.
                Mask = SetBitsForMask << b;
                ClassBits = TempClassPruner[i]->p[x][y][z][w] & Mask;
                // Move these bits to the new position in which they should
                // appear (indexed corresponding to the class_id).
                new_i = CPrunerIdFor(class_id);
                new_w = CPrunerWordIndexFor(class_id);
                new_b = CPrunerBitIndexFor(class_id) * NUM_BITS_PER_CLASS;
                if (new_b > b) {
                  ClassBits <<= (new_b - b);
                } else {
                  ClassBits >>= (b - new_b);
                }
                // Copy bits relating to class_id to the correct position
                // in Templates->ClassPruner.
                NewMask = SetBitsForMask << new_b;
                Templates->ClassPruners[new_i]->p[x][y][z][new_w] &= ~NewMask;
                Templates->ClassPruners[new_i]->p[x][y][z][new_w] |= ClassBits;
              }
            }
          }
        }
      }
    }
    for (unsigned i = 0; i < Templates->NumClassPruners; i++) {
      delete TempClassPruner[i];
    }
  }
  /* then read in each class */
  for (unsigned i = 0; i < Templates->NumClasses; i++) {
    /* first read in the high level struct for the class */
    Class = new INT_CLASS_STRUCT;
    if (fp->FReadEndian(&Class->NumProtos, sizeof(Class->NumProtos), 1) != 1 ||
        fp->FRead(&Class->NumProtoSets, sizeof(Class->NumProtoSets), 1) != 1 ||
        fp->FRead(&Class->NumConfigs, sizeof(Class->NumConfigs), 1) != 1) {
      tprintf("Bad read of inttemp!\n");
    }
    if (version_id == 0) {
      // Only version 0 writes 5 pointless pointers to the file.
      for (j = 0; j < 5; ++j) {
        int32_t junk;
        if (fp->FRead(&junk, sizeof(junk), 1) != 1) {
          tprintf("Bad read of inttemp!\n");
        }
      }
    }
    // Versions before 4 always wrote the full fixed-size config array.
    unsigned num_configs = version_id < 4 ? MaxNumConfigs : Class->NumConfigs;
    ASSERT_HOST(num_configs <= MaxNumConfigs);
    if (fp->FReadEndian(Class->ConfigLengths, sizeof(uint16_t), num_configs) != num_configs) {
      tprintf("Bad read of inttemp!\n");
    }
    if (version_id < 2) {
      ClassForClassId(Templates, ClassIdFor[i]) = Class;
    } else {
      ClassForClassId(Templates, i) = Class;
    }
    /* then read in the proto lengths */
    Class->ProtoLengths.clear();
    if (MaxNumIntProtosIn(Class) > 0) {
      Class->ProtoLengths.resize(MaxNumIntProtosIn(Class));
      if (fp->FRead(&Class->ProtoLengths[0], sizeof(uint8_t), MaxNumIntProtosIn(Class)) !=
          MaxNumIntProtosIn(Class)) {
        tprintf("Bad read of inttemp!\n");
      }
    }
    /* then read in the proto sets */
    for (j = 0; j < Class->NumProtoSets; j++) {
      auto ProtoSet = new PROTO_SET_STRUCT;
      unsigned num_buckets = NUM_PP_PARAMS * NUM_PP_BUCKETS * WERDS_PER_PP_VECTOR;
      if (fp->FReadEndian(&ProtoSet->ProtoPruner, sizeof(ProtoSet->ProtoPruner[0][0][0]),
                          num_buckets) != num_buckets) {
        tprintf("Bad read of inttemp!\n");
      }
      for (x = 0; x < PROTOS_PER_PROTO_SET; x++) {
        if (fp->FRead(&ProtoSet->Protos[x].A, sizeof(ProtoSet->Protos[x].A), 1) != 1 ||
            fp->FRead(&ProtoSet->Protos[x].B, sizeof(ProtoSet->Protos[x].B), 1) != 1 ||
            fp->FRead(&ProtoSet->Protos[x].C, sizeof(ProtoSet->Protos[x].C), 1) != 1 ||
            fp->FRead(&ProtoSet->Protos[x].Angle, sizeof(ProtoSet->Protos[x].Angle), 1) != 1) {
          tprintf("Bad read of inttemp!\n");
        }
        if (fp->FReadEndian(&ProtoSet->Protos[x].Configs, sizeof(ProtoSet->Protos[x].Configs[0]),
                            WerdsPerConfigVec) != WerdsPerConfigVec) {
          tprintf("Bad read of inttemp!\n");
        }
      }
      Class->ProtoSets[j] = ProtoSet;
    }
    // Font info was added in version 4; older files have none.
    if (version_id < 4) {
      Class->font_set_id = -1;
    } else {
      fp->FReadEndian(&Class->font_set_id, sizeof(Class->font_set_id), 1);
    }
  }
  if (version_id < 2) {
    /* add an empty nullptr class with class id 0 */
    assert(UnusedClassIdIn(Templates, 0));
    ClassForClassId(Templates, 0) = new INT_CLASS_STRUCT(1, 1);
    ClassForClassId(Templates, 0)->font_set_id = -1;
    Templates->NumClasses++;
    /* make sure the classes are contiguous */
    for (unsigned i = 0; i < MAX_NUM_CLASSES; i++) {
      if (i < Templates->NumClasses) {
        if (ClassForClassId(Templates, i) == nullptr) {
          fprintf(stderr, "Non-contiguous class ids in inttemp\n");
          exit(1);
        }
      } else {
        if (ClassForClassId(Templates, i) != nullptr) {
          fprintf(stderr, "Class id %u exceeds NumClassesIn (Templates) %u\n", i,
                  Templates->NumClasses);
          exit(1);
        }
      }
    }
  }
  // Read the font tables (added in version 4; spacing info in version 5).
  if (version_id >= 4) {
    using namespace std::placeholders; // for _1, _2
    this->fontinfo_table_.read(fp, std::bind(read_info, _1, _2));
    if (version_id >= 5) {
      this->fontinfo_table_.read(fp, std::bind(read_spacing_info, _1, _2));
    }
    this->fontset_table_.read(fp, [](auto *f, auto *fs) { return f->DeSerialize(*fs); } );
  }
  return (Templates);
} /* ReadIntTemplates */
#ifndef GRAPHICS_DISABLED
/**
* This routine sends the shapes in the global display
* lists to the match debugger window.
*
* Globals:
* - FeatureShapes display list containing feature matches
* - ProtoShapes display list containing proto matches
*/
void Classify::ShowMatchDisplay() {
InitIntMatchWindowIfReqd();
if (ProtoDisplayWindow) {
ProtoDisplayWindow->Clear();
}
if (FeatureDisplayWindow) {
FeatureDisplayWindow->Clear();
}
ClearFeatureSpaceWindow(static_cast<NORM_METHOD>(static_cast<int>(classify_norm_method)),
IntMatchWindow);
IntMatchWindow->ZoomToRectangle(INT_MIN_X, INT_MIN_Y, INT_MAX_X, INT_MAX_Y);
if (ProtoDisplayWindow) {
ProtoDisplayWindow->ZoomToRectangle(INT_MIN_X, INT_MIN_Y, INT_MAX_X, INT_MAX_Y);
}
if (FeatureDisplayWindow) {
FeatureDisplayWindow->ZoomToRectangle(INT_MIN_X, INT_MIN_Y, INT_MAX_X, INT_MAX_Y);
}
} /* ShowMatchDisplay */
/// Clears the given window and draws the featurespace guides for the
/// appropriate normalization method.
void ClearFeatureSpaceWindow(NORM_METHOD norm_method, ScrollView *window) {
  window->Clear();
  window->Pen(ScrollView::GREY);
  // Draw the feature space limit rectangle.
  window->Rectangle(0, 0, INT_MAX_X, INT_MAX_Y);
  if (norm_method == baseline) {
    // Horizontal guide lines at each reference height.
    const int guide_heights[] = {INT_DESCENDER, INT_BASELINE, INT_XHEIGHT, INT_CAPHEIGHT};
    for (int height : guide_heights) {
      window->SetCursor(0, height);
      window->DrawTo(INT_MAX_X, height);
    }
  } else {
    // Character normalization: draw the central reference box.
    window->Rectangle(INT_XCENTER - INT_XRADIUS, INT_YCENTER - INT_YRADIUS,
                      INT_XCENTER + INT_XRADIUS, INT_YCENTER + INT_YRADIUS);
  }
}
#endif
/**
 * This routine writes Templates to File. The format
 * is an efficient binary format. File must already be open
 * for writing. The field order written here is the on-disk format
 * consumed by ReadIntTemplates().
 * @param File open file to write templates to
 * @param Templates templates to save into File
 * @param target_unicharset the UNICHARSET to use
 */
void Classify::WriteIntTemplates(FILE *File, INT_TEMPLATES_STRUCT *Templates,
                                 const UNICHARSET &target_unicharset) {
  INT_CLASS_STRUCT *Class;
  uint32_t unicharset_size = target_unicharset.size();
  int version_id = -5; // When negated by the reader -1 becomes +1 etc.
  if (Templates->NumClasses != unicharset_size) {
    tprintf(
        "Warning: executing WriteIntTemplates() with %d classes in"
        " Templates, while target_unicharset size is %" PRIu32 "\n",
        Templates->NumClasses, unicharset_size);
  }
  /* first write the high level template struct */
  fwrite(&unicharset_size, sizeof(unicharset_size), 1, File);
  fwrite(&version_id, sizeof(version_id), 1, File);
  fwrite(&Templates->NumClassPruners, sizeof(Templates->NumClassPruners), 1, File);
  fwrite(&Templates->NumClasses, sizeof(Templates->NumClasses), 1, File);
  /* then write out the class pruners */
  for (unsigned i = 0; i < Templates->NumClassPruners; i++) {
    fwrite(Templates->ClassPruners[i], sizeof(CLASS_PRUNER_STRUCT), 1, File);
  }
  /* then write out each class */
  for (unsigned i = 0; i < Templates->NumClasses; i++) {
    Class = Templates->Class[i];
    /* first write out the high level struct for the class */
    fwrite(&Class->NumProtos, sizeof(Class->NumProtos), 1, File);
    fwrite(&Class->NumProtoSets, sizeof(Class->NumProtoSets), 1, File);
    // Every config must have a corresponding font set entry.
    ASSERT_HOST(Class->NumConfigs == this->fontset_table_.at(Class->font_set_id).size());
    fwrite(&Class->NumConfigs, sizeof(Class->NumConfigs), 1, File);
    // Version >= 4 writes only the used config lengths, not the full array.
    for (int j = 0; j < Class->NumConfigs; ++j) {
      fwrite(&Class->ConfigLengths[j], sizeof(uint16_t), 1, File);
    }
    /* then write out the proto lengths */
    if (MaxNumIntProtosIn(Class) > 0) {
      fwrite(&Class->ProtoLengths[0], sizeof(uint8_t), MaxNumIntProtosIn(Class), File);
    }
    /* then write out the proto sets */
    for (int j = 0; j < Class->NumProtoSets; j++) {
      fwrite(Class->ProtoSets[j], sizeof(PROTO_SET_STRUCT), 1, File);
    }
    /* then write the fonts info */
    fwrite(&Class->font_set_id, sizeof(int), 1, File);
  }
  /* Write the fonts info tables */
  using namespace std::placeholders; // for _1, _2
  this->fontinfo_table_.write(File, std::bind(write_info, _1, _2));
  this->fontinfo_table_.write(File, std::bind(write_spacing_info, _1, _2));
  this->fontset_table_.write(File, std::bind(write_set, _1, _2));
} /* WriteIntTemplates */
/*-----------------------------------------------------------------------------
              Private Code
-----------------------------------------------------------------------------*/
/**
 * Returns the parameter value which corresponds to the beginning
 * (low edge) of the specified bucket. The bucket number should have
 * been generated using the BucketFor() function with parameters
 * Offset and NumBuckets.
 * @param Bucket bucket whose start is to be computed
 * @param Offset offset used to map params to buckets
 * @param NumBuckets total number of buckets
 * @return Param value corresponding to start position of Bucket.
 * @note Globals: none
 */
float BucketStart(int Bucket, float Offset, int NumBuckets) {
  const float normalized = static_cast<float>(Bucket) / static_cast<float>(NumBuckets);
  return normalized - Offset;
} /* BucketStart */
/**
 * Returns the parameter value which corresponds to the end
 * (high edge) of the specified bucket. The bucket number should have
 * been generated using the BucketFor() function with parameters
 * Offset and NumBuckets.
 * @param Bucket bucket whose end is to be computed
 * @param Offset offset used to map params to buckets
 * @param NumBuckets total number of buckets
 * @return Param value corresponding to end position of Bucket.
 * @note Globals: none
 */
float BucketEnd(int Bucket, float Offset, int NumBuckets) {
  const float normalized = static_cast<float>(Bucket + 1) / static_cast<float>(NumBuckets);
  return normalized - Offset;
} /* BucketEnd */
/**
 * Fills in the section of a class pruner corresponding to a single
 * x value for a single proto of a class.
 * @param FillSpec specifies which bits to fill in pruner
 * @param Pruner class pruner to be filled
 * @param ClassMask indicates which bits to change in each word
 * @param ClassCount indicates what to change bits to
 * @param WordIndex indicates which word to change
 */
void DoFill(FILL_SPEC *FillSpec, CLASS_PRUNER_STRUCT *Pruner, uint32_t ClassMask,
            uint32_t ClassCount, uint32_t WordIndex) {
  // Clamp the x bucket into the legal range.
  int X = FillSpec->X;
  if (X < 0) {
    X = 0;
  }
  if (X >= NUM_CP_BUCKETS) {
    X = NUM_CP_BUCKETS - 1;
  }
  // Clamp the y range in place (the caller's spec is updated too,
  // matching the original behavior).
  if (FillSpec->YStart < 0) {
    FillSpec->YStart = 0;
  }
  if (FillSpec->YEnd >= NUM_CP_BUCKETS) {
    FillSpec->YEnd = NUM_CP_BUCKETS - 1;
  }
  for (int Y = FillSpec->YStart; Y <= FillSpec->YEnd; Y++) {
    // Walk the circular angle range from AngleStart to AngleEnd inclusive.
    int Angle = FillSpec->AngleStart;
    while (true) {
      uint32_t word = Pruner->p[X][Y][Angle][WordIndex];
      // Only ever raise the stored count for this class, never lower it.
      if (ClassCount > (word & ClassMask)) {
        word &= ~ClassMask;
        word |= ClassCount;
        Pruner->p[X][Y][Angle][WordIndex] = word;
      }
      if (Angle == FillSpec->AngleEnd) {
        break;
      }
      CircularIncrement(Angle, NUM_CP_BUCKETS);
    }
  }
} /* DoFill */
/**
 * Return true if the specified table filler is done, i.e.
 * if it has no more lines to fill.
 * @param Filler table filler to check if done
 * @return true if no more lines to fill, false otherwise.
 * @note Globals: none
 */
bool FillerDone(TABLE_FILLER *Filler) {
  const FILL_SWITCH *next_switch = &Filler->Switch[Filler->NextSwitch];
  // Done only once we have advanced past the final (LastSwitch) entry.
  if (next_switch->Type != LastSwitch) {
    return false;
  }
  return Filler->X > next_switch->X;
} /* FillerDone */
/**
 * Sets Bit in each bit vector whose bucket lies within the range
 * Center +- Spread. The fill is done for a circular dimension, i.e.
 * bucket 0 is adjacent to the last bucket. It is assumed that Center
 * and Spread are expressed in a circular coordinate system whose
 * range is 0 to 1.
 * @param ParamTable table of bit vectors, one per param bucket
 * @param Bit bit position in vectors to be filled
 * @param Center center of filled area
 * @param Spread spread of filled area
 * @param debug debug flag
 */
void FillPPCircularBits(uint32_t ParamTable[NUM_PP_BUCKETS][WERDS_PER_PP_VECTOR], int Bit,
                        float Center, float Spread, bool debug) {
  // A spread of more than half the circle covers every bucket anyway.
  if (Spread > 0.5) {
    Spread = 0.5;
  }
  int first_bucket = static_cast<int>(std::floor((Center - Spread) * NUM_PP_BUCKETS));
  if (first_bucket < 0) {
    first_bucket += NUM_PP_BUCKETS; // wrap below zero around the circle
  }
  int last_bucket = static_cast<int>(std::floor((Center + Spread) * NUM_PP_BUCKETS));
  if (last_bucket >= NUM_PP_BUCKETS) {
    last_bucket -= NUM_PP_BUCKETS; // wrap past the top around the circle
  }
  if (debug) {
    tprintf("Circular fill from %d to %d", first_bucket, last_bucket);
  }
  int bucket = first_bucket;
  while (true) {
    SET_BIT(ParamTable[bucket], Bit);
    /* exit loop after we have set the bit for the last bucket */
    if (bucket == last_bucket) {
      break;
    }
    CircularIncrement(bucket, NUM_PP_BUCKETS);
  }
} /* FillPPCircularBits */
/**
 * Sets Bit in each bit vector whose bucket lies within the range
 * Center +- Spread. The fill is done for a linear dimension, i.e.
 * there is no wrap-around for this dimension. It is assumed that
 * Center and Spread are expressed in a linear coordinate system
 * whose range is approximately 0 to 1. Values outside this range
 * will be clipped.
 * @param ParamTable table of bit vectors, one per param bucket
 * @param Bit bit number being filled
 * @param Center center of filled area
 * @param Spread spread of filled area
 * @param debug debug flag
 */
void FillPPLinearBits(uint32_t ParamTable[NUM_PP_BUCKETS][WERDS_PER_PP_VECTOR], int Bit,
                      float Center, float Spread, bool debug) {
  int first_bucket = static_cast<int>(std::floor((Center - Spread) * NUM_PP_BUCKETS));
  if (first_bucket < 0) {
    first_bucket = 0; // clip to the bottom of the table
  }
  int last_bucket = static_cast<int>(std::floor((Center + Spread) * NUM_PP_BUCKETS));
  if (last_bucket >= NUM_PP_BUCKETS) {
    last_bucket = NUM_PP_BUCKETS - 1; // clip to the top of the table
  }
  if (debug) {
    tprintf("Linear fill from %d to %d", first_bucket, last_bucket);
  }
  for (int bucket = first_bucket; bucket <= last_bucket; ++bucket) {
    SET_BIT(ParamTable[bucket], Bit);
  }
} /* FillPPLinearBits */
/*---------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
/**
 * This routine prompts the user with Prompt and waits
 * for the user to enter something in the debug window.
 * Popup menu events select a unichar (by text) or a shape (by
 * index) to debug; a plain click ends the interaction.
 * @param Prompt prompt to print while waiting for input from window
 * @param adaptive_on set true if the adaptive matcher should be debugged
 * @param pretrained_on set true if the static matcher should be debugged
 * @param shape_id set to the selected shape index, or -1
 * @return Character entered in the debug window (0 on plain click).
 * @note Globals: none
 */
CLASS_ID Classify::GetClassToDebug(const char *Prompt, bool *adaptive_on, bool *pretrained_on,
                                   int *shape_id) {
  tprintf("%s\n", Prompt);
  SVEventType ev_type;
  int unichar_id = INVALID_UNICHAR_ID;
  // Wait until a click or popup event.
  do {
    auto ev = IntMatchWindow->AwaitEvent(SVET_ANY);
    ev_type = ev->type;
    if (ev_type == SVET_POPUP) {
      if (ev->command_id == IDA_SHAPE_INDEX) {
        // User entered a raw shape-table index.
        if (shape_table_ != nullptr) {
          *shape_id = atoi(ev->parameter);
          *adaptive_on = false;
          *pretrained_on = true;
          if (*shape_id >= 0 && static_cast<unsigned>(*shape_id) < shape_table_->NumShapes()) {
            int font_id;
            shape_table_->GetFirstUnicharAndFont(*shape_id, &unichar_id, &font_id);
            tprintf("Shape %d, first unichar=%d, font=%d\n", *shape_id, unichar_id, font_id);
            return unichar_id;
          }
          tprintf("Shape index '%s' not found in shape table\n", ev->parameter);
        } else {
          tprintf("No shape table loaded!\n");
        }
      } else {
        // User entered a unichar; command_id selects which matcher(s).
        if (unicharset.contains_unichar(ev->parameter)) {
          unichar_id = unicharset.unichar_to_id(ev->parameter);
          if (ev->command_id == IDA_ADAPTIVE) {
            *adaptive_on = true;
            *pretrained_on = false;
            *shape_id = -1;
          } else if (ev->command_id == IDA_STATIC) {
            *adaptive_on = false;
            *pretrained_on = true;
          } else {
            *adaptive_on = true;
            *pretrained_on = true;
          }
          if (ev->command_id == IDA_ADAPTIVE || shape_table_ == nullptr) {
            *shape_id = -1;
            return unichar_id;
          }
          // List all shapes containing the selected unichar for reference.
          for (unsigned s = 0; s < shape_table_->NumShapes(); ++s) {
            if (shape_table_->GetShape(s).ContainsUnichar(unichar_id)) {
              tprintf("%s\n", shape_table_->DebugStr(s).c_str());
            }
          }
        } else {
          tprintf("Char class '%s' not found in unicharset", ev->parameter);
        }
      }
    }
  } while (ev_type != SVET_CLICK);
  return 0;
} /* GetClassToDebug */
#endif
/**
 * Copies the appropriate global pad variables into EndPad, SidePad,
 * and AnglePad. This is a kludge used to get around the fact that
 * global control variables cannot be arrays. If the specified level
 * is illegal, the tightest possible pads are returned.
 * @param Level "tightness" level to return pads for
 * @param EndPad place to put end pad for Level
 * @param SidePad place to put side pad for Level
 * @param AnglePad place to put angle pad for Level
 */
void GetCPPadsForLevel(int Level, float *EndPad, float *SidePad, float *AnglePad) {
  switch (Level) {
    case 0:
      *EndPad = classify_cp_end_pad_loose * GetPicoFeatureLength();
      *SidePad = classify_cp_side_pad_loose * GetPicoFeatureLength();
      *AnglePad = classify_cp_angle_pad_loose / 360.0;
      break;
    case 1:
      *EndPad = classify_cp_end_pad_medium * GetPicoFeatureLength();
      *SidePad = classify_cp_side_pad_medium * GetPicoFeatureLength();
      *AnglePad = classify_cp_angle_pad_medium / 360.0;
      break;
    case 2:
    default:
      // Level 2 and any illegal level both get the tightest pads.
      *EndPad = classify_cp_end_pad_tight * GetPicoFeatureLength();
      *SidePad = classify_cp_side_pad_tight * GetPicoFeatureLength();
      *AnglePad = classify_cp_angle_pad_tight / 360.0;
      break;
  }
  // Angle pads are circular fractions; never exceed half the circle.
  if (*AnglePad > 0.5) {
    *AnglePad = 0.5;
  }
} /* GetCPPadsForLevel */
/**
 * Maps an evidence value to a display color: the stronger the match,
 * the "hotter" the color (BLUE -> RED -> GREEN -> WHITE).
 * @param Evidence evidence value to return color for (0-1)
 * @return Color which corresponds to specified Evidence value.
 * @note Globals: none
 */
ScrollView::Color GetMatchColorFor(float Evidence) {
  assert(Evidence >= 0.0);
  assert(Evidence <= 1.0);
  if (Evidence < 0.50) {
    return ScrollView::BLUE;
  }
  if (Evidence < 0.75) {
    return ScrollView::RED;
  }
  if (Evidence < 0.90) {
    return ScrollView::GREEN;
  }
  return ScrollView::WHITE;
} /* GetMatchColorFor */
/**
 * This routine returns (in Fill) the specification of
 * the next line to be filled from Filler. FillerDone() should
 * always be called before GetNextFill() to ensure that we
 * do not run past the end of the fill table.
 * @param Filler filler to get next fill spec from (advanced in place)
 * @param Fill place to put spec for next fill
 */
void GetNextFill(TABLE_FILLER *Filler, FILL_SPEC *Fill) {
  FILL_SWITCH *Next;
  /* compute the fill assuming no switches will be encountered */
  Fill->AngleStart = Filler->AngleStart;
  Fill->AngleEnd = Filler->AngleEnd;
  Fill->X = Filler->X;
  // The filler tracks Y in 8.8 fixed point; shift to get bucket indices.
  Fill->YStart = Filler->YStart >> 8;
  Fill->YEnd = Filler->YEnd >> 8;
  /* update the fill info and the filler for ALL switches at this X value */
  Next = &(Filler->Switch[Filler->NextSwitch]);
  while (Filler->X >= Next->X) {
    Fill->X = Filler->X = Next->X;
    if (Next->Type == StartSwitch) {
      // The lower edge changes slope at this X.
      Fill->YStart = Next->Y;
      Filler->StartDelta = Next->Delta;
      Filler->YStart = Next->YInit;
    } else if (Next->Type == EndSwitch) {
      // The upper edge changes slope at this X.
      Fill->YEnd = Next->Y;
      Filler->EndDelta = Next->Delta;
      Filler->YEnd = Next->YInit;
    } else { /* Type must be LastSwitch */
      break;
    }
    Filler->NextSwitch++;
    Next = &(Filler->Switch[Filler->NextSwitch]);
  }
  /* prepare the filler for the next call to this routine */
  Filler->X++;
  Filler->YStart += Filler->StartDelta;
  Filler->YEnd += Filler->EndDelta;
} /* GetNextFill */
/**
 * This routine computes a data structure (Filler)
 * which can be used to fill in a rectangle surrounding
 * the specified Proto. Results are returned in Filler.
 *
 * Horizontal and vertical protos are handled as special cases;
 * diagonal protos compute per-column Y deltas in 8.8 fixed point
 * and switch points where the acceptance-region edges change slope.
 *
 * @param EndPad, SidePad, AnglePad padding to add to proto
 * @param Proto proto to create a filler for
 * @param Filler place to put table filler
 */
void InitTableFiller(float EndPad, float SidePad, float AnglePad, PROTO_STRUCT *Proto, TABLE_FILLER *Filler)
#define XS X_SHIFT
#define YS Y_SHIFT
#define AS ANGLE_SHIFT
#define NB NUM_CP_BUCKETS
{
  float Angle;
  float X, Y, HalfLength;
  float Cos, Sin;
  float XAdjust, YAdjust;
  FPOINT Start, Switch1, Switch2, End;
  int S1 = 0;
  int S2 = 1;
  Angle = Proto->Angle;
  X = Proto->X;
  Y = Proto->Y;
  HalfLength = Proto->Length / 2.0;
  // Angular extent of the acceptance region (circular buckets).
  Filler->AngleStart = CircBucketFor(Angle - AnglePad, AS, NB);
  Filler->AngleEnd = CircBucketFor(Angle + AnglePad, AS, NB);
  Filler->NextSwitch = 0;
  if (fabs(Angle - 0.0) < HV_TOLERANCE || fabs(Angle - 0.5) < HV_TOLERANCE) {
    /* horizontal proto - handle as special case */
    Filler->X = Bucket8For(X - HalfLength - EndPad, XS, NB);
    Filler->YStart = Bucket16For(Y - SidePad, YS, NB * 256);
    Filler->YEnd = Bucket16For(Y + SidePad, YS, NB * 256);
    Filler->StartDelta = 0;
    Filler->EndDelta = 0;
    Filler->Switch[0].Type = LastSwitch;
    Filler->Switch[0].X = Bucket8For(X + HalfLength + EndPad, XS, NB);
  } else if (fabs(Angle - 0.25) < HV_TOLERANCE || fabs(Angle - 0.75) < HV_TOLERANCE) {
    /* vertical proto - handle as special case */
    Filler->X = Bucket8For(X - SidePad, XS, NB);
    Filler->YStart = Bucket16For(Y - HalfLength - EndPad, YS, NB * 256);
    Filler->YEnd = Bucket16For(Y + HalfLength + EndPad, YS, NB * 256);
    Filler->StartDelta = 0;
    Filler->EndDelta = 0;
    Filler->Switch[0].Type = LastSwitch;
    Filler->Switch[0].X = Bucket8For(X + SidePad, XS, NB);
  } else {
    /* diagonal proto */
    if ((Angle > 0.0 && Angle < 0.25) || (Angle > 0.5 && Angle < 0.75)) {
      /* rising diagonal proto */
      Angle *= 2.0 * M_PI;
      Cos = fabs(std::cos(Angle));
      Sin = fabs(std::sin(Angle));
      /* compute the positions of the corners of the acceptance region */
      Start.x = X - (HalfLength + EndPad) * Cos - SidePad * Sin;
      Start.y = Y - (HalfLength + EndPad) * Sin + SidePad * Cos;
      End.x = 2.0 * X - Start.x;
      End.y = 2.0 * Y - Start.y;
      Switch1.x = X - (HalfLength + EndPad) * Cos + SidePad * Sin;
      Switch1.y = Y - (HalfLength + EndPad) * Sin - SidePad * Cos;
      Switch2.x = 2.0 * X - Switch1.x;
      Switch2.y = 2.0 * Y - Switch1.y;
      if (Switch1.x > Switch2.x) {
        S1 = 1;
        S2 = 0;
      }
      /* translate into bucket positions and deltas */
      Filler->X = Bucket8For(Start.x, XS, NB);
      // Clip the 8.8 fixed-point deltas to the int16_t range so that
      // near-horizontal/vertical diagonals cannot overflow. This matches
      // the falling-diagonal branch below, which already clips.
      Filler->StartDelta = static_cast<int16_t>(
          ClipToRange<int>(-IntCastRounded((Cos / Sin) * 256), INT16_MIN, INT16_MAX));
      Filler->EndDelta = static_cast<int16_t>(
          ClipToRange<int>(IntCastRounded((Sin / Cos) * 256), INT16_MIN, INT16_MAX));
      XAdjust = BucketEnd(Filler->X, XS, NB) - Start.x;
      YAdjust = XAdjust * Cos / Sin;
      Filler->YStart = Bucket16For(Start.y - YAdjust, YS, NB * 256);
      YAdjust = XAdjust * Sin / Cos;
      Filler->YEnd = Bucket16For(Start.y + YAdjust, YS, NB * 256);
      Filler->Switch[S1].Type = StartSwitch;
      Filler->Switch[S1].X = Bucket8For(Switch1.x, XS, NB);
      Filler->Switch[S1].Y = Bucket8For(Switch1.y, YS, NB);
      XAdjust = Switch1.x - BucketStart(Filler->Switch[S1].X, XS, NB);
      YAdjust = XAdjust * Sin / Cos;
      Filler->Switch[S1].YInit = Bucket16For(Switch1.y - YAdjust, YS, NB * 256);
      Filler->Switch[S1].Delta = Filler->EndDelta;
      Filler->Switch[S2].Type = EndSwitch;
      Filler->Switch[S2].X = Bucket8For(Switch2.x, XS, NB);
      Filler->Switch[S2].Y = Bucket8For(Switch2.y, YS, NB);
      XAdjust = Switch2.x - BucketStart(Filler->Switch[S2].X, XS, NB);
      YAdjust = XAdjust * Cos / Sin;
      Filler->Switch[S2].YInit = Bucket16For(Switch2.y + YAdjust, YS, NB * 256);
      Filler->Switch[S2].Delta = Filler->StartDelta;
      Filler->Switch[2].Type = LastSwitch;
      Filler->Switch[2].X = Bucket8For(End.x, XS, NB);
    } else {
      /* falling diagonal proto */
      Angle *= 2.0 * M_PI;
      Cos = fabs(std::cos(Angle));
      Sin = fabs(std::sin(Angle));
      /* compute the positions of the corners of the acceptance region */
      Start.x = X - (HalfLength + EndPad) * Cos - SidePad * Sin;
      Start.y = Y + (HalfLength + EndPad) * Sin - SidePad * Cos;
      End.x = 2.0 * X - Start.x;
      End.y = 2.0 * Y - Start.y;
      Switch1.x = X - (HalfLength + EndPad) * Cos + SidePad * Sin;
      Switch1.y = Y + (HalfLength + EndPad) * Sin + SidePad * Cos;
      Switch2.x = 2.0 * X - Switch1.x;
      Switch2.y = 2.0 * Y - Switch1.y;
      if (Switch1.x > Switch2.x) {
        S1 = 1;
        S2 = 0;
      }
      /* translate into bucket positions and deltas */
      Filler->X = Bucket8For(Start.x, XS, NB);
      Filler->StartDelta = static_cast<int16_t>(
          ClipToRange<int>(-IntCastRounded((Sin / Cos) * 256), INT16_MIN, INT16_MAX));
      Filler->EndDelta = static_cast<int16_t>(
          ClipToRange<int>(IntCastRounded((Cos / Sin) * 256), INT16_MIN, INT16_MAX));
      XAdjust = BucketEnd(Filler->X, XS, NB) - Start.x;
      YAdjust = XAdjust * Sin / Cos;
      Filler->YStart = Bucket16For(Start.y - YAdjust, YS, NB * 256);
      YAdjust = XAdjust * Cos / Sin;
      Filler->YEnd = Bucket16For(Start.y + YAdjust, YS, NB * 256);
      Filler->Switch[S1].Type = EndSwitch;
      Filler->Switch[S1].X = Bucket8For(Switch1.x, XS, NB);
      Filler->Switch[S1].Y = Bucket8For(Switch1.y, YS, NB);
      XAdjust = Switch1.x - BucketStart(Filler->Switch[S1].X, XS, NB);
      YAdjust = XAdjust * Sin / Cos;
      Filler->Switch[S1].YInit = Bucket16For(Switch1.y + YAdjust, YS, NB * 256);
      Filler->Switch[S1].Delta = Filler->StartDelta;
      Filler->Switch[S2].Type = StartSwitch;
      Filler->Switch[S2].X = Bucket8For(Switch2.x, XS, NB);
      Filler->Switch[S2].Y = Bucket8For(Switch2.y, YS, NB);
      XAdjust = Switch2.x - BucketStart(Filler->Switch[S2].X, XS, NB);
      YAdjust = XAdjust * Cos / Sin;
      Filler->Switch[S2].YInit = Bucket16For(Switch2.y - YAdjust, YS, NB * 256);
      Filler->Switch[S2].Delta = Filler->EndDelta;
      Filler->Switch[2].Type = LastSwitch;
      Filler->Switch[2].X = Bucket8For(End.x, XS, NB);
    }
  }
} /* InitTableFiller */
/*---------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
/**
 * This routine renders the specified feature into the window as a
 * short oriented line segment.
 * @param window window to add feature rendering to (must be non-null)
 * @param Feature feature to be rendered (must be non-null)
 * @param color color to use for feature rendering
 * @note Globals: none
 */
void RenderIntFeature(ScrollView *window, const INT_FEATURE_STRUCT *Feature,
                      ScrollView::Color color) {
  // Validate the arguments before they are dereferenced/used; the previous
  // version called window->Pen(color) before asserting on them.
  assert(Feature != nullptr);
  assert(color != 0);
  window->Pen(color);
  const float X = Feature->X;
  const float Y = Feature->Y;
  const float Length = GetPicoFeatureLength() * 0.7 * INT_CHAR_NORM_RANGE;
  // The -PI has no significant effect here, but the value of Theta is computed
  // using BinaryAnglePlusPi in intfx.cpp.
  const float angle = (Feature->Theta / 256.0) * 2.0 * M_PI - M_PI;
  const float Dx = (Length / 2.0) * cos(angle);
  const float Dy = (Length / 2.0) * sin(angle);
  window->SetCursor(X, Y);
  window->DrawTo(X + Dx, Y + Dy);
} /* RenderIntFeature */
/**
 * This routine extracts the parameters of the specified
 * proto from the class description and adds a rendering of
 * the proto onto the ShapeList.
 *
 * @param window ScrollView instance
 * @param Class class that proto is contained in
 * @param ProtoId id of proto to be rendered
 * @param color color to render proto in
 *
 * Globals: none
 *
 * @return New shape list with a rendering of one proto added.
 */
void RenderIntProto(ScrollView *window, INT_CLASS_STRUCT *Class, PROTO_ID ProtoId,
                    ScrollView::Color color) {
  INT_PROTO_STRUCT *Proto;
  int ProtoSetIndex;
  int ProtoWordIndex;
  float Length;
  int Xmin, Xmax, Ymin, Ymax;
  float X, Y, Dx, Dy;
  uint32_t ProtoMask;
  int Bucket;
  assert(ProtoId >= 0);
  assert(Class != nullptr);
  assert(ProtoId < Class->NumProtos);
  assert(color != 0);
  window->Pen(color);
  // Locate the proto within its proto set and recover its pruner bit mask.
  auto ProtoSet = Class->ProtoSets[SetForProto(ProtoId)];
  ProtoSetIndex = IndexForProto(ProtoId);
  Proto = &(ProtoSet->Protos[ProtoSetIndex]);
  Length = (Class->ProtoLengths[ProtoId] * GetPicoFeatureLength() * INT_CHAR_NORM_RANGE);
  ProtoMask = PPrunerMaskFor(ProtoId);
  ProtoWordIndex = PPrunerWordIndexFor(ProtoId);
  // find the x and y extent of the proto from the proto pruning table
  Xmin = Ymin = NUM_PP_BUCKETS;
  Xmax = Ymax = 0;
  for (Bucket = 0; Bucket < NUM_PP_BUCKETS; Bucket++) {
    if (ProtoMask & ProtoSet->ProtoPruner[PRUNER_X][Bucket][ProtoWordIndex]) {
      UpdateRange(Bucket, &Xmin, &Xmax);
    }
    if (ProtoMask & ProtoSet->ProtoPruner[PRUNER_Y][Bucket][ProtoWordIndex]) {
      UpdateRange(Bucket, &Ymin, &Ymax);
    }
  }
  // Draw a line segment centered on the midpoint of the proto's
  // pruner-table extent, oriented by its binary angle (256 units per
  // full revolution).
  X = (Xmin + Xmax + 1) / 2.0 * PROTO_PRUNER_SCALE;
  Y = (Ymin + Ymax + 1) / 2.0 * PROTO_PRUNER_SCALE;
  // The -PI has no significant effect here, but the value of Theta is computed
  // using BinaryAnglePlusPi in intfx.cpp.
  Dx = (Length / 2.0) * cos((Proto->Angle / 256.0) * 2.0 * M_PI - M_PI);
  Dy = (Length / 2.0) * sin((Proto->Angle / 256.0) * 2.0 * M_PI - M_PI);
  window->SetCursor(X - Dx, Y - Dy);
  window->DrawTo(X + Dx, Y + Dy);
} /* RenderIntProto */
#endif
#ifndef GRAPHICS_DISABLED
/**
 * Lazily creates the int matcher debug window, attaching its
 * class/shape debugging popup menu on first use.
 */
void InitIntMatchWindowIfReqd() {
  if (IntMatchWindow != nullptr) {
    return; // Window already exists; nothing to do.
  }
  IntMatchWindow = CreateFeatureSpaceWindow("IntMatchWindow", 50, 200);
  auto *popup_menu = new SVMenuNode();
  popup_menu->AddChild("Debug Adapted classes", IDA_ADAPTIVE, "x", "Class to debug");
  popup_menu->AddChild("Debug Static classes", IDA_STATIC, "x", "Class to debug");
  popup_menu->AddChild("Debug Both", IDA_BOTH, "x", "Class to debug");
  popup_menu->AddChild("Debug Shape Index", IDA_SHAPE_INDEX, "0", "Index to debug");
  popup_menu->BuildMenu(IntMatchWindow, false);
}
/**
 * Lazily creates the proto display window on first use.
 */
void InitProtoDisplayWindowIfReqd() {
  if (ProtoDisplayWindow != nullptr) {
    return; // Window already exists; nothing to do.
  }
  ProtoDisplayWindow = CreateFeatureSpaceWindow("ProtoDisplayWindow", 550, 200);
}
/**
 * Lazily creates the feature display window on first use.
 */
void InitFeatureDisplayWindowIfReqd() {
  if (FeatureDisplayWindow != nullptr) {
    return; // Window already exists; nothing to do.
  }
  FeatureDisplayWindow = CreateFeatureSpaceWindow("FeatureDisplayWindow", 50, 700);
}
/// Creates a window of the appropriate size for displaying elements
/// in feature space.
ScrollView *CreateFeatureSpaceWindow(const char *name, int xpos, int ypos) {
  constexpr int kWindowSize = 520; // width and height in pixels
  constexpr int kHalfSize = 260;   // initial view origin
  return new ScrollView(name, xpos, ypos, kWindowSize, kWindowSize, kHalfSize, kHalfSize, true);
}
#endif // !GRAPHICS_DISABLED
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/intproto.cpp
|
C++
|
apache-2.0
| 58,549
|
/******************************************************************************
** Filename: intproto.h
** Purpose: Definition of data structures for integer protos.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*****************************************************************************/
#ifndef INTPROTO_H
#define INTPROTO_H
/**----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "matchdefs.h"
#include "mfoutline.h"
#include "protos.h"
#include "scrollview.h"
#include "unicharset.h"
namespace tesseract {
class FCOORD;
/* define order of params in pruners */
#define PRUNER_X 0
#define PRUNER_Y 1
#define PRUNER_ANGLE 2
/* definition of coordinate system offsets for each table parameter */
#define ANGLE_SHIFT (0.0)
#define X_SHIFT (0.5)
#define Y_SHIFT (0.5)
#define MAX_PROTO_INDEX 24
#define BITS_PER_WERD static_cast<int>(8 * sizeof(uint32_t))
/* Script detection: increase this number to 128 */
#define MAX_NUM_CONFIGS 64
#define MAX_NUM_PROTOS 512
#define PROTOS_PER_PROTO_SET 64
#define MAX_NUM_PROTO_SETS (MAX_NUM_PROTOS / PROTOS_PER_PROTO_SET)
#define NUM_PP_PARAMS 3
#define NUM_PP_BUCKETS 64
#define NUM_CP_BUCKETS 24
#define CLASSES_PER_CP 32
#define NUM_BITS_PER_CLASS 2
#define CLASS_PRUNER_CLASS_MASK (~(~0u << NUM_BITS_PER_CLASS))
#define CLASSES_PER_CP_WERD (CLASSES_PER_CP / NUM_BITS_PER_CLASS)
#define PROTOS_PER_PP_WERD BITS_PER_WERD
#define BITS_PER_CP_VECTOR (CLASSES_PER_CP * NUM_BITS_PER_CLASS)
#define MAX_NUM_CLASS_PRUNERS ((MAX_NUM_CLASSES + CLASSES_PER_CP - 1) / CLASSES_PER_CP)
#define WERDS_PER_CP_VECTOR (BITS_PER_CP_VECTOR / BITS_PER_WERD)
#define WERDS_PER_PP_VECTOR ((PROTOS_PER_PROTO_SET + BITS_PER_WERD - 1) / BITS_PER_WERD)
#define WERDS_PER_PP (NUM_PP_PARAMS * NUM_PP_BUCKETS * WERDS_PER_PP_VECTOR)
#define WERDS_PER_CP (NUM_CP_BUCKETS * NUM_CP_BUCKETS * NUM_CP_BUCKETS * WERDS_PER_CP_VECTOR)
#define WERDS_PER_CONFIG_VEC ((MAX_NUM_CONFIGS + BITS_PER_WERD - 1) / BITS_PER_WERD)
/* The first 3 dimensions of the CLASS_PRUNER_STRUCT are the
 * 3 axes of the quantized feature space.
 * The position of the bits recorded for each class in the
 * 4th dimension is determined by using CPrunerWordIndexFor(c),
 * where c is the corresponding class id. */
struct CLASS_PRUNER_STRUCT {
  // Packed per-class bit fields (NUM_BITS_PER_CLASS bits each) indexed by
  // the quantized X, Y and angle buckets of a feature.
  uint32_t p[NUM_CP_BUCKETS][NUM_CP_BUCKETS][NUM_CP_BUCKETS][WERDS_PER_CP_VECTOR];
};
// Integer (quantized) representation of one proto.
struct INT_PROTO_STRUCT {
  // A, B, C appear to be quantized coefficients of the proto's line
  // equation — TODO confirm against the proto-filling code in intproto.cpp.
  int8_t A;
  uint8_t B;
  int8_t C;
  // Direction as a binary angle: 256 units per full revolution
  // (RenderIntProto maps Angle/256 onto 2*PI).
  uint8_t Angle;
  // Presumably a bit vector marking the configurations this proto belongs
  // to (one bit per config, MAX_NUM_CONFIGS bits total) — verify in
  // ConvertConfig.
  uint32_t Configs[WERDS_PER_CONFIG_VEC];
};
typedef uint32_t PROTO_PRUNER[NUM_PP_PARAMS][NUM_PP_BUCKETS][WERDS_PER_PP_VECTOR];
// A fixed-size group of protos plus the pruner table used to reject them
// cheaply before full matching.
struct PROTO_SET_STRUCT {
  PROTO_PRUNER ProtoPruner; // quantized lookup table over X/Y/angle buckets
  INT_PROTO_STRUCT Protos[PROTOS_PER_PROTO_SET]; // the protos of this set
};
typedef uint32_t CONFIG_PRUNER[NUM_PP_PARAMS][NUM_PP_BUCKETS][4];
// Integer templates for a single character class.
struct INT_CLASS_STRUCT {
  INT_CLASS_STRUCT() = default;
  // Sized constructor; implementation in intproto.cpp — presumably allocates
  // proto sets for MaxNumProtos and reserves MaxNumConfigs config slots.
  INT_CLASS_STRUCT(int MaxNumProtos, int MaxNumConfigs);
  ~INT_CLASS_STRUCT();
  uint16_t NumProtos = 0;   // number of protos currently in use
  uint8_t NumProtoSets = 0; // number of allocated entries in ProtoSets
  uint8_t NumConfigs = 0;   // number of configurations currently in use
  // Proto sets; lifetime managed by the ctor/dtor defined in intproto.cpp.
  PROTO_SET_STRUCT *ProtoSets[MAX_NUM_PROTO_SETS];
  std::vector<uint8_t> ProtoLengths; // per-proto length, indexed by proto id
  uint16_t ConfigLengths[MAX_NUM_CONFIGS]; // per-configuration length
  int font_set_id = 0; // FontSet id, see above
};
// Top-level container for all classes' integer templates plus the class
// pruner tables used for coarse pre-matching.
struct TESS_API INT_TEMPLATES_STRUCT {
  INT_TEMPLATES_STRUCT();
  ~INT_TEMPLATES_STRUCT();
  unsigned NumClasses;      // number of classes in use
  unsigned NumClassPruners; // number of class pruner tables in use
  INT_CLASS_STRUCT *Class[MAX_NUM_CLASSES]; // per-class templates, indexed by class id
  CLASS_PRUNER_STRUCT *ClassPruners[MAX_NUM_CLASS_PRUNERS]; // see CPrunerFor
};
/* definitions of integer features*/
#define MAX_NUM_INT_FEATURES 512
#define INT_CHAR_NORM_RANGE 256
// One quantized feature: position plus direction, all in 8-bit ranges.
struct INT_FEATURE_STRUCT {
  INT_FEATURE_STRUCT() : X(0), Y(0), Theta(0), CP_misses(0) {}
  // Builds a feature from an FCOORD for position with all the necessary
  // clipping and rounding.
  INT_FEATURE_STRUCT(const FCOORD &pos, uint8_t theta);
  // Builds a feature from ints with all the necessary clipping and casting.
  INT_FEATURE_STRUCT(int x, int y, int theta);
  uint8_t X;     // quantized x position
  uint8_t Y;     // quantized y position
  uint8_t Theta; // direction as a binary angle (256 units per revolution)
  // NOTE(review): presumably a class-pruner miss counter — confirm exact
  // semantics against the matcher code.
  int8_t CP_misses;
  // Dumps the feature as "(X,Y):Theta" for debugging.
  void print() const {
    tprintf("(%d,%d):%d\n", X, Y, Theta);
  }
};
typedef INT_FEATURE_STRUCT INT_FEATURE_ARRAY[MAX_NUM_INT_FEATURES];
enum IntmatcherDebugAction { IDA_ADAPTIVE, IDA_STATIC, IDA_SHAPE_INDEX, IDA_BOTH };
/**----------------------------------------------------------------------------
Macros
----------------------------------------------------------------------------**/
#define MaxNumIntProtosIn(C) (C->NumProtoSets * PROTOS_PER_PROTO_SET)
#define SetForProto(P) (P / PROTOS_PER_PROTO_SET)
#define IndexForProto(P) (P % PROTOS_PER_PROTO_SET)
#define ProtoForProtoId(C, P) (&((C->ProtoSets[SetForProto(P)])->Protos[IndexForProto(P)]))
#define PPrunerWordIndexFor(I) (((I) % PROTOS_PER_PROTO_SET) / PROTOS_PER_PP_WERD)
#define PPrunerBitIndexFor(I) ((I) % PROTOS_PER_PP_WERD)
#define PPrunerMaskFor(I) (1 << PPrunerBitIndexFor(I))
#define MaxNumClassesIn(T) (T->NumClassPruners * CLASSES_PER_CP)
#define LegalClassId(c) ((c) >= 0 && (c) < MAX_NUM_CLASSES)
#define UnusedClassIdIn(T, c) ((T)->Class[c] == nullptr)
#define ClassForClassId(T, c) ((T)->Class[c])
#define ClassPrunersFor(T) ((T)->ClassPruner)
#define CPrunerIdFor(c) ((c) / CLASSES_PER_CP)
#define CPrunerFor(T, c) ((T)->ClassPruners[CPrunerIdFor(c)])
#define CPrunerWordIndexFor(c) (((c) % CLASSES_PER_CP) / CLASSES_PER_CP_WERD)
#define CPrunerBitIndexFor(c) (((c) % CLASSES_PER_CP) % CLASSES_PER_CP_WERD)
#define CPrunerMaskFor(L, c) (((L) + 1) << CPrunerBitIndexFor(c) * NUM_BITS_PER_CLASS)
/* DEBUG macros*/
#define PRINT_MATCH_SUMMARY 0x001
#define DISPLAY_FEATURE_MATCHES 0x002
#define DISPLAY_PROTO_MATCHES 0x004
#define PRINT_FEATURE_MATCHES 0x008
#define PRINT_PROTO_MATCHES 0x010
#define CLIP_MATCH_EVIDENCE 0x020
#define MatchDebuggingOn(D) (D)
#define PrintMatchSummaryOn(D) ((D)&PRINT_MATCH_SUMMARY)
#define DisplayFeatureMatchesOn(D) ((D)&DISPLAY_FEATURE_MATCHES)
#define DisplayProtoMatchesOn(D) ((D)&DISPLAY_PROTO_MATCHES)
#define PrintFeatureMatchesOn(D) ((D)&PRINT_FEATURE_MATCHES)
#define PrintProtoMatchesOn(D) ((D)&PRINT_PROTO_MATCHES)
#define ClipMatchEvidenceOn(D) ((D)&CLIP_MATCH_EVIDENCE)
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
void AddIntClass(INT_TEMPLATES_STRUCT *Templates, CLASS_ID ClassId, INT_CLASS_STRUCT *Class);
int AddIntConfig(INT_CLASS_STRUCT *Class);
int AddIntProto(INT_CLASS_STRUCT *Class);
void AddProtoToClassPruner(PROTO_STRUCT *Proto, CLASS_ID ClassId, INT_TEMPLATES_STRUCT *Templates);
void AddProtoToProtoPruner(PROTO_STRUCT *Proto, int ProtoId, INT_CLASS_STRUCT *Class, bool debug);
uint8_t Bucket8For(float param, float offset, int num_buckets);
uint16_t Bucket16For(float param, float offset, int num_buckets);
uint8_t CircBucketFor(float param, float offset, int num_buckets);
void UpdateMatchDisplay();
void ConvertConfig(BIT_VECTOR Config, int ConfigId, INT_CLASS_STRUCT *Class);
void DisplayIntFeature(const INT_FEATURE_STRUCT *Feature, float Evidence);
void DisplayIntProto(INT_CLASS_STRUCT *Class, PROTO_ID ProtoId, float Evidence);
void ShowMatchDisplay();
#ifndef GRAPHICS_DISABLED
// Clears the given window and draws the featurespace guides for the
// appropriate normalization method.
TESS_API
void ClearFeatureSpaceWindow(NORM_METHOD norm_method, ScrollView *window);
#endif // !GRAPHICS_DISABLED
/*----------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
TESS_API
void RenderIntFeature(ScrollView *window, const INT_FEATURE_STRUCT *Feature,
ScrollView::Color color);
void InitIntMatchWindowIfReqd();
void InitProtoDisplayWindowIfReqd();
void InitFeatureDisplayWindowIfReqd();
// Creates a window of the appropriate size for displaying elements
// in feature space.
TESS_API
ScrollView *CreateFeatureSpaceWindow(const char *name, int xpos, int ypos);
#endif // !GRAPHICS_DISABLED
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/intproto.h
|
C++
|
apache-2.0
| 8,671
|
/******************************************************************************
** Filename: kdtree.cpp
** Purpose: Routines for managing K-D search trees
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*-----------------------------------------------------------------------------
Include Files and Type Defines
-----------------------------------------------------------------------------*/
#include "kdtree.h"

#include <algorithm>
#include <cfloat> // for FLT_MAX
#include <cmath>
#include <cstdio>
#include <vector> // for std::vector
namespace tesseract {
#define Magnitude(X) ((X) < 0 ? -(X) : (X))
#define NodeFound(N, K, D) (((N)->Key == (K)) && ((N)->Data == (D)))
/*-----------------------------------------------------------------------------
Global Data Definitions and Declarations
-----------------------------------------------------------------------------*/
#define MINSEARCH (-FLT_MAX)
#define MAXSEARCH FLT_MAX
// Helper function to find the next essential dimension in a cycle.
// Advances from `level`, wrapping at KeySize, and skips any dimension
// marked NonEssential.
static int NextLevel(KDTREE *tree, int level) {
  int next = level;
  do {
    if (++next >= tree->KeySize) {
      next = 0; // wrap around to the first dimension
    }
  } while (tree->KeyDesc[next].NonEssential);
  return next;
}
//-----------------------------------------------------------------------------
/** Store the k smallest-keyed key-value pairs. */
template <typename Key, typename Value>
class MinK {
public:
  /// @param max_key the largest key that can ever be reported back
  /// @param k number of smallest-keyed pairs to retain (clamped to >= 1)
  MinK(Key max_key, int k);

  struct Element {
    Element() = default;
    Element(const Key &k, const Value &v) : key(k), value(v) {}
    Key key;
    Value value;
  };

  /// Inserts (k, v) if k is among the k_ smallest keys seen so far,
  /// evicting the current largest retained element when full.
  /// @return true if the pair was retained.
  bool insert(Key k, Value v);
  /// Largest key that insert() would still accept.
  const Key &max_insertable_key();
  int elements_count() {
    return elements_count_;
  }
  const Element *elements() {
    return elements_.data();
  }

private:
  const Key max_key_;             ///< the maximum possible Key
  std::vector<Element> elements_; ///< unsorted array of elements
  int elements_count_;            ///< the number of results collected so far
  int k_;                         ///< the number of results we want from the search
  int max_index_;                 ///< the index of the result with the largest key
};

// std::vector replaces the previous raw owning new[]/delete[]: the old
// version had a user-written destructor but implicit copy operations, so
// copying a MinK would have double-freed the element array.
template <typename Key, typename Value>
MinK<Key, Value>::MinK(Key max_key, int k)
    : max_key_(max_key), elements_(k < 1 ? 1 : k), elements_count_(0), k_(k < 1 ? 1 : k),
      max_index_(0) {}

template <typename Key, typename Value>
const Key &MinK<Key, Value>::max_insertable_key() {
  if (elements_count_ < k_) {
    // Not full yet: anything up to the global maximum can be inserted.
    return max_key_;
  }
  return elements_[max_index_].key;
}

template <typename Key, typename Value>
bool MinK<Key, Value>::insert(Key key, Value value) {
  if (elements_count_ < k_) {
    elements_[elements_count_++] = Element(key, value);
    if (key > elements_[max_index_].key) {
      max_index_ = elements_count_ - 1;
    }
    return true;
  } else if (key < elements_[max_index_].key) {
    // evict the largest element.
    elements_[max_index_] = Element(key, value);
    // recompute max_index_
    for (int i = 0; i < elements_count_; i++) {
      if (elements_[i].key > elements_[max_index_].key) {
        max_index_ = i;
      }
    }
    return true;
  }
  return false;
}
//-----------------------------------------------------------------------------
/** Helper class for searching for the k closest points to query_point in tree.
 */
class KDTreeSearch {
public:
  /// @param tree tree to search (not owned)
  /// @param query_point point whose neighbors are wanted (not owned)
  /// @param k_closest maximum number of neighbors to collect
  KDTreeSearch(KDTREE *tree, float *query_point, int k_closest);
  ~KDTreeSearch();
  /** Return the k nearest points' data. */
  void Search(int *result_count, float *distances, void **results);

private:
  // Recursive traversal accumulating candidates into results_.
  void SearchRec(int Level, KDNODE *SubTree);
  // Tests whether the current query ball intersects the box [lower, upper].
  bool BoxIntersectsSearch(float *lower, float *upper);
  KDTREE *tree_;
  float *query_point_;
  float *sb_min_; ///< search box minimum
  float *sb_max_; ///< search box maximum
  MinK<float, void *> results_; ///< keeps (squared distance, data) pairs
};
// Construct a search bounded by MAXSEARCH (i.e. unbounded distance) that
// collects at most k_closest candidates.
KDTreeSearch::KDTreeSearch(KDTREE *tree, float *query_point, int k_closest)
    : tree_(tree), query_point_(query_point), results_(MAXSEARCH, k_closest) {
  // Search-box bounds are sized to the tree's dimensionality.
  sb_min_ = new float[tree->KeySize];
  sb_max_ = new float[tree->KeySize];
}
KDTreeSearch::~KDTreeSearch() {
  delete[] sb_min_;
  delete[] sb_max_;
}
/// Locate the k_closest points to query_point_, and return their distances and
/// data into the given buffers.
void KDTreeSearch::Search(int *result_count, float *distances, void **results) {
  if (tree_->Root.Left == nullptr) {
    // Empty tree: no neighbors to report.
    *result_count = 0;
  } else {
    // Start with the search box covering the tree's whole key space.
    for (int i = 0; i < tree_->KeySize; i++) {
      sb_min_[i] = tree_->KeyDesc[i].Min;
      sb_max_[i] = tree_->KeyDesc[i].Max;
    }
    SearchRec(0, tree_->Root.Left);
    int count = results_.elements_count();
    *result_count = count;
    // Keys collected by SearchRec are squared distances; take the root here.
    for (int j = 0; j < count; j++) {
      // Pre-cast to float64 as key is a template type and we have no control
      // over its actual type.
      distances[j] = static_cast<float>(sqrt(static_cast<double>(results_.elements()[j].key)));
      results[j] = results_.elements()[j].value;
    }
  }
}
/*-----------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
/// Allocates and returns a new KDTREE configured from the given
/// dimension descriptions.
/// @param KeySize # of dimensions in the K-D tree
/// @param KeyDesc array of params to describe key dimensions
KDTREE *MakeKDTree(int16_t KeySize, const PARAM_DESC KeyDesc[]) {
  auto *tree = new KDTREE(KeySize);
  for (int i = 0; i < KeySize; i++) {
    PARAM_DESC &dst = tree->KeyDesc[i];
    const PARAM_DESC &src = KeyDesc[i];
    dst.NonEssential = src.NonEssential;
    dst.Circular = src.Circular;
    if (src.Circular) {
      // Circular dimensions keep their real bounds plus derived range values.
      dst.Min = src.Min;
      dst.Max = src.Max;
      dst.Range = src.Max - src.Min;
      dst.HalfRange = dst.Range / 2;
      dst.MidRange = (src.Max + src.Min) / 2;
    } else {
      // Linear dimensions are treated as unbounded for searching.
      dst.Min = MINSEARCH;
      dst.Max = MAXSEARCH;
    }
  }
  tree->Root.Left = nullptr;
  tree->Root.Right = nullptr;
  return tree;
}
/**
 * Stores Data in the K-D tree specified by Tree using Key as an
 * access key.
 *
 * @param Tree K-D tree in which data is to be stored
 * @param Key ptr to key by which data can be retrieved
 * @param Data ptr to data to be stored in the tree
 */
void KDStore(KDTREE *Tree, float *Key, CLUSTER *Data) {
  int level = NextLevel(Tree, -1);
  // Walk down via a pointer-to-link so the new node can be attached in place.
  KDNODE **link = &(Tree->Root.Left);
  while (*link != nullptr) {
    KDNODE *node = *link;
    if (Key[level] < node->BranchPoint) {
      // Descend left, widening the recorded left-branch bound if needed.
      link = &(node->Left);
      if (Key[level] > node->LeftBranch) {
        node->LeftBranch = Key[level];
      }
    } else {
      // Descend right, narrowing the recorded right-branch bound if needed.
      link = &(node->Right);
      if (Key[level] < node->RightBranch) {
        node->RightBranch = Key[level];
      }
    }
    level = NextLevel(Tree, level);
  }
  // Attach a new leaf that branches on the dimension reached here.
  *link = new KDNODE(Tree, Key, Data, level);
} /* KDStore */
/**
 * This routine deletes a node from Tree. The node to be
 * deleted is specified by the Key for the node and the Data
 * contents of the node. These two pointers must be identical
 * to the pointers that were used for the node when it was
 * originally stored in the tree. A node will be deleted from
 * the tree only if its key and data pointers are identical
 * to Key and Data respectively. The tree is re-formed by removing
 * the affected subtree and inserting all elements but the root.
 *
 * @param Tree K-D tree to delete node from
 * @param Key key of node to be deleted
 * @param Data data contents of node to be deleted
 */
void KDDelete(KDTREE *Tree, float Key[], void *Data) {
  int Level;
  KDNODE *Current;
  KDNODE *Father;
  /* initialize search at root of tree */
  Father = &(Tree->Root);
  Current = Father->Left;
  Level = NextLevel(Tree, -1);
  /* search tree for node to be deleted */
  // On exit, Level is the level of Current (each descent advances both).
  while ((Current != nullptr) && (!NodeFound(Current, Key, Data))) {
    Father = Current;
    if (Key[Level] < Current->BranchPoint) {
      Current = Current->Left;
    } else {
      Current = Current->Right;
    }
    Level = NextLevel(Tree, Level);
  }
  if (Current != nullptr) { /* if node to be deleted was found */
    if (Current == Father->Left) {
      Father->Left = nullptr;
      // NOTE(review): Level here is Current's level, not Father's branching
      // level; verify the bound reset uses the intended dimension (for
      // non-circular dims Min/Max are MINSEARCH/MAXSEARCH everywhere, so
      // this only matters for circular dimensions).
      Father->LeftBranch = Tree->KeyDesc[Level].Min;
    } else {
      Father->Right = nullptr;
      Father->RightBranch = Tree->KeyDesc[Level].Max;
    }
    // Re-insert the payloads of both orphaned subtrees as fresh nodes, then
    // free the detached node; ~KDNODE recursively deletes the old subtrees.
    InsertNodes(Tree, Current->Left);
    InsertNodes(Tree, Current->Right);
    delete Current;
  }
} /* KDDelete */
/**
 * This routine searches the K-D tree specified by Tree and
 * finds the QuerySize nearest neighbors of Query. All neighbors
 * must be within MaxDistance of Query. The data contents of
 * the nearest neighbors
 * are placed in NBuffer and their distances from Query are
 * placed in DBuffer.
 * @param Tree ptr to K-D tree to be searched
 * @param Query ptr to query key (point in D-space)
 * @param QuerySize number of nearest neighbors to be found
 * @param MaxDistance all neighbors must be within this distance
 * @param NBuffer ptr to QuerySize buffer to hold nearest neighbors
 * @param DBuffer ptr to QuerySize buffer to hold distances
 * from nearest neighbor to query point
 * @param NumberOfResults [out] Number of nearest neighbors actually found
 */
void KDNearestNeighborSearch(KDTREE *Tree, float Query[], int QuerySize, float MaxDistance,
                             int *NumberOfResults, void **NBuffer, float DBuffer[]) {
  // NOTE(review): MaxDistance is currently ignored — KDTreeSearch collects
  // the QuerySize nearest neighbors regardless of distance. Callers should
  // not rely on the distance cutoff described above.
  KDTreeSearch search(Tree, Query, QuerySize);
  search.Search(NumberOfResults, DBuffer, NBuffer);
}
/*---------------------------------------------------------------------------*/
/** Walk a given Tree with action, starting at the real root (Root.Left). */
void KDWalk(KDTREE *Tree, kdwalk_proc action, ClusteringContext *context) {
  KDNODE *root = Tree->Root.Left;
  if (root == nullptr) {
    return; // empty tree: nothing to visit
  }
  Walk(Tree, action, context, root, NextLevel(Tree, -1));
}
/*-----------------------------------------------------------------------------
Private Code
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/**
 * Recursively accumulate the k_closest points to query_point_ into results_.
 * @param Level level in tree of sub-tree to be searched
 * @param SubTree sub-tree to be searched
 */
void KDTreeSearch::SearchRec(int level, KDNODE *sub_tree) {
  if (level >= tree_->KeySize) {
    level = 0;
  }
  // Prune this subtree if its bounding box cannot contain anything closer
  // than the current worst retained candidate.
  if (!BoxIntersectsSearch(sb_min_, sb_max_)) {
    return;
  }
  // Offer this node's point as a candidate; the key is the squared distance.
  results_.insert(DistanceSquared(tree_->KeySize, &tree_->KeyDesc[0], query_point_, sub_tree->Key),
                  sub_tree->Data);
  // Visit the child on the query's side of the branch plane first so the
  // candidate set tightens before the far side is examined. The search-box
  // bound for this dimension is narrowed for each descent and restored after.
  if (query_point_[level] < sub_tree->BranchPoint) {
    if (sub_tree->Left != nullptr) {
      float tmp = sb_max_[level];
      sb_max_[level] = sub_tree->LeftBranch;
      SearchRec(NextLevel(tree_, level), sub_tree->Left);
      sb_max_[level] = tmp;
    }
    if (sub_tree->Right != nullptr) {
      float tmp = sb_min_[level];
      sb_min_[level] = sub_tree->RightBranch;
      SearchRec(NextLevel(tree_, level), sub_tree->Right);
      sb_min_[level] = tmp;
    }
  } else {
    if (sub_tree->Right != nullptr) {
      float tmp = sb_min_[level];
      sb_min_[level] = sub_tree->RightBranch;
      SearchRec(NextLevel(tree_, level), sub_tree->Right);
      sb_min_[level] = tmp;
    }
    if (sub_tree->Left != nullptr) {
      float tmp = sb_max_[level];
      sb_max_[level] = sub_tree->LeftBranch;
      SearchRec(NextLevel(tree_, level), sub_tree->Left);
      sb_max_[level] = tmp;
    }
  }
}
/*---------------------------------------------------------------------------*/
/**
 * Returns the Euclidean distance squared between p1 and p2, summed over
 * all essential dimensions only.
 * @param k keys are in k-space
 * @param dim dimension descriptions (essential, circular, etc)
 * @param p1,p2 two different points in K-D space
 */
float DistanceSquared(int k, PARAM_DESC *dim, float p1[], float p2[]) {
  float total = 0;
  while (k-- > 0) {
    if (!dim->NonEssential) {
      float delta = *p1 - *p2;
      if (dim->Circular) {
        // The shorter way around a circular dimension may be via wraparound.
        delta = Magnitude(delta);
        const float wrapped = dim->Max - dim->Min - delta;
        delta = std::min(delta, wrapped);
      }
      total += delta * delta;
    }
    ++p1;
    ++p2;
    ++dim;
  }
  return total;
}
/** Returns the Euclidean distance between p1 and p2 over essential dims. */
float ComputeDistance(int k, PARAM_DESC *dim, float p1[], float p2[]) {
  const float squared = DistanceSquared(k, dim, p1, p2);
  return std::sqrt(squared);
}
/*---------------------------------------------------------------------------*/
/// Return whether the query region (the smallest known circle about
/// query_point_ containing results->k_ points) intersects the box specified
/// between lower and upper. For circular dimensions, we also check the point
/// one wrap distance away from the query.
bool KDTreeSearch::BoxIntersectsSearch(float *lower, float *upper) {
  float *query = query_point_;
  // Compute the sum in higher precision.
  double total_distance = 0.0;
  // results_ stores squared distances (SearchRec inserts DistanceSquared
  // values), so the current search radius squared is simply the largest
  // insertable key. The previous code multiplied the key by itself, mixing
  // units (distance^4 vs distance^2): it over-pruned (possibly missing true
  // neighbors) when the k-th best squared distance was < 1, and under-pruned
  // (slower) otherwise.
  double radius_squared = static_cast<double>(results_.max_insertable_key());
  PARAM_DESC *dim = &tree_->KeyDesc[0];
  for (int i = tree_->KeySize; i > 0; i--, dim++, query++, lower++, upper++) {
    if (dim->NonEssential) {
      continue;
    }
    // Distance from the query to the box along this dimension (0 if inside).
    float dimension_distance;
    if (*query < *lower) {
      dimension_distance = *lower - *query;
    } else if (*query > *upper) {
      dimension_distance = *query - *upper;
    } else {
      dimension_distance = 0;
    }
    /* if this dimension is circular - check wraparound distance */
    if (dim->Circular) {
      float wrap_distance = FLT_MAX;
      if (*query < *lower) {
        wrap_distance = *query + dim->Max - dim->Min - *upper;
      } else if (*query > *upper) {
        wrap_distance = *lower - (*query - (dim->Max - dim->Min));
      }
      dimension_distance = std::min(dimension_distance, wrap_distance);
    }
    total_distance += static_cast<double>(dimension_distance) * dimension_distance;
    if (total_distance >= radius_squared) {
      // Box is entirely outside the query ball: no intersection.
      return false;
    }
  }
  return true;
}
/*---------------------------------------------------------------------------*/
/**
 * Walk a tree, calling action once on each node in pre-order.
 *
 * Invokes action(context, node->Data, level) at every node, where level is
 * the node's depth relative to the starting level of the subtree root.
 * @param tree root of the tree being walked.
 * @param action action to be performed at every node
 * @param context action's context
 * @param sub_tree ptr to root of subtree to be walked
 * @param level current level in the tree for this node
 */
void Walk(KDTREE *tree, kdwalk_proc action, ClusteringContext *context, KDNODE *sub_tree, int32_t level) {
  // Visit this node first, then both children at the next essential level.
  (*action)(context, sub_tree->Data, level);
  const int32_t child_level = NextLevel(tree, level);
  if (sub_tree->Left != nullptr) {
    Walk(tree, action, context, sub_tree->Left, child_level);
  }
  if (sub_tree->Right != nullptr) {
    Walk(tree, action, context, sub_tree->Right, child_level);
  }
}
/** Given a subtree nodes, insert all of its elements into tree. */
void InsertNodes(KDTREE *tree, KDNODE *nodes) {
  if (nodes != nullptr) {
    // Re-insert this node's payload as a fresh node, then both children.
    KDStore(tree, nodes->Key, nodes->Data);
    InsertNodes(tree, nodes->Left);
    InsertNodes(tree, nodes->Right);
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/kdtree.cpp
|
C++
|
apache-2.0
| 16,410
|
/******************************************************************************
** Filename: kdtree.h
** Purpose: Definition of K-D tree access routines.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*****************************************************************************/
#ifndef KDTREE_H
#define KDTREE_H
#include "ocrfeatures.h"
namespace tesseract {
/**
NOTE: All circular parameters of all keys must be in the range
Min <= Param < Max
where Min and Max are specified in the KeyDesc parameter passed to
MakeKDTree. All KD routines assume that this is true and will not operate
correctly if circular parameters outside the specified range are used.
*/
struct ClusteringContext;
struct CLUSTER;
struct KDTREE;
using kdwalk_proc = void (*)(ClusteringContext *context, CLUSTER *Cluster, int32_t Level);
// One node of a K-D tree. Owns its child nodes, but NOT the Key array or
// the Data it points to.
struct KDNODE {
  /// This routine allocates memory for a new K-D tree node
  /// and places the specified Key and Data into it. The
  /// left and right subtree pointers for the node are
  /// initialized to empty subtrees.
  /// @param tree The tree to create the node for
  /// @param Key Access key for new node in KD tree
  /// @param Data ptr to data to be stored in new node
  /// @param Index index of Key to branch on
  KDNODE() = default;
  KDNODE(KDTREE *tree, float key[], CLUSTER *data, int Index);
  // Recursively destroys both subtrees; Key and Data are left untouched.
  ~KDNODE() {
    delete Left;
    delete Right;
  }
  float *Key; /**< search key (not owned) */
  CLUSTER *Data; /**< data that corresponds to key (not owned) */
  float BranchPoint; /**< needed to make deletes work efficiently */
  float LeftBranch; /**< used to optimize search pruning */
  float RightBranch; /**< used to optimize search pruning */
  KDNODE *Left; /**< ptrs for KD tree structure */
  KDNODE *Right;
};
struct KDTREE {
  /// @param n number of dimensions in each search key
  KDTREE(size_t n) : KeySize(n), KeyDesc(n) {
  }
  // The destructor frees all memory which is allocated to the
  // specified KD-tree. This includes the data structure for
  // the kd-tree itself plus the data structures for each node
  // in the tree. It does not include the Key and Data items
  // which are pointed to by the nodes. This memory is left
  // untouched.
  // (The node chain is released by Root's KDNODE destructor, which
  // recursively deletes Root.Left and Root.Right.)
  ~KDTREE() {
  }
  // TODO: KeySize might be replaced by KeyDesc.size().
  int16_t KeySize = 0; // number of dimensions in the tree
  KDNODE Root; // Root.Left points to actual root node
  std::vector<PARAM_DESC> KeyDesc; // description of each dimension
};
/// Builds a node holding key/data that branches on dimension Index; the
/// branch bounds start at that dimension's full [Min, Max] range.
inline KDNODE::KDNODE(KDTREE *tree, float key[], CLUSTER *data, int Index)
    : Key(key)
    , Data(data)
    , BranchPoint(key[Index])
    , LeftBranch(tree->KeyDesc[Index].Min)
    , RightBranch(tree->KeyDesc[Index].Max)
    , Left(nullptr)
    , Right(nullptr) {}
/*----------------------------------------------------------------------------
Macros
-----------------------------------------------------------------------------*/
#define RootOf(T) ((T)->Root.Left->Data)
/*-----------------------------------------------------------------------------
Public Function Prototypes
-----------------------------------------------------------------------------*/
KDTREE *MakeKDTree(int16_t KeySize, const PARAM_DESC KeyDesc[]);
void KDStore(KDTREE *Tree, float *Key, CLUSTER *Data);
void KDDelete(KDTREE *Tree, float Key[], void *Data);
void KDNearestNeighborSearch(KDTREE *Tree, float Query[], int QuerySize, float MaxDistance,
int *NumberOfResults, void **NBuffer, float DBuffer[]);
void KDWalk(KDTREE *Tree, kdwalk_proc Action, ClusteringContext *context);
/*-----------------------------------------------------------------------------
Private Function Prototypes
-----------------------------------------------------------------------------*/
float DistanceSquared(int k, PARAM_DESC *dim, float p1[], float p2[]);
TESS_API
float ComputeDistance(int k, PARAM_DESC *dim, float p1[], float p2[]);
int QueryInSearch(KDTREE *tree);
void Walk(KDTREE *tree, kdwalk_proc action, ClusteringContext *context, KDNODE *SubTree, int32_t Level);
void InsertNodes(KDTREE *tree, KDNODE *nodes);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/kdtree.h
|
C++
|
apache-2.0
| 4,710
|
/******************************************************************************
** Filename: mf.c
** Purpose: Micro-feature interface to flexible feature extractor.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------*/
#include "mf.h"

#include "featdefs.h"
#include "mfdefs.h"
#include "mfx.h"

#include <cmath>
#include <iterator>
namespace tesseract {
/*----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------*/
/**
 * Call the old micro-feature extractor and then copy
 * the features into the new format. Then deallocate the
 * old micro-features.
 * @param Blob blob to extract micro-features from
 * @param cn_denorm control parameter to feature extractor.
 * @return Micro-features for Blob, or nullptr if there are none.
 */
FEATURE_SET ExtractMicros(TBLOB *Blob, const DENORM &cn_denorm) {
  auto features = BlobMicroFeatures(Blob, cn_denorm);
  if (features.empty()) {
    return nullptr;
  }
  // forward_list has no size(); count the elements to size the set.
  const int n = static_cast<int>(std::distance(features.begin(), features.end()));
  auto FeatureSet = new FEATURE_SET_STRUCT(n);
  for (auto &f : features) {
    auto Feature = new FEATURE_STRUCT(&MicroFeatureDesc);
    for (int i = 0; i < (int)MicroFeatureParameter::MFCount; ++i)
      Feature->Params[i] = f[i];
    // Bulge features are deprecated and should not be used. Set to 0.
    Feature->Params[(int)MicroFeatureParameter::MFBulge1] = 0.0f;
    Feature->Params[(int)MicroFeatureParameter::MFBulge2] = 0.0f;
#ifndef _WIN32
    // Assert that feature parameters are well defined.
    for (int i = 0; i < Feature->Type->NumParams; i++) {
      ASSERT_HOST(!std::isnan(Feature->Params[i]));
    }
#endif
    AddFeature(FeatureSet, Feature);
  }
  return FeatureSet;
} /* ExtractMicros */
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/mf.cpp
|
C++
|
apache-2.0
| 2,668
|
/******************************************************************************
** Filename: mf.h
** Purpose: Micro-feature interface to flexible feature extractor.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef MF_H
#define MF_H
#include "blobs.h"
#include "ocrfeatures.h"
namespace tesseract {
FEATURE_SET ExtractMicros(TBLOB *Blob, const DENORM &cn_denorm);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/mf.h
|
C++
|
apache-2.0
| 1,086
|
/******************************************************************************
** Filename: mfdefs.h
** Purpose: Definition of micro-features
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef MFDEFS_H
#define MFDEFS_H
#include <array>
#include <forward_list>
namespace tesseract {
// Parameter indices into a MicroFeature array.
enum class MicroFeatureParameter {
  MFXPosition, // x coordinate of the feature's midpoint
  MFYPosition, // y coordinate of the feature's midpoint
  MFLength,    // length of the feature segment
  MFDirection, // normalized angle of the segment
  MFBulge1,    // deprecated: always written as 0
  MFBulge2,    // deprecated: always written as 0
  MFCount // For array sizes.
};
// One micro-feature: a float per parameter above.
using MicroFeature = std::array<float, (int)MicroFeatureParameter::MFCount>;
// List of micro-features extracted from one blob.
using MICROFEATURES = std::forward_list<MicroFeature>;
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/mfdefs.h
|
C++
|
apache-2.0
| 1,286
|
/******************************************************************************
** Filename: mfoutline.c
** Purpose: Interface to outline struct used for extracting features
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include "mfoutline.h"
#include "blobs.h"
#include "classify.h"
#include "clusttool.h" //If remove you get caught in a loop somewhere
#include "mfx.h"
#include "params.h"
#include <cmath>
#include <cstdio>
namespace tesseract {
/*---------------------------------------------------------------------------*/
/** Convert a blob into a list of MFOUTLINEs (float-based microfeature format).
*/
LIST ConvertBlob(TBLOB *blob) {
  // A null blob has no outlines to convert.
  if (blob == nullptr) {
    return NIL_LIST;
  }
  return ConvertOutlines(blob->outlines, NIL_LIST, outer);
}
/*---------------------------------------------------------------------------*/
/** Convert a TESSLINE into the float-based MFOUTLINE micro-feature format. */
MFOUTLINE ConvertOutline(TESSLINE *outline) {
  MFOUTLINE result = NIL_LIST;
  if (outline == nullptr || outline->loop == nullptr) {
    return result;
  }
  auto start = outline->loop;
  auto pt = start;
  do {
    auto next = pt->next;
    // Drop duplicate points so the outline contains no zero-length
    // segments.
    if (pt->pos.x != next->pos.x || pt->pos.y != next->pos.y) {
      auto edge = new MFEDGEPT;
      edge->ClearMark();
      edge->Hidden = pt->IsHidden();
      edge->Point.x = pt->pos.x;
      edge->Point.y = pt->pos.y;
      result = push(result, edge);
    }
    pt = next;
  } while (pt != start);
  if (result != nullptr) {
    // Close the list into a loop so traversal wraps around.
    MakeOutlineCircular(result);
  }
  return result;
}
/*---------------------------------------------------------------------------*/
/**
* Convert a tree of outlines to a list of MFOUTLINEs (lists of MFEDGEPTs).
*
* @param outline first outline to be converted
* @param mf_outlines list to add converted outlines to
* @param outline_type are the outlines outer or holes?
*/
LIST ConvertOutlines(TESSLINE *outline, LIST mf_outlines, OUTLINETYPE outline_type) {
  // Convert each outline in the chain; empty conversions are skipped.
  for (; outline != nullptr; outline = outline->next) {
    MFOUTLINE converted = ConvertOutline(outline);
    if (converted != nullptr) {
      mf_outlines = push(mf_outlines, converted);
    }
  }
  return mf_outlines;
}
/*---------------------------------------------------------------------------*/
/**
* This routine searches through the specified outline, computes
* a slope for each vector in the outline, and marks each
* vector as having one of the following directions:
* N, S, E, W, NE, NW, SE, SW
* This information is then stored in the outline and the
* outline is returned.
* @param Outline micro-feature outline to analyze
* @param MinSlope controls "snapping" of segments to horizontal
* @param MaxSlope controls "snapping" of segments to vertical
*/
void FindDirectionChanges(MFOUTLINE Outline, float MinSlope, float MaxSlope) {
  if (DegenerateOutline(Outline)) {
    return;
  }
  // Walk the circular outline once, assigning each point the quantized
  // direction of the vector from it to its successor.
  MFEDGEPT *prev = PointAt(Outline);
  Outline = NextPointAfter(Outline);
  MFOUTLINE node = Outline;
  do {
    MFEDGEPT *cur = PointAt(node);
    ComputeDirection(prev, cur, MinSlope, MaxSlope);
    prev = cur;
    node = NextPointAfter(node);
  } while (node != Outline);
} /* FindDirectionChanges */
/*---------------------------------------------------------------------------*/
/**
* This routine deallocates all of the memory consumed by
* a micro-feature outline.
* @param arg micro-feature outline to be freed
*/
void FreeMFOutline(void *arg) { // MFOUTLINE Outline)
auto Outline = static_cast<MFOUTLINE>(arg);
/* break the circular outline so we can use std. techniques to deallocate */
MFOUTLINE Start = Outline->list_rest();
set_rest(Outline, NIL_LIST);
while (Start != nullptr) {
delete reinterpret_cast<MFEDGEPT *>(Start->first_node());
Start = pop(Start);
}
} /* FreeMFOutline */
/*---------------------------------------------------------------------------*/
/**
* Release all memory consumed by the specified list
* of outlines.
* @param Outlines list of mf-outlines to be freed
*/
void FreeOutlines(LIST Outlines) {
  // Each node's payload is an MFOUTLINE; FreeMFOutline releases its
  // edge points and list cells.
  destroy_nodes(Outlines, FreeMFOutline);
} /* FreeOutlines */
/*---------------------------------------------------------------------------*/
/**
* This routine searches through the specified outline and finds
* the points at which the outline changes direction. These
* points are then marked as "extremities". This routine is
* used as an alternative to FindExtremities(). It forces the
* endpoints of the microfeatures to be at the direction
* changes rather than at the midpoint between direction
* changes.
* @param Outline micro-feature outline to analyze
*/
void MarkDirectionChanges(MFOUTLINE Outline) {
  if (DegenerateOutline(Outline)) {
    return;
  }
  // Visit every direction change around the circular outline and flag
  // the point at each change as an extremity.
  MFOUTLINE first = NextDirectionChange(Outline);
  MFOUTLINE previous = first;
  do {
    MFOUTLINE change = NextDirectionChange(previous);
    PointAt(change)->MarkPoint();
    previous = change;
  } while (previous != first);
} /* MarkDirectionChanges */
/*---------------------------------------------------------------------------*/
/**
* This routine returns the next point in the micro-feature
* outline that is an extremity. The search starts after
* EdgePoint. The routine assumes that the outline being
* searched is not a degenerate outline (i.e. it must have
* 2 or more edge points).
* @param EdgePoint start search from this point
* @return Next extremity in the outline after EdgePoint.
* @note Globals: none
*/
MFOUTLINE NextExtremity(MFOUTLINE EdgePoint) {
  // Always advance at least one point before testing, so repeated
  // calls progress around the (non-degenerate) circular outline.
  do {
    EdgePoint = NextPointAfter(EdgePoint);
  } while (!PointAt(EdgePoint)->ExtremityMark);
  return EdgePoint;
} /* NextExtremity */
/*---------------------------------------------------------------------------*/
/**
* This routine normalizes the coordinates of the specified
* outline so that the outline is deskewed down to the
* baseline, translated so that x=0 is at XOrigin, and scaled
* so that the height of a character cell from descender to
* ascender is 1. Of this height, 0.25 is for the descender,
* 0.25 for the ascender, and 0.5 for the x-height. The
* y coordinate of the baseline is 0.
* @param Outline outline to be normalized
* @param XOrigin x-origin of text
*/
void NormalizeOutline(MFOUTLINE Outline, float XOrigin) {
  if (Outline == NIL_LIST) {
    return;
  }
  MFOUTLINE node = Outline;
  do {
    MFEDGEPT *edge = PointAt(node);
    // Shift the baseline to y = 0, move x = 0 to XOrigin, and scale so
    // a full character cell (descender to ascender) is one unit high.
    edge->Point.x = MF_SCALE_FACTOR * (edge->Point.x - XOrigin);
    edge->Point.y = MF_SCALE_FACTOR * (edge->Point.y - kBlnBaselineOffset);
    node = NextPointAfter(node);
  } while (node != Outline);
} /* NormalizeOutline */
/*---------------------------------------------------------------------------*/
/**
* This routine normalizes every outline in Outlines
* according to the currently selected normalization method.
* It also returns the scale factors that it used to do this
* scaling. The scale factors returned represent the x and
* y sizes in the normalized coordinate system that correspond
* to 1 pixel in the original coordinate system.
* Outlines are changed and XScale and YScale are updated.
*
* Globals:
* - classify_norm_method method being used for normalization
* - classify_char_norm_range map radius of gyration to this value
* @param Outlines list of outlines to be normalized
* @param XScale x-direction scale factor used by routine
* @param YScale y-direction scale factor used by routine
*/
void Classify::NormalizeOutlines(LIST Outlines, float *XScale, float *YScale) {
  MFOUTLINE Outline;
  switch (classify_norm_method) {
    case character:
      // Character normalization is handled elsewhere (see
      // CharNormalizeOutline); reaching this case is a programming error.
      ASSERT_HOST(!"How did NormalizeOutlines get called in character mode?");
      break;
    case baseline:
      iterate(Outlines) {
        Outline = static_cast<MFOUTLINE>(Outlines->first_node());
        NormalizeOutline(Outline, 0.0);
      }
      // Baseline normalization scales x and y uniformly.
      *XScale = *YScale = MF_SCALE_FACTOR;
      break;
  }
} /* NormalizeOutlines */
/*----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------*/
/**
* Change the direction of every vector in the specified
* outline segment to Direction. The segment to be changed
* starts at Start and ends at End. Note that the previous
* direction of End must also be changed to reflect the
* change in direction of the point before it.
* @param Start defines start of segment of outline to be modified
* @param End defines end of segment of outline to be modified
* @param Direction new direction to assign to segment
*/
void ChangeDirection(MFOUTLINE Start, MFOUTLINE End, DIRECTION Direction) {
  // Overwrite the direction of every point in [Start, End).
  MFOUTLINE node = Start;
  while (node != End) {
    PointAt(node)->Direction = Direction;
    node = NextPointAfter(node);
  }
  // End's incoming segment changed too, so its record of the previous
  // point's direction must be updated to match.
  PointAt(End)->PreviousDirection = Direction;
} /* ChangeDirection */
/**
* This routine normalizes each point in Outline by
* translating it to the specified center and scaling it
* anisotropically according to the given scale factors.
* @param Outline outline to be character normalized
* @param cn_denorm
*/
void CharNormalizeOutline(MFOUTLINE Outline, const DENORM &cn_denorm) {
  if (Outline == NIL_LIST) {
    return;
  }
  MFOUTLINE node = Outline;
  do {
    MFEDGEPT *edge = PointAt(node);
    FCOORD pos(edge->Point.x, edge->Point.y);
    cn_denorm.LocalNormTransform(pos, &pos);
    // Map from the integer feature range [0, 255] to the clusterer
    // range [-0.5, 0.5].
    edge->Point.x = (pos.x() - UINT8_MAX / 2) * MF_SCALE_FACTOR;
    edge->Point.y = (pos.y() - UINT8_MAX / 2) * MF_SCALE_FACTOR;
    node = NextPointAfter(node);
  } while (node != Outline);
} /* CharNormalizeOutline */
/**
* This routine computes the slope from Start to Finish and
* and then computes the approximate direction of the line
* segment from Start to Finish. The direction is quantized
* into 8 buckets:
* N, S, E, W, NE, NW, SE, SW
* Both the slope and the direction are then stored into
* the appropriate fields of the Start edge point. The
* direction is also stored into the PreviousDirection field
* of the Finish edge point.
* @param Start starting point to compute direction from
* @param Finish finishing point to compute direction to
* @param MinSlope slope below which lines are horizontal
* @param MaxSlope slope above which lines are vertical
*/
void ComputeDirection(MFEDGEPT *Start, MFEDGEPT *Finish, float MinSlope, float MaxSlope) {
  FVECTOR Delta;
  Delta.x = Finish->Point.x - Start->Point.x;
  Delta.y = Finish->Point.y - Start->Point.y;
  if (Delta.x == 0) {
    // Vertical segment: slope is +/- infinity (encoded as +/-FLT_MAX).
    if (Delta.y < 0) {
      Start->Slope = -FLT_MAX;
      Start->Direction = south;
    } else {
      Start->Slope = FLT_MAX;
      Start->Direction = north;
    }
  } else {
    Start->Slope = Delta.y / Delta.x;
    // Quantize the slope into one of 8 directions. Segments with
    // |slope| <= MinSlope snap to horizontal (east/west); segments with
    // |slope| >= MaxSlope snap to vertical (north/south).
    if (Delta.x > 0) {
      // Right half-plane: east, northeast, north, southeast or south.
      if (Delta.y > 0) {
        if (Start->Slope > MinSlope) {
          if (Start->Slope < MaxSlope) {
            Start->Direction = northeast;
          } else {
            Start->Direction = north;
          }
        } else {
          Start->Direction = east;
        }
      } else if (Start->Slope < -MinSlope) {
        if (Start->Slope > -MaxSlope) {
          Start->Direction = southeast;
        } else {
          Start->Direction = south;
        }
      } else {
        Start->Direction = east;
      }
    } else if (Delta.y > 0) {
      // Upper-left quadrant: west, northwest or north.
      if (Start->Slope < -MinSlope) {
        if (Start->Slope > -MaxSlope) {
          Start->Direction = northwest;
        } else {
          Start->Direction = north;
        }
      } else {
        Start->Direction = west;
      }
    } else if (Start->Slope > MinSlope) {
      // Lower-left quadrant: west, southwest or south.
      if (Start->Slope < MaxSlope) {
        Start->Direction = southwest;
      } else {
        Start->Direction = south;
      }
    } else {
      Start->Direction = west;
    }
  }
  // The segment entering Finish is the one leaving Start.
  Finish->PreviousDirection = Start->Direction;
}
/**
* This routine returns the next point in the micro-feature
* outline that has a direction different than EdgePoint. The
* routine assumes that the outline being searched is not a
* degenerate outline (i.e. it must have 2 or more edge points).
* @param EdgePoint start search from this point
* @return Point of next direction change in micro-feature outline.
* @note Globals: none
*/
MFOUTLINE NextDirectionChange(MFOUTLINE EdgePoint) {
  DIRECTION InitialDirection;
  InitialDirection = PointAt(EdgePoint)->Direction;
  MFOUTLINE next_pt = nullptr;
  // Advance until the direction differs from the starting direction,
  // or the current point is hidden, or the point after it is hidden
  // (so features never span into a hidden stretch of the outline).
  do {
    EdgePoint = NextPointAfter(EdgePoint);
    next_pt = NextPointAfter(EdgePoint);
  } while (PointAt(EdgePoint)->Direction == InitialDirection && !PointAt(EdgePoint)->Hidden &&
           next_pt != nullptr && !PointAt(next_pt)->Hidden);
  return (EdgePoint);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/mfoutline.cpp
|
C++
|
apache-2.0
| 13,884
|
/******************************************************************************
** Filename: mfoutline.h
** Purpose: Interface spec for fx outline structures
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef MFOUTLINE_H
#define MFOUTLINE_H
#include "blobs.h"
#include "fpoint.h"
#include "oldlist.h"
#include "params.h"
namespace tesseract {
using MFOUTLINE = LIST;
enum DIRECTION : uint8_t { north, south, east, west, northeast, northwest, southeast, southwest };
// One edge point of a micro-feature outline (a circular LIST of these).
struct MFEDGEPT {
  // Inline functions for manipulating micro-feature outline edge points.
  void ClearMark() {
    ExtremityMark = false;
  }
  void MarkPoint() {
    ExtremityMark = true;
  }
  FPOINT Point;       // position of the edge point
  float Slope;        // slope of the segment leaving this point
  bool Hidden;        // copied from the source EDGEPT; hidden points are
                      // skipped when extracting micro-features
  bool ExtremityMark; // true if this point ends/starts a micro-feature
  DIRECTION Direction;         // quantized direction of the outgoing segment
  DIRECTION PreviousDirection; // quantized direction of the incoming segment
};
enum OUTLINETYPE { outer, hole };
enum NORM_METHOD { baseline, character };
/**----------------------------------------------------------------------------
Macros
----------------------------------------------------------------------------**/
#define AverageOf(A, B) (((A) + (B)) / 2)
// Constant for computing the scale factor to use to normalize characters.
const float MF_SCALE_FACTOR = 0.5f / kBlnXHeight;
// Inline functions for manipulating micro-feature outlines.
// An outline is degenerate when it is empty or is a single-node circular
// list (the node's rest points back to itself).
static inline bool DegenerateOutline(MFOUTLINE Outline) {
  return (Outline == NIL_LIST) || (Outline == Outline->list_rest());
}
// The edge point is stored as the list node's data payload.
static inline MFEDGEPT *PointAt(MFOUTLINE Outline) {
  return reinterpret_cast<MFEDGEPT *>(Outline->first_node());
}
static inline MFOUTLINE NextPointAfter(MFOUTLINE Outline) {
  return Outline->list_rest();
}
// Link the last node back to the first to close the outline into a loop.
static inline void MakeOutlineCircular(MFOUTLINE Outline) {
  set_rest(last(Outline), Outline);
}
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
void ComputeBlobCenter(TBLOB *Blob, TPOINT *BlobCenter);
LIST ConvertBlob(TBLOB *Blob);
MFOUTLINE ConvertOutline(TESSLINE *Outline);
LIST ConvertOutlines(TESSLINE *Outline, LIST ConvertedOutlines, OUTLINETYPE OutlineType);
void FilterEdgeNoise(MFOUTLINE Outline, float NoiseSegmentLength);
void FindDirectionChanges(MFOUTLINE Outline, float MinSlope, float MaxSlope);
void FreeMFOutline(void *agr); // MFOUTLINE Outline);
void FreeOutlines(LIST Outlines);
void MarkDirectionChanges(MFOUTLINE Outline);
MFOUTLINE NextExtremity(MFOUTLINE EdgePoint);
void NormalizeOutline(MFOUTLINE Outline, float XOrigin);
/*----------------------------------------------------------------------------
Private Function Prototypes
-----------------------------------------------------------------------------*/
void ChangeDirection(MFOUTLINE Start, MFOUTLINE End, DIRECTION Direction);
// Normalizes the Outline in-place using cn_denorm's local transformation,
// then converts from the integer feature range [0,255] to the clusterer
// feature range of [-0.5, 0.5].
void CharNormalizeOutline(MFOUTLINE Outline, const DENORM &cn_denorm);
void ComputeDirection(MFEDGEPT *Start, MFEDGEPT *Finish, float MinSlope, float MaxSlope);
MFOUTLINE NextDirectionChange(MFOUTLINE EdgePoint);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/mfoutline.h
|
C++
|
apache-2.0
| 3,922
|
/******************************************************************************
** Filename: mfx.c
** Purpose: Micro feature extraction routines
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*****************************************************************************/
#include "mfx.h"
#include "clusttool.h" //NEEDED
#include "intfx.h"
#include "mfdefs.h"
#include "mfoutline.h"
#include "normalis.h"
#include "params.h"
namespace tesseract {
/* old numbers corresponded to 10.0 degrees and 80.0 degrees */
double_VAR(classify_min_slope, 0.414213562, "Slope below which lines are called horizontal");
double_VAR(classify_max_slope, 2.414213562, "Slope above which lines are called vertical");
/*----------------------------------------------------------------------------
Private Function Prototypes
-----------------------------------------------------------------------------*/
MICROFEATURES ConvertToMicroFeatures(MFOUTLINE Outline, MICROFEATURES MicroFeatures);
MicroFeature ExtractMicroFeature(MFOUTLINE Start, MFOUTLINE End);
/*----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------*/
/**
* This routine extracts micro-features from the specified
* blob and returns a list of the micro-features. All
* micro-features are normalized according to the specified
* line statistics.
* @param Blob blob to extract micro-features from
* @param cn_denorm control parameter to feature extractor
* @return List of micro-features extracted from the blob.
*/
MICROFEATURES BlobMicroFeatures(TBLOB *Blob, const DENORM &cn_denorm) {
  MICROFEATURES results;
  if (Blob != nullptr) {
    LIST outlines = ConvertBlob(Blob);
    // First pass: character-normalize every outline.
    LIST remaining = outlines;
    iterate(remaining) {
      auto outline = static_cast<MFOUTLINE>(remaining->first_node());
      CharNormalizeOutline(outline, cn_denorm);
    }
    // Second pass: classify and mark direction changes, then convert
    // each outline into micro-features.
    remaining = outlines;
    iterate(remaining) {
      auto outline = static_cast<MFOUTLINE>(remaining->first_node());
      FindDirectionChanges(outline, classify_min_slope, classify_max_slope);
      MarkDirectionChanges(outline);
      results = ConvertToMicroFeatures(outline, results);
    }
    FreeOutlines(outlines);
  }
  return results;
} /* BlobMicroFeatures */
/*---------------------------------------------------------------------------
Private Code
---------------------------------------------------------------------------*/
/**
* Convert Outline to MicroFeatures
* @param Outline outline to extract micro-features from
* @param MicroFeatures list of micro-features to add to
* @return List of micro-features with new features added to front.
* @note Globals: none
*/
MICROFEATURES ConvertToMicroFeatures(MFOUTLINE Outline, MICROFEATURES MicroFeatures) {
  if (DegenerateOutline(Outline)) {
    return MicroFeatures;
  }
  // Walk the extremities around the circular outline; each pair of
  // consecutive extremities bounds one micro-feature. Features ending
  // at a hidden point are skipped.
  MFOUTLINE first = NextExtremity(Outline);
  MFOUTLINE previous = first;
  do {
    MFOUTLINE current = NextExtremity(previous);
    if (!PointAt(current)->Hidden) {
      MicroFeatures.push_front(ExtractMicroFeature(previous, current));
    }
    previous = current;
  } while (previous != first);
  return MicroFeatures;
} /* ConvertToMicroFeatures */
/**
* This routine computes the feature parameters which describe
* the micro-feature that starts and Start and ends at End.
 * A new micro-feature is filled with the feature
 * parameters and returned by value. The routine assumes that
 * Start and End are not the same point.
 * @param Start starting point of micro-feature
 * @param End ending point of micro-feature
 * @return New micro-feature describing the segment from Start to End.
 * @note Globals: none
*/
MicroFeature ExtractMicroFeature(MFOUTLINE Start, MFOUTLINE End) {
  MFEDGEPT *begin = PointAt(Start);
  MFEDGEPT *finish = PointAt(End);
  MicroFeature feature;
  // Position is the midpoint of the segment; length and direction are
  // measured from the start point to the end point.
  feature[(int)MicroFeatureParameter::MFXPosition] = AverageOf(begin->Point.x, finish->Point.x);
  feature[(int)MicroFeatureParameter::MFYPosition] = AverageOf(begin->Point.y, finish->Point.y);
  feature[(int)MicroFeatureParameter::MFLength] = DistanceBetween(begin->Point, finish->Point);
  feature[(int)MicroFeatureParameter::MFDirection] =
      NormalizedAngleFrom(&begin->Point, &finish->Point, 1.0);
  // Bulge parameters are deprecated and always zero.
  feature[(int)MicroFeatureParameter::MFBulge1] = 0.0f;
  feature[(int)MicroFeatureParameter::MFBulge2] = 0.0f;
  return feature;
} /* ExtractMicroFeature */
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/mfx.cpp
|
C++
|
apache-2.0
| 5,410
|
/******************************************************************************
** Filename: mfx.h
** Purpose: Definition of micro-feature extraction routines
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef MFX_H
#define MFX_H
#include "mfdefs.h"
#include "params.h"
namespace tesseract {
class DENORM;
struct TBLOB;
/*----------------------------------------------------------------------------
Variables
----------------------------------------------------------------------------**/
/* old numbers corresponded to 10.0 degrees and 80.0 degrees */
extern double_VAR_H(classify_min_slope);
extern double_VAR_H(classify_max_slope);
/*----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
MICROFEATURES BlobMicroFeatures(TBLOB *Blob, const DENORM &cn_denorm);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/mfx.h
|
C++
|
apache-2.0
| 1,627
|
/******************************************************************************
** Filename: normfeat.c
** Purpose: Definition of char normalization features.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include "normfeat.h"
#include "featdefs.h"
#include "intfx.h"
#include "mfoutline.h"
namespace tesseract {
/** Return the length of the outline in baseline normalized form. */
float ActualOutlineLength(FEATURE Feature) {
  // Undo the LENGTH_COMPRESSION applied when the feature was extracted.
  return (Feature->Params[CharNormLength] * LENGTH_COMPRESSION);
}
/**
* Return the character normalization feature for a blob.
*
* The features returned are in a scale where the x-height has been
* normalized to live in the region y = [-0.25 .. 0.25]. Example ranges
* for English below are based on the Linux font collection on 2009-12-04:
*
* - Params[CharNormY]
* - The y coordinate of the grapheme's centroid.
* - English: [-0.27, 0.71]
*
* - Params[CharNormLength]
* - The length of the grapheme's outline (tiny segments discarded),
* divided by 10.0=LENGTH_COMPRESSION.
* - English: [0.16, 0.85]
*
* - Params[CharNormRx]
* - The radius of gyration about the x axis, as measured from CharNormY.
* - English: [0.011, 0.34]
*
* - Params[CharNormRy]
* - The radius of gyration about the y axis, as measured from
* the x center of the grapheme's bounding box.
* - English: [0.011, 0.31]
*/
FEATURE_SET ExtractCharNormFeatures(const INT_FX_RESULT_STRUCT &fx_info) {
  auto set = new FEATURE_SET_STRUCT(1);
  auto feature = new FEATURE_STRUCT(&CharNormDesc);
  // Scale all parameters into the normalized space described above
  // (x-height spans 0.5 units, baseline at y = 0); the outline length
  // is additionally divided by LENGTH_COMPRESSION.
  feature->Params[CharNormY] = MF_SCALE_FACTOR * (fx_info.Ymean - kBlnBaselineOffset);
  feature->Params[CharNormLength] = MF_SCALE_FACTOR * fx_info.Length / LENGTH_COMPRESSION;
  feature->Params[CharNormRx] = MF_SCALE_FACTOR * fx_info.Rx;
  feature->Params[CharNormRy] = MF_SCALE_FACTOR * fx_info.Ry;
  AddFeature(set, feature);
  return set;
} /* ExtractCharNormFeatures */
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/normfeat.cpp
|
C++
|
apache-2.0
| 2,669
|
/******************************************************************************
** Filename: normfeat.h
** Purpose: Definition of character normalization features.
** Author: Dan Johnson
** History: 12/14/90, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*****************************************************************************/
#ifndef NORMFEAT_H
#define NORMFEAT_H
#include "ocrfeatures.h"
namespace tesseract {
// Char-norm outline lengths are stored divided by this factor; see
// ActualOutlineLength for the inverse transform.
#define LENGTH_COMPRESSION (10.0)
struct INT_FX_RESULT_STRUCT;
// Index names for the four parameters of a char-norm feature:
// centroid y, compressed outline length, and radii of gyration about
// the x and y axes.
typedef enum { CharNormY, CharNormLength, CharNormRx, CharNormRy } NORM_PARAM_NAME;
// Returns the outline length of Feature with LENGTH_COMPRESSION undone.
float ActualOutlineLength(FEATURE Feature);
// Builds a one-feature set describing the char-norm parameters of a blob.
FEATURE_SET ExtractCharNormFeatures(const INT_FX_RESULT_STRUCT &fx_info);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/normfeat.h
|
C++
|
apache-2.0
| 1,322
|
/******************************************************************************
** Filename: normmatch.c
** Purpose: Simple matcher based on character normalization features.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------*/
#include "normmatch.h"
#include "classify.h"
#include "clusttool.h"
#include "helpers.h"
#include "normfeat.h"
#include "params.h"
#include "unicharset.h"
#include <cmath>
#include <cstdio>
#include <sstream> // for std::istringstream
namespace tesseract {
// Table of character-normalization prototypes, indexed by class id.
struct NORM_PROTOS {
  NORM_PROTOS(size_t n) : NumProtos(n), Protos(n) {
  }
  int NumParams = 0;              // # of parameters per prototype
  int NumProtos;                  // # of classes (== Protos.size())
  PARAM_DESC *ParamDesc = nullptr; // per-parameter descriptions (owned, delete[])
  std::vector<LIST> Protos;       // one PROTOTYPE list per class id
};
/*----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------*/
/**
* @name NormEvidenceOf
*
* Return the new type of evidence number corresponding to this
* normalization adjustment. The equation that represents the transform is:
* 1 / (1 + (NormAdj / midpoint) ^ curl)
*/
/**
 * Map a raw normalization distance onto (0, 1] via the sigmoid
 * 1 / (1 + (NormAdj / midpoint) ^ curl). Larger distances yield
 * evidence values closer to 0.
 */
static float NormEvidenceOf(float NormAdj) {
  const float scaled = NormAdj / static_cast<float>(classify_norm_adj_midpoint);
  float curled;
  // The two common integral curl settings avoid a std::pow call.
  if (classify_norm_adj_curl == 2) {
    curled = scaled * scaled;
  } else if (classify_norm_adj_curl == 3) {
    curled = scaled * scaled * scaled;
  } else {
    curled = std::pow(scaled, static_cast<float>(classify_norm_adj_curl));
  }
  return 1.0f / (1.0f + curled);
}
/*----------------------------------------------------------------------------
Variables
----------------------------------------------------------------------------*/
/** control knobs used to control the normalization adjustment process */
// Midpoint and exponent of the sigmoid in NormEvidenceOf; the midpoint is
// the distance at which the evidence value is exactly 0.5.
double_VAR(classify_norm_adj_midpoint, 32.0, "Norm adjust midpoint ...");
double_VAR(classify_norm_adj_curl, 2.0, "Norm adjust curl ...");
/** Weight of width variance against height and vertical position. */
const float kWidthErrorWeighting = 0.125f;
/*----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------*/
/**
* This routine compares Features against each character
* normalization proto for ClassId and returns the match
* rating of the best match.
* @param ClassId id of class to match against
* @param feature character normalization feature
* @param DebugMatch controls dump of debug info
*
* Globals:
* #NormProtos character normalization prototypes
*
* @return Best match rating for Feature against protos of ClassId.
*/
float Classify::ComputeNormMatch(CLASS_ID ClassId, const FEATURE_STRUCT &feature, bool DebugMatch) {
  // Out-of-range class ids are treated as noise requests below.
  if (ClassId >= NormProtos->NumProtos) {
    ClassId = NO_CLASS;
  }
  /* handle requests for classification as noise */
  if (ClassId == NO_CLASS) {
    /* kludge - clean up constants and make into control knobs later */
    // Noise "distance" grows with outline length and both radii; the
    // weights 500/8000 are hand-tuned magic constants.
    float Match = (feature.Params[CharNormLength] * feature.Params[CharNormLength] * 500.0f +
                   feature.Params[CharNormRx] * feature.Params[CharNormRx] * 8000.0f +
                   feature.Params[CharNormRy] * feature.Params[CharNormRy] * 8000.0f);
    return (1 - NormEvidenceOf(Match));
  }
  if (DebugMatch) {
    tprintf("\nChar norm for class %s\n", unicharset.id_to_unichar(ClassId));
  }
  LIST Protos = NormProtos->Protos[ClassId];
  if (Protos == nullptr) {
    // Avoid FP overflow in NormEvidenceOf.
    return 1.0f;
  }
  // Find the prototype with the smallest weighted squared distance.
  float BestMatch = FLT_MAX;
  iterate(Protos) {
    auto Proto = reinterpret_cast<PROTOTYPE *>(Protos->first_node());
    // Vertical position term.
    float Delta = feature.Params[CharNormY] - Proto->Mean[CharNormY];
    float Match = Delta * Delta * Proto->Weight.Elliptical[CharNormY];
    if (DebugMatch) {
      tprintf("YMiddle: Proto=%g, Delta=%g, Var=%g, Dist=%g\n", Proto->Mean[CharNormY], Delta,
              Proto->Weight.Elliptical[CharNormY], Match);
    }
    // Height term (Rx is the radius of gyration about the x axis).
    Delta = feature.Params[CharNormRx] - Proto->Mean[CharNormRx];
    Match += Delta * Delta * Proto->Weight.Elliptical[CharNormRx];
    if (DebugMatch) {
      tprintf("Height: Proto=%g, Delta=%g, Var=%g, Dist=%g\n", Proto->Mean[CharNormRx], Delta,
              Proto->Weight.Elliptical[CharNormRx], Match);
    }
    // Ry is width! See intfx.cpp.
    // The width term is down-weighted by kWidthErrorWeighting relative
    // to height and vertical position.
    Delta = feature.Params[CharNormRy] - Proto->Mean[CharNormRy];
    if (DebugMatch) {
      tprintf("Width: Proto=%g, Delta=%g, Var=%g\n", Proto->Mean[CharNormRy], Delta,
              Proto->Weight.Elliptical[CharNormRy]);
    }
    Delta = Delta * Delta * Proto->Weight.Elliptical[CharNormRy];
    Delta *= kWidthErrorWeighting;
    Match += Delta;
    if (DebugMatch) {
      tprintf("Total Dist=%g, scaled=%g, sigmoid=%g, penalty=%g\n", Match,
              Match / classify_norm_adj_midpoint, NormEvidenceOf(Match),
              256 * (1 - NormEvidenceOf(Match)));
    }
    if (Match < BestMatch) {
      BestMatch = Match;
    }
  }
  // Convert distance to a penalty rating: 0 = perfect match.
  return 1 - NormEvidenceOf(BestMatch);
} /* ComputeNormMatch */
/// Release the normalization prototype table and reset the member to
/// nullptr. Safe to call when no table is loaded.
void Classify::FreeNormProtos() {
  if (NormProtos == nullptr) {
    return;
  }
  // Each class owns a list of prototypes; free them all first.
  for (int i = 0; i < NormProtos->NumProtos; i++) {
    FreeProtoList(&NormProtos->Protos[i]);
  }
  delete[] NormProtos->ParamDesc;
  delete NormProtos;
  NormProtos = nullptr;
}
/**
* This routine allocates a new data structure to hold
* a set of character normalization protos. It then fills in
* the data structure by reading from the specified File.
* @param fp open text file to read normalization protos from
* Globals: none
* @return Character normalization protos.
*/
NORM_PROTOS *Classify::ReadNormProtos(TFile *fp) {
  char unichar[2 * UNICHAR_LEN + 1];
  UNICHAR_ID unichar_id;
  LIST Protos;
  int NumProtos;
  /* allocate and initialization data structure */
  // NOTE: this local deliberately shadows the Classify::NormProtos member;
  // the freshly built table is returned to the caller.
  auto NormProtos = new NORM_PROTOS(unicharset.size());
  /* read file header and save in data structure */
  NormProtos->NumParams = ReadSampleSize(fp);
  NormProtos->ParamDesc = ReadParamDesc(fp, NormProtos->NumParams);
  /* read protos for each class into a separate list */
  const int kMaxLineSize = 100;
  char line[kMaxLineSize];
  // Each class starts with a "<unichar> <count>" header line followed by
  // <count> prototype records.
  while (fp->FGets(line, kMaxLineSize) != nullptr) {
    std::istringstream stream(line);
    // "C" locale: the numeric format must not depend on the user's locale.
    stream.imbue(std::locale::classic());
    stream >> unichar >> NumProtos;
    if (stream.fail()) {
      // Malformed header line; skip it.
      continue;
    }
    if (unicharset.contains_unichar(unichar)) {
      unichar_id = unicharset.unichar_to_id(unichar);
      Protos = NormProtos->Protos[unichar_id];
      for (int i = 0; i < NumProtos; i++) {
        Protos = push_last(Protos, ReadPrototype(fp, NormProtos->NumParams));
      }
      NormProtos->Protos[unichar_id] = Protos;
    } else {
      tprintf("Error: unichar %s in normproto file is not in unichar set.\n", unichar);
      // Still consume (and discard) the prototype records so the stream
      // stays in sync for the next class.
      for (int i = 0; i < NumProtos; i++) {
        FreePrototype(ReadPrototype(fp, NormProtos->NumParams));
      }
    }
  }
  return NormProtos;
} /* ReadNormProtos */
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/normmatch.cpp
|
C++
|
apache-2.0
| 7,788
|
/******************************************************************************
** Filename: normmatch.h
** Purpose: Simple matcher based on character normalization features.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef NORMMATCH_H
#define NORMMATCH_H
#include "matchdefs.h"
#include "ocrfeatures.h"
#include "params.h"
namespace tesseract {
/* control knobs used to control the normalization adjustment process */
// Midpoint and exponent of the evidence sigmoid; defined in normmatch.cpp.
extern double_VAR_H(classify_norm_adj_midpoint);
extern double_VAR_H(classify_norm_adj_curl);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/normmatch.h
|
C++
|
apache-2.0
| 1,236
|
/******************************************************************************
** Filename: ocrfeatures.cpp
** Purpose: Generic definition of a feature.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include "ocrfeatures.h"
#include "scanutils.h"
#include <cassert>
#include <cmath>
#include <sstream> // for std::stringstream
namespace tesseract {
/*----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------*/
/**
* Add a feature to a feature set. If the feature set is
* already full, false is returned to indicate that the
* feature could not be added to the set; otherwise, true is
* returned.
* @param FeatureSet set of features to add Feature to
* @param Feature feature to be added to FeatureSet
* @return true if feature added to set, false if set is already full.
*/
bool AddFeature(FEATURE_SET FeatureSet, FEATURE Feature) {
  const bool has_room = FeatureSet->NumFeatures < FeatureSet->MaxNumFeatures;
  if (has_room) {
    FeatureSet->Features[FeatureSet->NumFeatures] = Feature;
    ++FeatureSet->NumFeatures;
  } else {
    // Ownership transfers on every call, so a rejected feature must be
    // disposed of here to avoid a leak.
    delete Feature;
  }
  return has_room;
} /* AddFeature */
/**
* Create a new feature of the specified type and read in
* the value of its parameters from File. The extra penalty
* for the feature is also computed by calling the appropriate
* function for the specified feature type. The correct text
* representation for a feature is a list of N floats where
* N is the number of parameters in the feature.
* @param File open text file to read feature from
* @param FeatureDesc specifies type of feature to read from File
* @return New #FEATURE read from File.
*/
static FEATURE ReadFeature(FILE *File, const FEATURE_DESC_STRUCT *FeatureDesc) {
  auto Feature = new FEATURE_STRUCT(FeatureDesc);
  // Read exactly one float per parameter; abort hard on malformed input.
  for (int i = 0; i < Feature->Type->NumParams; i++) {
    ASSERT_HOST(tfscanf(File, "%f", &(Feature->Params[i])) == 1);
#ifndef _WIN32
    // Debug-build sanity check: training data must never contain NaNs.
    assert(!std::isnan(Feature->Params[i]));
#endif
  }
  return Feature;
}
/**
* Create a new feature set of the specified type and read in
* the features from File. The correct text representation
* for a feature set is an integer which specifies the number (N)
* of features in a set followed by a list of N feature
* descriptions.
* @param File open text file to read new feature set from
* @param FeatureDesc specifies type of feature to read from File
* @return New feature set read from File.
*/
FEATURE_SET ReadFeatureSet(FILE *File, const FEATURE_DESC_STRUCT *FeatureDesc) {
  // The text format is a non-negative count followed by that many features.
  int count;
  ASSERT_HOST(tfscanf(File, "%d", &count) == 1);
  ASSERT_HOST(count >= 0);
  auto set = new FEATURE_SET_STRUCT(count);
  for (int remaining = count; remaining > 0; --remaining) {
    AddFeature(set, ReadFeature(File, FeatureDesc));
  }
  return set;
}
/**
* Appends a textual representation of Feature to str.
* This representation is simply a list of the N parameters
* of the feature, terminated with a newline. It is assumed
* that the ExtraPenalty field can be reconstructed from the
* parameters of the feature. It is also assumed that the
* feature type information is specified or assumed elsewhere.
* @param Feature feature to write out to str
* @param str string to write Feature to
*/
static void WriteFeature(FEATURE Feature, std::string &str) {
  // Append " p0 p1 ... pn\n" to str; the extra-penalty and type info are
  // assumed reconstructible elsewhere.
  for (int i = 0; i < Feature->Type->NumParams; i++) {
// Fix: use the predefined _WIN32 macro (matching ReadFeature above);
// plain WIN32 is not guaranteed to be defined by the compiler.
#ifndef _WIN32
    assert(!std::isnan(Feature->Params[i]));
#endif
    std::stringstream stream;
    // Use "C" locale (needed for double value).
    stream.imbue(std::locale::classic());
    // Use 8 digits for double value.
    stream.precision(8);
    stream << Feature->Params[i];
    str += " " + stream.str();
  }
  str += "\n";
} /* WriteFeature */
/**
* Write a textual representation of FeatureSet to File.
* This representation is an integer specifying the number of
* features in the set, followed by a newline, followed by
* text representations for each feature in the set.
* @param FeatureSet feature set to write to File
* @param str string to write Feature to
*/
/// Append a textual representation of FeatureSet to str: the feature
/// count, a newline, then one line per feature. A null set writes nothing.
void WriteFeatureSet(FEATURE_SET FeatureSet, std::string &str) {
  if (FeatureSet) {
    // Fix: dropped the pointless `"" +` prefix, which only built an
    // extra temporary std::string before appending.
    str += std::to_string(FeatureSet->NumFeatures);
    str += "\n";
    for (int i = 0; i < FeatureSet->NumFeatures; i++) {
      WriteFeature(FeatureSet->Features[i], str);
    }
  }
} /* WriteFeatureSet */
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/ocrfeatures.cpp
|
C++
|
apache-2.0
| 5,113
|
/******************************************************************************
** Filename: features.h
** Purpose: Generic definition of a feature.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef FEATURES_H
#define FEATURES_H
#include "blobs.h"
#include <cstdio>
#include <string> // for std::string
namespace tesseract {
class DENORM;
// Undefine possible platform/legacy macros so the field names below are
// usable — presumably guarding against <windows.h>-style Min/Max macros.
#undef Min
#undef Max
#define FEAT_NAME_SIZE 80
// A character is described by multiple sets of extracted features. Each
// set contains a number of features of a particular type, for example, a
// set of bays, or a set of closures, or a set of microfeatures. Each
// feature consists of a number of parameters. All features within a
// feature set contain the same number of parameters. All circular
// parameters are required to be the first parameters in the feature.
// Description of a single feature parameter (one dimension).
struct PARAM_DESC {
  bool Circular;     // true if dimension wraps around
  bool NonEssential; // true if dimension not used in searches
  float Min;         // low end of range for circular dimensions
  float Max;         // high end of range for circular dimensions
  float Range;       // Max - Min
  float HalfRange;   // (Max - Min)/2
  float MidRange;    // (Max + Min)/2
};
// Static description of a feature type: parameter count, a short name,
// and one PARAM_DESC per parameter.
struct FEATURE_DESC_STRUCT {
  uint16_t NumParams;          // total # of params
  const char *ShortName;       // short name for feature
  const PARAM_DESC *ParamDesc; // array - one per param
};
using FEATURE_DESC = FEATURE_DESC_STRUCT *;
// A single extracted feature: a pointer to its type description plus one
// float parameter per dimension.
struct FEATURE_STRUCT {
  /// Constructor for a new feature of the specified type.
  /// @param FeatureDesc description of feature to be created.
  FEATURE_STRUCT(const FEATURE_DESC_STRUCT *FeatureDesc) : Type(FeatureDesc), Params(FeatureDesc->NumParams) {
  }
  ~FEATURE_STRUCT() {
  }
  const FEATURE_DESC_STRUCT *Type; // points to description of feature type
  std::vector<float> Params;       // variable size array - params for feature
};
using FEATURE = FEATURE_STRUCT *;
// A bounded collection of features of one type. Owns its features: the
// destructor deletes every stored FEATURE_STRUCT.
struct FEATURE_SET_STRUCT {
  /// Creator for a new feature set large enough to
  /// hold the specified number of features.
  /// @param NumFeatures maximum # of features to be put in feature set
  FEATURE_SET_STRUCT(int numFeatures) : NumFeatures(0), MaxNumFeatures(numFeatures), Features(numFeatures) {
  }
  ~FEATURE_SET_STRUCT() {
    for (uint16_t i = 0; i < NumFeatures; i++) {
      delete Features[i];
    }
  }
  uint16_t NumFeatures;                   // number of features in set
  uint16_t MaxNumFeatures;                // maximum size of feature set
  std::vector<FEATURE_STRUCT *> Features; // variable size array of features
};
using FEATURE_SET = FEATURE_SET_STRUCT *;
// A generic character description as a char pointer. In reality, it will be
// a pointer to some data structure. Paired feature extractors/matchers need
// to agree on the data structure to be used, however, the high level
// classifier does not need to know the details of this data structure.
using CHAR_FEATURES = char *;
/*----------------------------------------------------------------------
        Macros for defining the parameters of a new features
----------------------------------------------------------------------*/
// DefineParam expands to a PARAM_DESC aggregate, deriving Range,
// HalfRange, and MidRange from Min/Max.
#define StartParamDesc(Name) const PARAM_DESC Name[] = {
#define DefineParam(Circular, NonEssential, Min, Max) \
  {Circular, \
   NonEssential, \
   Min, \
   Max, \
   (Max) - (Min), \
   (((Max) - (Min)) / 2.0), \
   (((Max) + (Min)) / 2.0)},
#define EndParamDesc \
  } \
  ;
/*----------------------------------------------------------------------
Macro for describing a new feature.  The parameters of the macro
are as follows:
DefineFeature (Name, NumLinear, NumCircular, ShortName, ParamName)
----------------------------------------------------------------------*/
#define DefineFeature(Name, NL, NC, SN, PN) \
  const FEATURE_DESC_STRUCT Name = {((NL) + (NC)), SN, PN};
/*----------------------------------------------------------------------
        Generic routines that work for all feature types
----------------------------------------------------------------------*/
// Appends Feature to FeatureSet (taking ownership); returns false and
// deletes Feature if the set is already full.
bool AddFeature(FEATURE_SET FeatureSet, FEATURE Feature);
// Reads a feature set in text form (count then per-feature lines).
FEATURE_SET ReadFeatureSet(FILE *File, const FEATURE_DESC_STRUCT *FeatureDesc);
// Appends a text representation of FeatureSet to str.
void WriteFeatureSet(FEATURE_SET FeatureSet, std::string &str);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/ocrfeatures.h
|
C++
|
apache-2.0
| 5,174
|
/******************************************************************************
** Filename: outfeat.c
** Purpose: Definition of outline-features.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include "outfeat.h"
#include "classify.h"
#include "featdefs.h"
#include "mfoutline.h"
#include "ocrfeatures.h"
#include <cstdio>
namespace tesseract {
/*----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------*/
/**
* Convert each segment in the outline to a feature
* and return the features.
* @param Blob blob to extract pico-features from
* @return Outline-features for Blob.
* @note Globals: none
*/
FEATURE_SET Classify::ExtractOutlineFeatures(TBLOB *Blob) {
  // One feature per outline segment, capped at MAX_OUTLINE_FEATURES.
  auto features = new FEATURE_SET_STRUCT(MAX_OUTLINE_FEATURES);
  if (Blob == nullptr) {
    return features;
  }
  auto outlines = ConvertBlob(Blob);
  float x_scale, y_scale;
  NormalizeOutlines(outlines, &x_scale, &y_scale);
  // Convert every outline of the blob in turn.
  auto cursor = outlines;
  iterate(cursor) {
    ConvertToOutlineFeatures(static_cast<MFOUTLINE>(cursor->first_node()), features);
  }
  // Baseline normalization leaves x unconstrained; re-center it here.
  if (classify_norm_method == baseline) {
    NormalizeOutlineX(features);
  }
  FreeOutlines(outlines);
  return features;
} /* ExtractOutlineFeatures */
/*----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/**
* This routine computes the midpoint between Start and
* End to obtain the x,y position of the outline-feature. It
* also computes the direction from Start to End as the
* direction of the outline-feature and the distance from
* Start to End as the length of the outline-feature.
* This feature is then
* inserted into the next feature slot in FeatureSet.
* @param Start starting point of outline-feature
* @param End ending point of outline-feature
* @param FeatureSet set to add outline-feature to
*/
void AddOutlineFeatureToSet(FPOINT *Start, FPOINT *End, FEATURE_SET FeatureSet) {
  auto segment = new FEATURE_STRUCT(&OutlineFeatDesc);
  // Position is the segment midpoint; direction and length come from the
  // Start->End vector.
  segment->Params[OutlineFeatX] = AverageOf(Start->x, End->x);
  segment->Params[OutlineFeatY] = AverageOf(Start->y, End->y);
  segment->Params[OutlineFeatLength] = DistanceBetween(*Start, *End);
  segment->Params[OutlineFeatDir] = NormalizedAngleFrom(Start, End, 1.0);
  AddFeature(FeatureSet, segment);
} /* AddOutlineFeatureToSet */
/*---------------------------------------------------------------------------*/
/**
* This routine steps converts each section in the specified
* outline to a feature described by its x,y position, length
* and angle.
* Results are returned in FeatureSet.
* @param Outline outline to extract outline-features from
* @param FeatureSet set of features to add outline-features to
*/
void ConvertToOutlineFeatures(MFOUTLINE Outline, FEATURE_SET FeatureSet) {
  MFOUTLINE Next;
  MFOUTLINE First;
  FPOINT FeatureStart;
  FPOINT FeatureEnd;
  // Outlines with too few points produce no features.
  if (DegenerateOutline(Outline)) {
    return;
  }
  // Walk the closed outline once, emitting one feature per visible edge.
  First = Outline;
  Next = First;
  do {
    FeatureStart = PointAt(Next)->Point;
    Next = NextPointAfter(Next);
    /* note that an edge is hidden if the ending point of the edge is
   marked as hidden.  This situation happens because the order of
   the outlines is reversed when they are converted from the old
   format.  In the old format, a hidden edge is marked by the
   starting point for that edge. */
    if (!PointAt(Next)->Hidden) {
      FeatureEnd = PointAt(Next)->Point;
      AddOutlineFeatureToSet(&FeatureStart, &FeatureEnd, FeatureSet);
    }
  } while (Next != First);
} /* ConvertToOutlineFeatures */
/*---------------------------------------------------------------------------*/
/**
* This routine computes the weighted average x position
* over all of the outline-features in FeatureSet and then
* renormalizes the outline-features to force this average
* to be the x origin (i.e. x=0).
* FeatureSet is changed.
* @param FeatureSet outline-features to be normalized
*/
/**
 * Compute the length-weighted average x position over all outline-features
 * in FeatureSet and shift the features so that this average becomes the
 * x origin (x = 0). FeatureSet is modified in place.
 * @param FeatureSet outline-features to be normalized
 */
void NormalizeOutlineX(FEATURE_SET FeatureSet) {
  if (FeatureSet->NumFeatures <= 0) {
    return;
  }
  float TotalX = 0.0f;
  float TotalWeight = 0.0f;
  for (int i = 0; i < FeatureSet->NumFeatures; i++) {
    FEATURE Feature = FeatureSet->Features[i];
    float Length = Feature->Params[OutlineFeatLength];
    TotalX += Feature->Params[OutlineFeatX] * Length;
    TotalWeight += Length;
  }
  // Fix: if every segment has zero length the weighted mean is 0/0 (NaN),
  // which would poison all x coordinates below. Leave the set untouched.
  if (TotalWeight <= 0.0f) {
    return;
  }
  float Origin = TotalX / TotalWeight;
  for (int i = 0; i < FeatureSet->NumFeatures; i++) {
    FeatureSet->Features[i]->Params[OutlineFeatX] -= Origin;
  }
} /* NormalizeOutlineX */
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/outfeat.cpp
|
C++
|
apache-2.0
| 5,591
|
/******************************************************************************
** Filename: outfeat.h
** Purpose: Definition of outline features.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef OUTFEAT_H
#define OUTFEAT_H
#include "fpoint.h"
#include "mfoutline.h"
#include "ocrfeatures.h"
namespace tesseract {
// Parameter indices of an outline-feature: midpoint x/y, segment length,
// and normalized direction.
typedef enum {
  OutlineFeatX,
  OutlineFeatY,
  OutlineFeatLength,
  OutlineFeatDir
} OUTLINE_FEAT_PARAM_NAME;
// Cap on features per extracted set.
#define MAX_OUTLINE_FEATURES (100)
/*---------------------------------------------------------------------------
          Private Function Prototypes
----------------------------------------------------------------------------*/
// Appends one outline-feature describing the Start->End segment.
void AddOutlineFeatureToSet(FPOINT *Start, FPOINT *End, FEATURE_SET FeatureSet);
// Converts every visible edge of Outline into an outline-feature.
void ConvertToOutlineFeatures(MFOUTLINE Outline, FEATURE_SET FeatureSet);
// Re-centers the x coordinates around their length-weighted mean.
void NormalizeOutlineX(FEATURE_SET FeatureSet);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/outfeat.h
|
C++
|
apache-2.0
| 1,585
|
/******************************************************************************
** Filename: picofeat.c
** Purpose: Definition of pico-features.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include "picofeat.h"
#include "classify.h"
#include "featdefs.h"
#include "fpoint.h"
#include "mfoutline.h"
#include "ocrfeatures.h"
#include "params.h"
#include "trainingsample.h"
#include <cmath>
#include <cstdio>
namespace tesseract {
/*---------------------------------------------------------------------------
Variables
----------------------------------------------------------------------------*/
// Target length of a single pico-feature in normalized units.
double_VAR(classify_pico_feature_length, 0.05, "Pico Feature Length");
/*---------------------------------------------------------------------------
          Private Function Prototypes
----------------------------------------------------------------------------*/
// Splits the Start->End segment into evenly spaced pico-features.
void ConvertSegmentToPicoFeat(FPOINT *Start, FPOINT *End, FEATURE_SET FeatureSet);
// Converts every visible edge of Outline into pico-features.
void ConvertToPicoFeatures2(MFOUTLINE Outline, FEATURE_SET FeatureSet);
// Re-centers the x coordinates around their unweighted mean.
void NormalizePicoX(FEATURE_SET FeatureSet);
/*----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/**
* Operation: Dummy for now.
*
* Globals:
* - classify_norm_method normalization method currently specified
* @param Blob blob to extract pico-features from
* @return Pico-features for Blob.
*/
FEATURE_SET Classify::ExtractPicoFeatures(TBLOB *Blob) {
  auto features = new FEATURE_SET_STRUCT(MAX_PICO_FEATURES);
  auto outlines = ConvertBlob(Blob);
  float x_scale, y_scale;
  NormalizeOutlines(outlines, &x_scale, &y_scale);
  // Chop each outline of the blob into pico-features.
  auto cursor = outlines;
  iterate(cursor) {
    ConvertToPicoFeatures2(static_cast<MFOUTLINE>(cursor->first_node()), features);
  }
  // Baseline normalization leaves x unconstrained; re-center it here.
  if (classify_norm_method == baseline) {
    NormalizePicoX(features);
  }
  FreeOutlines(outlines);
  return features;
} /* ExtractPicoFeatures */
/*----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/**
* This routine converts an entire segment of an outline
* into a set of pico features which are added to
* FeatureSet. The length of the segment is rounded to the
* nearest whole number of pico-features. The pico-features
* are spaced evenly over the entire segment.
* Results are placed in FeatureSet.
* Globals:
* - classify_pico_feature_length length of a single pico-feature
* @param Start starting point of pico-feature
* @param End ending point of pico-feature
* @param FeatureSet set to add pico-feature to
*/
void ConvertSegmentToPicoFeat(FPOINT *Start, FPOINT *End, FEATURE_SET FeatureSet) {
  // Every pico-feature on this segment shares the segment's direction.
  const float Angle = NormalizedAngleFrom(Start, End, 1.0);
  const float Length = DistanceBetween(*Start, *End);
  // Round the segment length to a whole number of pico-features, but
  // always emit at least one.
  int count = static_cast<int>(floor(Length / classify_pico_feature_length + 0.5));
  if (count < 1) {
    count = 1;
  }
  /* compute vector for one pico feature */
  FPOINT step;
  step.x = XDelta(*Start, *End) / count;
  step.y = YDelta(*Start, *End) / count;
  /* compute position of first pico feature */
  FPOINT pos;
  pos.x = Start->x + step.x / 2.0;
  pos.y = Start->y + step.y / 2.0;
  /* compute each pico feature in segment and add to feature set */
  for (int i = 0; i < count; i++) {
    auto pico = new FEATURE_STRUCT(&PicoFeatDesc);
    pico->Params[PicoFeatDir] = Angle;
    pico->Params[PicoFeatX] = pos.x;
    pico->Params[PicoFeatY] = pos.y;
    AddFeature(FeatureSet, pico);
    pos.x += step.x;
    pos.y += step.y;
  }
} /* ConvertSegmentToPicoFeat */
/*---------------------------------------------------------------------------*/
/**
* This routine steps through the specified outline and cuts it
* up into pieces of equal length. These pieces become the
* desired pico-features. Each segment in the outline
* is converted into an integral number of pico-features.
* Results are returned in FeatureSet.
*
* Globals:
* - classify_pico_feature_length length of features to be extracted
* @param Outline outline to extract micro-features from
* @param FeatureSet set of features to add pico-features to
*/
void ConvertToPicoFeatures2(MFOUTLINE Outline, FEATURE_SET FeatureSet) {
  if (DegenerateOutline(Outline)) {
    return; // A degenerate outline yields no segments to convert.
  }
  const MFOUTLINE first_point = Outline;
  MFOUTLINE current = first_point;
  MFOUTLINE following = NextPointAfter(current);
  // Walk every edge of the closed outline exactly once.
  do {
    // An edge is hidden iff its ENDING point is marked hidden: outline order
    // was reversed when converting from the old format, where the hidden flag
    // was carried by the edge's starting point.
    if (!(PointAt(following)->Hidden)) {
      ConvertSegmentToPicoFeat(&(PointAt(current)->Point), &(PointAt(following)->Point), FeatureSet);
    }
    current = following;
    following = NextPointAfter(current);
  } while (current != first_point);
} /* ConvertToPicoFeatures2 */
/*---------------------------------------------------------------------------*/
/**
* This routine computes the average x position over all
* of the pico-features in FeatureSet and then renormalizes
* the pico-features to force this average to be the x origin
* (i.e. x=0).
* FeatureSet is changed.
* @param FeatureSet pico-features to be normalized
*/
void NormalizePicoX(FEATURE_SET FeatureSet) {
  // First pass: average x position over all pico-features in the set.
  float mean_x = 0.0;
  for (int i = 0; i < FeatureSet->NumFeatures; i++) {
    mean_x += FeatureSet->Features[i]->Params[PicoFeatX];
  }
  mean_x /= FeatureSet->NumFeatures;
  // Second pass: shift every feature so the mean becomes the x origin (x=0).
  for (int i = 0; i < FeatureSet->NumFeatures; i++) {
    FeatureSet->Features[i]->Params[PicoFeatX] -= mean_x;
  }
} /* NormalizePicoX */
/*---------------------------------------------------------------------------*/
/**
* @param blob blob to extract features from
* @param fx_info
* @return Integer character-normalized features for blob.
*/
FEATURE_SET Classify::ExtractIntCNFeatures(const TBLOB &blob, const INT_FX_RESULT_STRUCT &fx_info) {
  // BlobToTrainingSample may modify the fx info, so work on a local copy.
  INT_FX_RESULT_STRUCT local_fx_info(fx_info);
  std::vector<INT_FEATURE_STRUCT> bl_features;
  tesseract::TrainingSample *sample =
      tesseract::BlobToTrainingSample(blob, false, &local_fx_info, &bl_features);
  if (sample == nullptr) {
    return nullptr; // Feature extraction failed for this blob.
  }
  const uint32_t count = sample->num_features();
  const INT_FEATURE_STRUCT *int_features = sample->features();
  auto result_set = new FEATURE_SET_STRUCT(count);
  // Wrap each integer feature in a FEATURE_STRUCT of IntFeatDesc type.
  for (uint32_t i = 0; i < count; ++i) {
    auto feature = new FEATURE_STRUCT(&IntFeatDesc);
    feature->Params[IntX] = int_features[i].X;
    feature->Params[IntY] = int_features[i].Y;
    feature->Params[IntDir] = int_features[i].Theta;
    AddFeature(result_set, feature);
  }
  delete sample;
  return result_set;
} /* ExtractIntCNFeatures */
/*---------------------------------------------------------------------------*/
/**
* @param blob blob to extract features from
* @param fx_info
* @return Geometric (top/bottom/width) features for blob.
*/
FEATURE_SET Classify::ExtractIntGeoFeatures(const TBLOB &blob,
                                            const INT_FX_RESULT_STRUCT &fx_info) {
  // BlobToTrainingSample may modify the fx info, so work on a local copy.
  INT_FX_RESULT_STRUCT local_fx_info(fx_info);
  std::vector<INT_FEATURE_STRUCT> bl_features;
  tesseract::TrainingSample *sample =
      tesseract::BlobToTrainingSample(blob, false, &local_fx_info, &bl_features);
  if (sample == nullptr) {
    return nullptr; // Feature extraction failed for this blob.
  }
  // A single feature carries all three geometric measurements.
  auto geo_set = new FEATURE_SET_STRUCT(1);
  auto geo = new FEATURE_STRUCT(&IntFeatDesc);
  geo->Params[GeoBottom] = sample->geo_feature(GeoBottom);
  geo->Params[GeoTop] = sample->geo_feature(GeoTop);
  geo->Params[GeoWidth] = sample->geo_feature(GeoWidth);
  AddFeature(geo_set, geo);
  delete sample;
  return geo_set;
} /* ExtractIntGeoFeatures */
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/classify/picofeat.cpp
|
C++
|
apache-2.0
| 9,006
|
/******************************************************************************
** Filename: picofeat.h
** Purpose: Definition of pico features.
** Author: Dan Johnson
** History: 9/4/90, DSJ, Created.
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef PICOFEAT_H
#define PICOFEAT_H
#include "ocrfeatures.h"
#include "params.h"
namespace tesseract {
// Enum for the order/type of params in IntFeatDesc.
// Used as indices into FEATURE_STRUCT::Params (see ExtractIntCNFeatures).
enum IntParams {
IntX, // x-position (0-255).
IntY, // y-position (0-255).
IntDir // Direction (0-255, circular).
};
// Enum for the order/type of params in GeoFeatDesc.
// Used as indices into FEATURE_STRUCT::Params (see ExtractIntGeoFeatures).
enum GeoParams {
GeoBottom, // Bounding box bottom in baseline space (0-255).
GeoTop, // Bounding box top in baseline space (0-255).
GeoWidth, // Bounding box width in baseline space (0-255).
GeoCount // Number of geo features.
};
// Param indices for a pico-feature. Note the declaration order: Y is
// param 0, direction is param 1, X is param 2.
typedef enum { PicoFeatY, PicoFeatDir, PicoFeatX } PICO_FEAT_PARAM_NAME;
#define MAX_PICO_FEATURES (1000)
/*---------------------------------------------------------------------------
Variables
----------------------------------------------------------------------------*/
extern double_VAR_H(classify_pico_feature_length);
/**----------------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------------**/
#define GetPicoFeatureLength() (PicoFeatureLength)
/**----------------------------------------------------------------------------
Global Data Definitions and Declarations
----------------------------------------------------------------------------**/
extern TESS_API float PicoFeatureLength;
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/picofeat.h
|
C++
|
apache-2.0
| 2,367
|
/******************************************************************************
*
* File: protos.cpp (Formerly protos.c)
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#define _USE_MATH_DEFINES // for M_PI
#include "protos.h"
#include "classify.h"
#include "intproto.h"
#include "params.h"
#include "tprintf.h"
#include <cmath> // for M_PI
#include <cstdio>
namespace tesseract {
#define PROTO_INCREMENT 32
#define CONFIG_INCREMENT 16
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**
* @name AddConfigToClass
*
* Add a new config to this class. Malloc new space and copy the
* old configs if necessary. Return the config id for the new config.
*
* @param Class The class to add to
*/
int AddConfigToClass(CLASS_TYPE Class) {
  ASSERT_HOST(Class->MaxNumProtos <= MAX_NUM_PROTOS);
  if (Class->NumConfigs >= Class->MaxNumConfigs) {
    // Grow the config array in CONFIG_INCREMENT chunks at a time.
    const int grown_size =
        ((Class->MaxNumConfigs + CONFIG_INCREMENT) / CONFIG_INCREMENT) * CONFIG_INCREMENT;
    Class->Configurations.resize(grown_size);
    Class->MaxNumConfigs = grown_size;
  }
  // Claim the next config slot and give it an empty proto bit vector.
  const int config_id = Class->NumConfigs++;
  BIT_VECTOR config = NewBitVector(MAX_NUM_PROTOS);
  zero_all_bits(config, WordsInVectorOfSize(MAX_NUM_PROTOS));
  Class->Configurations[config_id] = config;
  return config_id;
}
/**
* @name AddProtoToClass
*
* Add a new proto to this class. Malloc new space and copy the
* old protos if necessary. Return the proto id for the new proto.
*
* @param Class The class to add to
*/
int AddProtoToClass(CLASS_TYPE Class) {
  if (Class->NumProtos >= Class->MaxNumProtos) {
    // Grow the proto array in PROTO_INCREMENT chunks at a time.
    const int grown_size =
        ((Class->MaxNumProtos + PROTO_INCREMENT) / PROTO_INCREMENT) * PROTO_INCREMENT;
    Class->Prototypes.resize(grown_size);
    Class->MaxNumProtos = grown_size;
    ASSERT_HOST(grown_size <= MAX_NUM_PROTOS);
  }
  // Claim the next proto slot.
  const int proto_id = Class->NumProtos++;
  ASSERT_HOST(Class->NumProtos <= MAX_NUM_PROTOS);
  return proto_id;
}
/**********************************************************************
* FillABC
*
* Fill in Protos A, B, C fields based on the X, Y, Angle fields.
**********************************************************************/
void FillABC(PROTO_STRUCT *Proto) {
  // Convert the (X, Y, Angle) form of the proto into the normalized line
  // equation A*x + B*y + C = 0. Angle is a fraction of a full turn, hence
  // the 2*pi scaling before tan().
  const float slope = tan(Proto->Angle * 2.0 * M_PI);
  const float intercept = Proto->Y - slope * Proto->X;
  const float normalizer = 1.0 / sqrt(slope * slope + 1.0);
  Proto->A = slope * normalizer;
  Proto->B = -normalizer;
  Proto->C = intercept * normalizer;
}
/**********************************************************************
* FreeClass
*
* Deallocate the memory consumed by the specified class.
**********************************************************************/
void FreeClass(CLASS_TYPE Class) {
  // Release the class's owned sub-structures, then the class itself.
  // A null class is silently ignored.
  if (Class == nullptr) {
    return;
  }
  FreeClassFields(Class);
  delete Class;
}
/**********************************************************************
* FreeClassFields
*
* Deallocate the memory consumed by subfields of the specified class.
**********************************************************************/
void FreeClassFields(CLASS_TYPE Class) {
  if (Class == nullptr) {
    return;
  }
  // Only the first NumConfigs entries of Configurations hold live bit
  // vectors; entries beyond that are unused capacity, so do not iterate
  // the whole vector.
  for (int c = 0; c < Class->NumConfigs; ++c) {
    FreeBitVector(Class->Configurations[c]);
  }
}
/**********************************************************************
* NewClass
*
* Allocate a new class with enough memory to hold the specified number
* of prototypes and configurations.
**********************************************************************/
CLASS_TYPE NewClass(int NumProtos, int NumConfigs) {
  // Allocate a class with capacity for the requested number of protos and
  // configs; both counts start at zero and grow via AddProtoToClass /
  // AddConfigToClass.
  auto new_class = new CLASS_STRUCT;
  new_class->Prototypes.resize(NumProtos);
  new_class->Configurations.resize(NumConfigs);
  new_class->MaxNumProtos = NumProtos;
  new_class->MaxNumConfigs = NumConfigs;
  new_class->NumProtos = 0;
  new_class->NumConfigs = 0;
  return new_class;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/protos.cpp
|
C++
|
apache-2.0
| 5,035
|
/******************************************************************************
*
* File: protos.h
* Author: Mark Seaman, SW Productivity
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#ifndef PROTOS_H
#define PROTOS_H
#include "bitvec.h"
#include "params.h"
#include "unicity_table.h"
#include <tesseract/unichar.h>
namespace tesseract {
// A single prototype. (X, Y, Angle, Length) describe the source segment;
// (A, B, C) are the normalized line-equation coefficients A*x + B*y + C = 0
// computed from them by FillABC(). Length is not read by the code visible
// in this file — presumably the segment length; confirm against callers.
struct PROTO_STRUCT {
float A;
float B;
float C;
float X;
float Y;
float Angle;
float Length;
};
// A character class: a growable array of prototypes plus the
// configurations (bit vectors over proto ids) that reference them.
struct CLASS_STRUCT {
int16_t NumProtos = 0; // Live entries in Prototypes.
int16_t MaxNumProtos = 0; // Allocated capacity of Prototypes.
int16_t NumConfigs = 0; // Live entries in Configurations.
int16_t MaxNumConfigs = 0; // Allocated capacity of Configurations.
std::vector<PROTO_STRUCT> Prototypes;
std::vector<BIT_VECTOR> Configurations; // Owned; freed by FreeClassFields.
UnicityTable<int> font_set;
};
// Both aliases point at a single CLASS_STRUCT; CLASSES is used where an
// array of classes is intended.
using CLASS_TYPE = CLASS_STRUCT *;
using CLASSES = CLASS_STRUCT *;
/*----------------------------------------------------------------------
M a c r o s
----------------------------------------------------------------------*/
/**
* AddProtoToConfig
*
* Set a single proto bit in the specified configuration.
*/
#define AddProtoToConfig(Pid, Config) (SET_BIT(Config, Pid))
/**
* ProtoIn
*
* Choose the selected prototype in this class record. Return the
* pointer to it (PROTO_STRUCT *).
*/
#define ProtoIn(Class, Pid) (&(Class)->Prototypes[Pid])
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
TESS_API
int AddConfigToClass(CLASS_TYPE Class);
TESS_API
int AddProtoToClass(CLASS_TYPE Class);
TESS_API
void FillABC(PROTO_STRUCT *Proto);
TESS_API
void FreeClass(CLASS_TYPE Class);
TESS_API
void FreeClassFields(CLASS_TYPE Class);
void InitPrototypes();
TESS_API
CLASS_TYPE NewClass(int NumProtos, int NumConfigs);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/protos.h
|
C++
|
apache-2.0
| 2,468
|
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: shapeclassifier.cpp
// Description: Base interface class for classifiers that return a
// shape index.
// Author: Ray Smith
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "shapeclassifier.h"
#include "scrollview.h"
#include "shapetable.h"
#ifndef GRAPHICS_DISABLED
#include "svmnode.h"
#endif
#include "tprintf.h"
#include "trainingsample.h"
namespace tesseract {
// Classifies the given [training] sample, writing to results.
// See shapeclassifier.h for a full description.
// Default implementation calls the ShapeRating version.
int ShapeClassifier::UnicharClassifySample(const TrainingSample &sample, Image page_pix, int debug,
UNICHAR_ID keep_this,
std::vector<UnicharRating> *results) {
results->clear();
std::vector<ShapeRating> shape_results;
int num_shape_results = ClassifySample(sample, page_pix, debug, keep_this, &shape_results);
const ShapeTable *shapes = GetShapeTable();
std::vector<int> unichar_map(shapes->unicharset().size(), -1);
for (int r = 0; r < num_shape_results; ++r) {
shapes->AddShapeToResults(shape_results[r], &unichar_map, results);
}
return results->size();
}
// Classifies the given [training] sample, writing to results.
// See shapeclassifier.h for a full description.
// Default implementation aborts.
// Base-class fallback: a concrete classifier must override at least one of
// ClassifySample / UnicharClassifySample, so reaching this body is a
// programming error and aborts via the always-false ASSERT_HOST.
int ShapeClassifier::ClassifySample(const TrainingSample &sample, Image page_pix, int debug,
                                    int keep_this, std::vector<ShapeRating> *results) {
  ASSERT_HOST("Must implement ClassifySample!" == nullptr);
  return 0; // Unreachable; keeps the signature well-formed.
}
// Returns the shape that contains unichar_id that has the best result.
// If result is not nullptr, it is set with the shape_id and rating.
// Does not need to be overridden if ClassifySample respects the keep_this
// rule.
int ShapeClassifier::BestShapeForUnichar(const TrainingSample &sample, Image page_pix,
UNICHAR_ID unichar_id, ShapeRating *result) {
std::vector<ShapeRating> results;
const ShapeTable *shapes = GetShapeTable();
int num_results = ClassifySample(sample, page_pix, 0, unichar_id, &results);
for (int r = 0; r < num_results; ++r) {
if (shapes->GetShape(results[r].shape_id).ContainsUnichar(unichar_id)) {
if (result != nullptr) {
*result = results[r];
}
return results[r].shape_id;
}
}
return -1;
}
// Provides access to the UNICHARSET that this classifier works with.
// Only needs to be overridden if GetShapeTable() can return nullptr.
const UNICHARSET &ShapeClassifier::GetUnicharset() const {
  // Default implementation delegates to the shape table; subclasses whose
  // GetShapeTable() can return nullptr must override this.
  const ShapeTable *table = GetShapeTable();
  return table->unicharset();
}
#ifndef GRAPHICS_DISABLED
// Visual debugger classifies the given sample, displays the results and
// solicits user input to display other classifications. Returns when
// the user has finished with debugging the sample.
// Probably doesn't need to be overridden if the subclass provides
// DisplayClassifyAs.
void ShapeClassifier::DebugDisplay(const TrainingSample &sample, Image page_pix,
                                   UNICHAR_ID unichar_id) {
  // One-time "XIT" window; intentionally leaked for the process lifetime.
  static ScrollView *terminator = nullptr;
  if (terminator == nullptr) {
    terminator = new ScrollView("XIT", 0, 0, 50, 50, 50, 50, true);
  }
  ScrollView *debug_win = CreateFeatureSpaceWindow("ClassifierDebug", 0, 0);
  // Provide a right-click menu to choose the class.
  auto *popup_menu = new SVMenuNode();
  popup_menu->AddChild("Choose class to debug", 0, "x", "Class to debug");
  popup_menu->BuildMenu(debug_win, false);
  // Display the features in green.
  const INT_FEATURE_STRUCT *features = sample.features();
  uint32_t num_features = sample.num_features();
  for (uint32_t f = 0; f < num_features; ++f) {
    RenderIntFeature(debug_win, &features[f], ScrollView::GREEN);
  }
  debug_win->Update();
  std::vector<UnicharRating> results;
  // Debug classification until the user quits.
  const UNICHARSET &unicharset = GetUnicharset();
  SVEventType ev_type;
  do {
    // Windows opened by DisplayClassifyAs for this round; destroyed below
    // once the user moves on.
    std::vector<ScrollView *> windows;
    if (unichar_id >= 0) {
      tprintf("Debugging class %d = %s\n", unichar_id, unicharset.id_to_unichar(unichar_id));
      UnicharClassifySample(sample, page_pix, 1, unichar_id, &results);
      DisplayClassifyAs(sample, page_pix, unichar_id, 1, windows);
    } else {
      tprintf("Invalid unichar_id: %d\n", unichar_id);
      UnicharClassifySample(sample, page_pix, 1, -1, &results);
    }
    if (unichar_id >= 0) {
      tprintf("Debugged class %d = %s\n", unichar_id, unicharset.id_to_unichar(unichar_id));
    }
    tprintf("Right-click in ClassifierDebug window to choose debug class,");
    tprintf(" Left-click or close window to quit...\n");
    UNICHAR_ID old_unichar_id;
    // Inner loop: wait for an event that either picks a new class via the
    // popup menu or terminates debugging (click / window destroyed).
    do {
      old_unichar_id = unichar_id;
      auto ev = debug_win->AwaitEvent(SVET_ANY);
      ev_type = ev->type;
      if (ev_type == SVET_POPUP) {
        if (unicharset.contains_unichar(ev->parameter)) {
          unichar_id = unicharset.unichar_to_id(ev->parameter);
        } else {
          tprintf("Char class '%s' not found in unicharset", ev->parameter);
        }
      }
    } while (unichar_id == old_unichar_id && ev_type != SVET_CLICK && ev_type != SVET_DESTROY);
    for (auto window : windows) {
      delete window;
    }
  } while (ev_type != SVET_CLICK && ev_type != SVET_DESTROY);
  delete debug_win;
}
#endif // !GRAPHICS_DISABLED
// Displays classification as the given shape_id. Creates as many windows
// as it feels fit, using index as a guide for placement. Adds any created
// windows to the windows output and returns a new index that may be used
// by any subsequent classifiers. Caller waits for the user to view and
// then destroys the windows by clearing the vector.
// Base-class stub: subclasses that can render a classification open windows,
// append them to `windows`, and return an updated placement index. The
// default implementation displays nothing and passes `index` through.
int ShapeClassifier::DisplayClassifyAs(const TrainingSample &sample, Image page_pix,
                                       UNICHAR_ID unichar_id, int index,
                                       std::vector<ScrollView *> &windows) {
  // Does nothing in the default implementation.
  return index;
}
// Prints debug information on the results.
void ShapeClassifier::UnicharPrintResults(const char *context,
                                          const std::vector<UnicharRating> &results) const {
  // Print the context header, then one line per rating:
  // "<rating>: c_id=<id>=<unichar>[ Font Vector: <ids>...]".
  tprintf("%s\n", context);
  for (const auto &rating : results) {
    tprintf("%g: c_id=%d=%s", rating.rating, rating.unichar_id,
            GetUnicharset().id_to_unichar(rating.unichar_id));
    if (!rating.fonts.empty()) {
      tprintf(" Font Vector:");
      for (const auto &font : rating.fonts) {
        tprintf(" %d", font.fontinfo_id);
      }
    }
    tprintf("\n");
  }
}
void ShapeClassifier::PrintResults(const char *context,
                                   const std::vector<ShapeRating> &results) const {
  // Print the context header, then one line per rating with [J]/[B] flags
  // for joined/broken results and the shape's debug description.
  tprintf("%s\n", context);
  for (const auto &rating : results) {
    tprintf("%g:", rating.rating);
    if (rating.joined) {
      tprintf("[J]");
    }
    if (rating.broken) {
      tprintf("[B]");
    }
    tprintf(" %s\n", GetShapeTable()->DebugStr(rating.shape_id).c_str());
  }
}
// Removes any result that has all its unichars covered by a better choice,
// regardless of font.
void ShapeClassifier::FilterDuplicateUnichars(std::vector<ShapeRating> *results) const {
  std::vector<ShapeRating> filtered_results;
  // Copy results to filtered results and knock out duplicate unichars.
  const ShapeTable *shapes = GetShapeTable();
  for (unsigned r = 0; r < results->size(); ++r) {
    // The first result is always kept; later ones only if they contribute
    // at least one unichar not already covered by an earlier (better) result.
    if (r > 0) {
      const Shape &shape_r = shapes->GetShape((*results)[r].shape_id);
      // NOTE: c and s are deliberately declared OUTSIDE their loops; the
      // code below inspects their post-loop values to distinguish "broke
      // early" from "ran to completion".
      int c;
      for (c = 0; c < shape_r.size(); ++c) {
        int unichar_id = shape_r[c].unichar_id;
        unsigned s;
        // Search all earlier results for this unichar.
        for (s = 0; s < r; ++s) {
          const Shape &shape_s = shapes->GetShape((*results)[s].shape_id);
          if (shape_s.ContainsUnichar(unichar_id)) {
            break; // We found unichar_id.
          }
        }
        if (s == r) {
          break; // We didn't find unichar_id.
        }
      }
      if (c == shape_r.size()) {
        continue; // We found all the unichar ids in previous answers.
      }
    }
    filtered_results.push_back((*results)[r]);
  }
  *results = std::move(filtered_results);
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/classify/shapeclassifier.cpp
|
C++
|
apache-2.0
| 9,082
|
///////////////////////////////////////////////////////////////////////
// File: shapeclassifier.h
// Description: Base interface class for classifiers that return a
// shape index.
// Author: Ray Smith
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CLASSIFY_SHAPECLASSIFIER_H_
#define TESSERACT_CLASSIFY_SHAPECLASSIFIER_H_
#include "image.h"
#include <tesseract/unichar.h>
#include <vector>
struct Pix;
namespace tesseract {
class ScrollView;
class UNICHARSET;
struct ShapeRating;
class ShapeTable;
class TrainingSample;
class TrainingSampleSet;
struct UnicharRating;
// Interface base class for classifiers that produce ShapeRating results.
// Subclasses must implement GetShapeTable() and override at least one of
// ClassifySample / UnicharClassifySample (neither is pure, but the default
// ClassifySample aborts).
class TESS_API ShapeClassifier {
public:
  virtual ~ShapeClassifier() = default;
  // Classifies the given [training] sample, writing to results.
  // If page_pix is not nullptr, the overriding function may call
  // sample.GetSamplePix(padding, page_pix) to get an image of the sample
  // padded (with real image data) by the given padding to extract features
  // from the image of the character. Other members of TrainingSample:
  // features(), micro_features(), cn_feature(), geo_feature() may be used
  // to get the appropriate tesseract features.
  // If debug is non-zero, then various degrees of classifier dependent debug
  // information is provided.
  // If keep_this (a UNICHAR_ID) is >= 0, then the results should always
  // contain keep_this, and (if possible) anything of intermediate confidence.
  // (Used for answering "Why didn't it get that right?" questions.) It must
  // be a UNICHAR_ID as the callers have no clue how to choose the best shape
  // that may contain a desired answer.
  // The return value is the number of classes saved in results.
  // NOTE that overriding functions MUST clear and sort the results by
  // descending rating unless the classifier is working with a team of such
  // classifiers.
  // NOTE: Neither overload of ClassifySample is pure, but at least one must
  // be overridden by a classifier in order for it to do anything.
  virtual int UnicharClassifySample(const TrainingSample &sample, Image page_pix, int debug,
                                    UNICHAR_ID keep_this, std::vector<UnicharRating> *results);

protected:
  // Shape-level counterpart of UnicharClassifySample; the base-class
  // implementation aborts, so subclasses override this one in practice.
  virtual int ClassifySample(const TrainingSample &sample, Image page_pix, int debug,
                             UNICHAR_ID keep_this, std::vector<ShapeRating> *results);

public:
  // Returns the shape that contains unichar_id that has the best result.
  // If result is not nullptr, it is set with the shape_id and rating.
  // Returns -1 if ClassifySample fails to provide any result containing
  // unichar_id. BestShapeForUnichar does not need to be overridden if
  // ClassifySample respects the keep_this rule.
  virtual int BestShapeForUnichar(const TrainingSample &sample, Image page_pix,
                                  UNICHAR_ID unichar_id, ShapeRating *result);
  // Provides access to the ShapeTable that this classifier works with.
  virtual const ShapeTable *GetShapeTable() const = 0;
  // Provides access to the UNICHARSET that this classifier works with.
  // Must be overridden IFF GetShapeTable() returns nullptr.
  virtual const UNICHARSET &GetUnicharset() const;
  // Visual debugger classifies the given sample, displays the results and
  // solicits user input to display other classifications. Returns when
  // the user has finished with debugging the sample.
  // Probably doesn't need to be overridden if the subclass provides
  // DisplayClassifyAs.
  void DebugDisplay(const TrainingSample &sample, Image page_pix, UNICHAR_ID unichar_id);
  // Displays classification as the given unichar_id. Creates as many windows
  // as it feels fit, using index as a guide for placement. Adds any created
  // windows to the windows output and returns a new index that may be used
  // by any subsequent classifiers. Caller waits for the user to view and
  // then destroys the windows by clearing the vector.
  virtual int DisplayClassifyAs(const TrainingSample &sample, Image page_pix, UNICHAR_ID unichar_id,
                                int index, std::vector<ScrollView *> &windows);
  // Prints debug information on the results. context is some introductory/title
  // message.
  virtual void UnicharPrintResults(const char *context,
                                   const std::vector<UnicharRating> &results) const;
  virtual void PrintResults(const char *context, const std::vector<ShapeRating> &results) const;

protected:
  // Removes any result that has all its unichars covered by a better choice,
  // regardless of font.
  void FilterDuplicateUnichars(std::vector<ShapeRating> *results) const;
};
} // namespace tesseract.
#endif // TESSERACT_CLASSIFY_SHAPECLASSIFIER_H_
|
2301_81045437/tesseract
|
src/classify/shapeclassifier.h
|
C++
|
apache-2.0
| 5,412
|
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: shapetable.cpp
// Description: Class to map a classifier shape index to unicharset
// indices and font indices.
// Author: Ray Smith
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "shapetable.h"
#include "bitvector.h"
#include "fontinfo.h"
#include "intfeaturespace.h"
#include "unicharset.h"
#include "unicity_table.h"
#include <algorithm>
namespace tesseract {
// Helper function to get the index of the first result with the required
// unichar_id. If the results are sorted by rating, this will also be the
// best result with the required unichar_id.
// Returns -1 if the unichar_id is not found
int ShapeRating::FirstResultWithUnichar(const std::vector<ShapeRating> &results,
                                        const ShapeTable &shape_table, UNICHAR_ID unichar_id) {
  // Scan in order: with rating-sorted input the first hit is also the best.
  for (unsigned index = 0; index < results.size(); ++index) {
    const Shape &shape = shape_table.GetShape(results[index].shape_id);
    if (shape.ContainsUnichar(unichar_id)) {
      return index;
    }
  }
  return -1; // unichar_id not present in any result.
}
// Helper function to get the index of the first result with the required
// unichar_id. If the results are sorted by rating, this will also be the
// best result with the required unichar_id.
// Returns -1 if the unichar_id is not found
int UnicharRating::FirstResultWithUnichar(const std::vector<UnicharRating> &results,
                                          UNICHAR_ID unichar_id) {
  // Scan in order: with rating-sorted input the first hit is also the best.
  for (unsigned index = 0; index < results.size(); ++index) {
    if (results[index].unichar_id == unichar_id) {
      return index;
    }
  }
  return -1; // unichar_id not present in any result.
}
// Writes to the given file. Returns false in case of error.
bool UnicharAndFonts::Serialize(FILE *fp) const {
  // Write the unichar id, then its font id list; stop on the first failure.
  if (!tesseract::Serialize(fp, &unichar_id)) {
    return false;
  }
  return tesseract::Serialize(fp, font_ids);
}
// Reads from the given file. Returns false in case of error.
bool UnicharAndFonts::DeSerialize(TFile *fp) {
  // Read the unichar id, then its font id list; stop on the first failure.
  if (!fp->DeSerialize(&unichar_id)) {
    return false;
  }
  return fp->DeSerialize(font_ids);
}
// Sort function to sort a pair of UnicharAndFonts by unichar_id.
int UnicharAndFonts::SortByUnicharId(const void *v1, const void *v2) {
  // qsort-style comparator: negative/zero/positive as lhs sorts
  // before/equal-to/after rhs by unichar_id.
  const auto *lhs = static_cast<const UnicharAndFonts *>(v1);
  const auto *rhs = static_cast<const UnicharAndFonts *>(v2);
  return lhs->unichar_id - rhs->unichar_id;
}
// std::sort-style strict-weak-ordering comparator on unichar_id
// (used by Shape::SortUnichars).
bool UnicharAndFonts::StdSortByUnicharId(const UnicharAndFonts &v1, const UnicharAndFonts &v2) {
  return v1.unichar_id < v2.unichar_id;
}
// Writes to the given file. Returns false in case of error.
bool Shape::Serialize(FILE *fp) const {
  // The bool flag is written as a uint8_t for a stable on-disk layout.
  uint8_t sorted_flag = unichars_sorted_ ? 1 : 0;
  if (!tesseract::Serialize(fp, &sorted_flag)) {
    return false;
  }
  return tesseract::Serialize(fp, unichars_);
}
// Reads from the given file. Returns false in case of error.
bool Shape::DeSerialize(TFile *fp) {
  // The sorted flag is stored as a uint8_t; convert back to bool.
  uint8_t sorted_flag;
  if (!fp->DeSerialize(&sorted_flag)) {
    return false;
  }
  unichars_sorted_ = (sorted_flag != 0);
  return fp->DeSerialize(unichars_);
}
// Adds a font_id for the given unichar_id. If the unichar_id is not
// in the shape, it is added.
void Shape::AddToShape(int unichar_id, int font_id) {
  for (auto &entry : unichars_) {
    if (entry.unichar_id != unichar_id) {
      continue;
    }
    // Unichar already in the shape: add the font only if it is new.
    std::vector<int> &fonts = entry.font_ids;
    if (std::find(fonts.begin(), fonts.end(), font_id) == fonts.end()) {
      fonts.push_back(font_id);
    }
    return;
  }
  // Unichar not in the shape yet: append it with its first font.
  unichars_.emplace_back(unichar_id, font_id);
  // A list of 0 or 1 entries is trivially sorted; longer lists may not be.
  unichars_sorted_ = unichars_.size() <= 1;
}
// Adds everything in other to this.
void Shape::AddShape(const Shape &other) {
  // Merge every (unichar, font) pair of other into this shape; AddToShape
  // de-duplicates as it goes.
  for (const auto &entry : other.unichars_) {
    for (int font_id : entry.font_ids) {
      AddToShape(entry.unichar_id, font_id);
    }
  }
  unichars_sorted_ = unichars_.size() <= 1;
}
// Returns true if the shape contains the given unichar_id, font_id pair.
bool Shape::ContainsUnicharAndFont(int unichar_id, int font_id) const {
for (const auto &unichar : unichars_) {
if (unichar.unichar_id == unichar_id) {
// Found the unichar, so look for the font.
auto &font_list = unichar.font_ids;
for (int f : font_list) {
if (f == font_id) {
return true;
}
}
return false;
}
}
return false;
}
// Returns true if the shape contains the given unichar_id, ignoring font.
bool Shape::ContainsUnichar(int unichar_id) const {
for (const auto &unichar : unichars_) {
if (unichar.unichar_id == unichar_id) {
return true;
}
}
return false;
}
// Returns true if the shape contains the given font, ignoring unichar_id.
bool Shape::ContainsFont(int font_id) const {
for (const auto &unichar : unichars_) {
auto &font_list = unichar.font_ids;
for (int f : font_list) {
if (f == font_id) {
return true;
}
}
}
return false;
}
// Returns true if the shape contains the given font properties, ignoring
// unichar_id.
bool Shape::ContainsFontProperties(const FontInfoTable &font_table, uint32_t properties) const {
for (const auto &unichar : unichars_) {
auto &font_list = unichar.font_ids;
for (int f : font_list) {
if (font_table.at(f).properties == properties) {
return true;
}
}
}
return false;
}
// Returns true if the shape contains multiple different font properties,
// ignoring unichar_id.
// An empty shape trivially contains at most one set of properties, so
// returns false (the previous code dereferenced unichars_[0] unguarded,
// which was undefined behavior on an empty shape).
bool Shape::ContainsMultipleFontProperties(const FontInfoTable &font_table) const {
  if (unichars_.empty()) {
    return false;
  }
  // Compare every font's properties against the first font of the first
  // unichar; any mismatch proves multiple property sets are present.
  uint32_t properties = font_table.at(unichars_[0].font_ids[0]).properties;
  for (const auto &unichar : unichars_) {
    auto &font_list = unichar.font_ids;
    for (int f : font_list) {
      if (font_table.at(f).properties != properties) {
        return true;
      }
    }
  }
  return false;
}
// Returns true if this shape is equal to other (ignoring order of unichars
// and fonts): mutual subset implies equality as sets.
bool Shape::operator==(const Shape &other) const {
  return other.IsSubsetOf(*this) && IsSubsetOf(other);
}
// Returns true if this is a subset (including equal) of other.
bool Shape::IsSubsetOf(const Shape &other) const {
for (const auto &unichar : unichars_) {
int unichar_id = unichar.unichar_id;
const std::vector<int> &font_list = unichar.font_ids;
for (int f : font_list) {
if (!other.ContainsUnicharAndFont(unichar_id, f)) {
return false;
}
}
}
return true;
}
// Returns true if the lists of unichar ids are the same in this and other,
// ignoring fonts.
// NOT const, as it will sort the unichars on demand (both this and other
// may be mutated by lazy sorting).
bool Shape::IsEqualUnichars(Shape *other) {
  // Different cardinality can never be equal as sets of unique unichars.
  if (unichars_.size() != other->unichars_.size()) {
    return false;
  }
  // Sort both sides lazily so the id lists can be compared element-wise.
  if (!unichars_sorted_) {
    SortUnichars();
  }
  if (!other->unichars_sorted_) {
    other->SortUnichars();
  }
  for (unsigned c = 0; c < unichars_.size(); ++c) {
    if (unichars_[c].unichar_id != other->unichars_[c].unichar_id) {
      return false;
    }
  }
  return true;
}
// Sorts the unichars_ vector by unichar.
void Shape::SortUnichars() {
  std::sort(unichars_.begin(), unichars_.end(), UnicharAndFonts::StdSortByUnicharId);
  unichars_sorted_ = true; // Enables element-wise comparison in IsEqualUnichars.
}
// Default constructor: no unicharset attached, cached font count unset.
ShapeTable::ShapeTable() : unicharset_(nullptr), num_fonts_(0) {}
// The unicharset must outlive this ShapeTable; it is used only by DebugStr.
ShapeTable::ShapeTable(const UNICHARSET &unicharset) : unicharset_(&unicharset), num_fonts_(0) {}
// Writes to the given file. Returns false in case of error.
bool ShapeTable::Serialize(FILE *fp) const {
  // Delegates to the generic vector serializer, which writes each Shape.
  return tesseract::Serialize(fp, shape_table_);
}
// Reads from the given file. Returns false in case of error.
bool ShapeTable::DeSerialize(TFile *fp) {
  if (!fp->DeSerialize(shape_table_)) {
    return false;
  }
  // Invalidate the cached font count; NumFonts() recomputes it on demand.
  num_fonts_ = 0;
  return true;
}
// Returns the number of fonts used in this ShapeTable, computing it if
// necessary. The result is cached in the mutable num_fonts_ member, which
// is why this method can remain const.
int ShapeTable::NumFonts() const {
  if (num_fonts_ <= 0) {
    // Lazily compute the cache as 1 + the maximum font id in any shape.
    for (auto shape_id : shape_table_) {
      const Shape &shape = *shape_id;
      for (int c = 0; c < shape.size(); ++c) {
        for (int font_id : shape[c].font_ids) {
          if (font_id >= num_fonts_) {
            num_fonts_ = font_id + 1;
          }
        }
      }
    }
  }
  return num_fonts_;
}
// Re-indexes the class_ids in the shapetable according to the given map.
// Useful in conjunction with set_unicharset.
void ShapeTable::ReMapClassIds(const std::vector<int> &unicharset_map) {
  for (auto *current : shape_table_) {
    for (int i = 0; i < current->size(); ++i) {
      int old_id = (*current)[i].unichar_id;
      current->SetUnicharId(i, unicharset_map[old_id]);
    }
  }
}
// Returns a string listing the classes/fonts in a shape.
// Returns "INVALID_UNICHAR_ID" if shape_id is out of range.
std::string ShapeTable::DebugStr(unsigned shape_id) const {
  if (shape_id >= shape_table_.size()) {
    return "INVALID_UNICHAR_ID";
  }
  const Shape &shape = GetShape(shape_id);
  std::string result;
  result += "Shape" + std::to_string(shape_id);
  // For very large shapes just report the unichar count.
  if (shape.size() > 100) {
    result += " Num unichars=" + std::to_string(shape.size());
    return result;
  }
  for (int c = 0; c < shape.size(); ++c) {
    result += " c_id=" + std::to_string(shape[c].unichar_id);
    result += "=";
    result += unicharset_->id_to_unichar(shape[c].unichar_id);
    // Fonts are only listed for small shapes to keep the output readable.
    if (shape.size() < 10) {
      result += ", " + std::to_string(shape[c].font_ids.size());
      result += " fonts =";
      int num_fonts = shape[c].font_ids.size();
      if (num_fonts > 10) {
        // Abbreviate long font lists as "first ... last".
        result += " " + std::to_string(shape[c].font_ids[0]);
        result += " ... " + std::to_string(shape[c].font_ids[num_fonts - 1]);
      } else {
        for (int f = 0; f < num_fonts; ++f) {
          result += " " + std::to_string(shape[c].font_ids[f]);
        }
      }
    }
  }
  return result;
}
// Returns a debug string summarizing the table: the number of master
// (unmerged) shapes, the largest unichar count, and how many master shapes
// hold more than one unichar.
std::string ShapeTable::SummaryStr() const {
  int max_unichars = 0;
  int num_multi_shapes = 0;
  int num_master_shapes = 0;
  for (unsigned s = 0; s < shape_table_.size(); ++s) {
    // Skip shapes that have been merged into another shape.
    if (MasterDestinationIndex(s) != s) {
      continue;
    }
    ++num_master_shapes;
    int shape_size = GetShape(s).size();
    if (shape_size > 1) {
      ++num_multi_shapes;
    }
    if (shape_size > max_unichars) {
      max_unichars = shape_size;
    }
  }
  std::string result;
  result += "Number of shapes = " + std::to_string(num_master_shapes);
  result += " max unichars = " + std::to_string(max_unichars);
  result += " number with multiple unichars = " + std::to_string(num_multi_shapes);
  return result;
}
// Adds a new shape starting with the given unichar_id and font_id.
// Returns the assigned index.
unsigned ShapeTable::AddShape(int unichar_id, int font_id) {
  auto *new_shape = new Shape;
  new_shape->AddToShape(unichar_id, font_id);
  shape_table_.push_back(new_shape);
  // Keep the cached font count large enough to cover the new font.
  if (font_id + 1 > num_fonts_) {
    num_fonts_ = font_id + 1;
  }
  return shape_table_.size() - 1;
}
// Adds a copy of the given shape unless it is already present.
// Returns the assigned index or index of existing shape if already present.
unsigned ShapeTable::AddShape(const Shape &other) {
  // Linear scan for an existing equal shape.
  unsigned index = 0;
  while (index < shape_table_.size() && !(*shape_table_[index] == other)) {
    ++index;
  }
  if (index == shape_table_.size()) {
    shape_table_.push_back(new Shape(other));
  }
  num_fonts_ = 0; // Invalidate cached font count; NumFonts() recomputes.
  return index;
}
// Removes the shape given by the shape index.
void ShapeTable::DeleteShape(unsigned shape_id) {
delete shape_table_[shape_id];
shape_table_.erase(shape_table_.begin() + shape_id);
}
// Adds a font_id to the given existing shape index for the given
// unichar_id. If the unichar_id is not in the shape, it is added.
void ShapeTable::AddToShape(unsigned shape_id, int unichar_id, int font_id) {
  shape_table_[shape_id]->AddToShape(unichar_id, font_id);
  // Keep the cached font count large enough to cover the new font.
  if (font_id + 1 > num_fonts_) {
    num_fonts_ = font_id + 1;
  }
}
// Adds the given shape to the existing shape with the given index.
void ShapeTable::AddShapeToShape(unsigned shape_id, const Shape &other) {
  shape_table_[shape_id]->AddShape(other);
  num_fonts_ = 0; // Invalidate cached font count; NumFonts() recomputes.
}
// Returns the id of the shape that contains the given unichar and font.
// If not found, returns -1.
// If font_id < 0, the font_id is ignored and the first shape that matches
// the unichar_id is returned.
int ShapeTable::FindShape(int unichar_id, int font_id) const {
  for (unsigned s = 0; s < shape_table_.size(); ++s) {
    const Shape &shape = GetShape(s);
    for (int c = 0; c < shape.size(); ++c) {
      if (shape[c].unichar_id != unichar_id) {
        continue;
      }
      if (font_id < 0) {
        return s; // Any font will do.
      }
      const auto &fonts = shape[c].font_ids;
      if (std::find(fonts.begin(), fonts.end(), font_id) != fonts.end()) {
        return s;
      }
    }
  }
  return -1;
}
// Returns the first unichar_id and font_id in the given shape.
// NOTE(review): assumes the shape and its first font list are non-empty —
// callers must guarantee this; there is no bounds check here.
void ShapeTable::GetFirstUnicharAndFont(unsigned shape_id, int *unichar_id, int *font_id) const {
  const UnicharAndFonts &unichar_and_fonts = (*shape_table_[shape_id])[0];
  *unichar_id = unichar_and_fonts.unichar_id;
  *font_id = unichar_and_fonts.font_ids[0];
}
// Expands all the classes/fonts in the shape individually to build
// a ShapeTable.
// (unichar, font) pairs found in master_shapes are added as copies of their
// master shape; pairs not in master_shapes are added as new singleton shapes.
// Returns the number of master shapes copied in.
int ShapeTable::BuildFromShape(const Shape &shape, const ShapeTable &master_shapes) {
  // Marks which master shapes are needed, so each is copied exactly once.
  BitVector shape_map(master_shapes.NumShapes());
  for (int u_ind = 0; u_ind < shape.size(); ++u_ind) {
    for (unsigned f_ind = 0; f_ind < shape[u_ind].font_ids.size(); ++f_ind) {
      int c = shape[u_ind].unichar_id;
      int f = shape[u_ind].font_ids[f_ind];
      int master_id = master_shapes.FindShape(c, f);
      if (master_id >= 0) {
        shape_map.SetBit(master_id);
      } else if (FindShape(c, f) < 0) {
        // Not in the masters and not yet here: add as a singleton.
        AddShape(c, f);
      }
    }
  }
  int num_masters = 0;
  for (unsigned s = 0; s < master_shapes.NumShapes(); ++s) {
    if (shape_map[s]) {
      AddShape(master_shapes.GetShape(s));
      ++num_masters;
    }
  }
  return num_masters;
}
// Returns true if the shapes are already merged.
bool ShapeTable::AlreadyMerged(unsigned shape_id1, unsigned shape_id2) const {
  // Two shapes are merged iff they resolve to the same master shape.
  return MasterDestinationIndex(shape_id1) == MasterDestinationIndex(shape_id2);
}
// Returns true if any shape contains multiple unichars.
// Only master (unmerged) shapes are considered.
bool ShapeTable::AnyMultipleUnichars() const {
  const auto num_shapes = NumShapes();
  for (unsigned s1 = 0; s1 < num_shapes; ++s1) {
    if (MasterDestinationIndex(s1) == s1 && GetShape(s1).size() > 1) {
      return true;
    }
  }
  return false;
}
// Returns the maximum number of unichars over all shapes.
int ShapeTable::MaxNumUnichars() const {
  int best = 0;
  const int num_shapes = NumShapes();
  for (int s = 0; s < num_shapes; ++s) {
    best = std::max(best, GetShape(s).size());
  }
  return best;
}
// Merges shapes with a common unichar over the [start, end) interval.
// Assumes single unichar per shape.
void ShapeTable::ForceFontMerges(unsigned start, unsigned end) {
  for (unsigned s1 = start; s1 < end; ++s1) {
    // Only consider master shapes holding exactly one unichar.
    if (MasterDestinationIndex(s1) == s1 && GetShape(s1).size() == 1) {
      int unichar_id = GetShape(s1)[0].unichar_id;
      for (auto s2 = s1 + 1; s2 < end; ++s2) {
        // Merge any later single-unichar master with the same unichar.
        if (MasterDestinationIndex(s2) == s2 && GetShape(s2).size() == 1 &&
            unichar_id == GetShape(s2)[0].unichar_id) {
          MergeShapes(s1, s2);
        }
      }
    }
  }
  // Compact the table: rebuild it from the surviving master shapes only,
  // then replace this table with the compacted copy.
  ShapeTable compacted(*unicharset_);
  compacted.AppendMasterShapes(*this, nullptr);
  *this = compacted;
}
// Returns the number of unichars in the master shape.
unsigned ShapeTable::MasterUnicharCount(unsigned shape_id) const {
  return GetShape(MasterDestinationIndex(shape_id)).size();
}
// Returns the sum of the font counts in the master shape.
int ShapeTable::MasterFontCount(unsigned shape_id) const {
int master_id = MasterDestinationIndex(shape_id);
const Shape &shape = GetShape(master_id);
int font_count = 0;
for (int c = 0; c < shape.size(); ++c) {
font_count += shape[c].font_ids.size();
}
return font_count;
}
// Returns the number of unichars that would result from merging the shapes.
int ShapeTable::MergedUnicharCount(unsigned shape_id1, unsigned shape_id2) const {
// Do it the easy way for now.
int master_id1 = MasterDestinationIndex(shape_id1);
int master_id2 = MasterDestinationIndex(shape_id2);
Shape combined_shape(*shape_table_[master_id1]);
combined_shape.AddShape(*shape_table_[master_id2]);
return combined_shape.size();
}
// Merges two shape_ids, leaving shape_id2 marked as merged.
// Works on the master shapes of the two ids, so chains of merges resolve
// correctly.
void ShapeTable::MergeShapes(unsigned shape_id1, unsigned shape_id2) {
  auto master_id1 = MasterDestinationIndex(shape_id1);
  auto master_id2 = MasterDestinationIndex(shape_id2);
  // Point master_id2 (and all merged shapes) to master_id1.
  shape_table_[master_id2]->set_destination_index(master_id1);
  // Add all the shapes of master_id2 to master_id1.
  shape_table_[master_id1]->AddShape(*shape_table_[master_id2]);
}
// Swaps two shape_ids.
void ShapeTable::SwapShapes(unsigned shape_id1, unsigned shape_id2) {
Shape *tmp = shape_table_[shape_id1];
shape_table_[shape_id1] = shape_table_[shape_id2];
shape_table_[shape_id2] = tmp;
}
// Returns the destination of this shape, (if merged), taking into account
// the fact that the destination may itself have been merged.
// For a non-merged shape (destination negative or pointing to itself),
// returns the input shape_id.
unsigned ShapeTable::MasterDestinationIndex(unsigned shape_id) const {
  auto dest_id = shape_table_[shape_id]->destination_index();
  if (static_cast<unsigned>(dest_id) == shape_id || dest_id < 0) {
    return shape_id; // Is master already.
  }
  auto master_id = shape_table_[dest_id]->destination_index();
  if (master_id == dest_id || master_id < 0) {
    return dest_id; // Dest is the master and shape_id points to it.
  }
  // Follow the chain of merges recursively to the true master.
  master_id = MasterDestinationIndex(master_id);
  return master_id;
}
// Returns false if the unichars in neither shape is a subset of the other.
bool ShapeTable::SubsetUnichar(unsigned shape_id1, unsigned shape_id2) const {
  const Shape &shape1 = GetShape(shape_id1);
  const Shape &shape2 = GetShape(shape_id2);
  // Check whether all unichars of shape1 appear in shape2.
  bool one_in_two = true;
  for (int c = 0; c < shape1.size() && one_in_two; ++c) {
    one_in_two = shape2.ContainsUnichar(shape1[c].unichar_id);
  }
  // Check whether all unichars of shape2 appear in shape1.
  bool two_in_one = true;
  for (int c = 0; c < shape2.size() && two_in_one; ++c) {
    two_in_one = shape1.ContainsUnichar(shape2[c].unichar_id);
  }
  return one_in_two || two_in_one;
}
// Returns false if the unichars in neither shape is a subset of the other.
// Here "one shape" is the union of merge1 and merge2 and "the other" is
// shape: true if shape is a subset of the union, or the union is a subset
// of shape.
bool ShapeTable::MergeSubsetUnichar(int merge_id1, int merge_id2, unsigned shape_id) const {
  const Shape &merge1 = GetShape(merge_id1);
  const Shape &merge2 = GetShape(merge_id2);
  const Shape &shape = GetShape(shape_id);
  int cm1, cm2, cs;
  for (cs = 0; cs < shape.size(); ++cs) {
    int unichar_id = shape[cs].unichar_id;
    if (!merge1.ContainsUnichar(unichar_id) && !merge2.ContainsUnichar(unichar_id)) {
      break; // Shape is not a subset of the merge.
    }
  }
  for (cm1 = 0; cm1 < merge1.size(); ++cm1) {
    int unichar_id1 = merge1[cm1].unichar_id;
    if (!shape.ContainsUnichar(unichar_id1)) {
      break; // Merge is not a subset of shape
    }
  }
  for (cm2 = 0; cm2 < merge2.size(); ++cm2) {
    int unichar_id2 = merge2[cm2].unichar_id;
    if (!shape.ContainsUnichar(unichar_id2)) {
      break; // Merge is not a subset of shape
    }
  }
  // Each loop ran to completion iff the corresponding subset relation holds.
  return cs == shape.size() || (cm1 == merge1.size() && cm2 == merge2.size());
}
// Returns true if the unichar sets are equal between the shapes.
bool ShapeTable::EqualUnichars(unsigned shape_id1, unsigned shape_id2) const {
const Shape &shape1 = GetShape(shape_id1);
const Shape &shape2 = GetShape(shape_id2);
for (int c1 = 0; c1 < shape1.size(); ++c1) {
int unichar_id1 = shape1[c1].unichar_id;
if (!shape2.ContainsUnichar(unichar_id1)) {
return false;
}
}
for (int c2 = 0; c2 < shape2.size(); ++c2) {
int unichar_id2 = shape2[c2].unichar_id;
if (!shape1.ContainsUnichar(unichar_id2)) {
return false;
}
}
return true;
}
// Returns true if the unichar sets are equal between the shapes.
// Here "one shape" is the union of merge1 and merge2 and "the other" is
// shape: the union must contain exactly the unichars of shape.
bool ShapeTable::MergeEqualUnichars(int merge_id1, int merge_id2, unsigned shape_id) const {
  const Shape &merge1 = GetShape(merge_id1);
  const Shape &merge2 = GetShape(merge_id2);
  const Shape &shape = GetShape(shape_id);
  for (int cs = 0; cs < shape.size(); ++cs) {
    int unichar_id = shape[cs].unichar_id;
    if (!merge1.ContainsUnichar(unichar_id) && !merge2.ContainsUnichar(unichar_id)) {
      return false; // Shape has a unichar that appears in neither merge.
    }
  }
  for (int cm1 = 0; cm1 < merge1.size(); ++cm1) {
    int unichar_id1 = merge1[cm1].unichar_id;
    if (!shape.ContainsUnichar(unichar_id1)) {
      return false; // Merge has a unichar that is not in shape.
    }
  }
  for (int cm2 = 0; cm2 < merge2.size(); ++cm2) {
    int unichar_id2 = merge2[cm2].unichar_id;
    if (!shape.ContainsUnichar(unichar_id2)) {
      return false; // Merge has a unichar that is not in shape.
    }
  }
  return true;
}
// Returns true if there is a common unichar between the shapes.
bool ShapeTable::CommonUnichars(unsigned shape_id1, unsigned shape_id2) const {
const Shape &shape1 = GetShape(shape_id1);
const Shape &shape2 = GetShape(shape_id2);
for (int c1 = 0; c1 < shape1.size(); ++c1) {
int unichar_id1 = shape1[c1].unichar_id;
if (shape2.ContainsUnichar(unichar_id1)) {
return true;
}
}
return false;
}
// Returns true if there is a common font id between the shapes.
bool ShapeTable::CommonFont(unsigned shape_id1, unsigned shape_id2) const {
const Shape &shape1 = GetShape(shape_id1);
const Shape &shape2 = GetShape(shape_id2);
for (int c1 = 0; c1 < shape1.size(); ++c1) {
const std::vector<int> &font_list1 = shape1[c1].font_ids;
for (int f : font_list1) {
if (shape2.ContainsFont(f)) {
return true;
}
}
}
return false;
}
// Appends the master shapes from other to this.
// If not nullptr, shape_map is set to map other shape_ids to this's shape_ids
// (-1 for shapes that were not copied).
// NOTE(review): a shape is treated as master here iff destination_index() < 0,
// whereas MasterDestinationIndex also treats dest == shape_id as master —
// confirm the two conventions are meant to coexist.
void ShapeTable::AppendMasterShapes(const ShapeTable &other, std::vector<int> *shape_map) {
  if (shape_map != nullptr) {
    shape_map->clear();
    shape_map->resize(other.NumShapes(), -1);
  }
  for (unsigned s = 0; s < other.shape_table_.size(); ++s) {
    if (other.shape_table_[s]->destination_index() < 0) {
      int index = AddShape(*other.shape_table_[s]);
      if (shape_map != nullptr) {
        (*shape_map)[s] = index;
      }
    }
  }
}
// Returns the number of master shapes remaining after merging.
int ShapeTable::NumMasterShapes() const {
int num_shapes = 0;
for (auto s : shape_table_) {
if (s->destination_index() < 0) {
++num_shapes;
}
}
return num_shapes;
}
// Adds the unichars of the given shape_id to the vector of results. Any
// unichar_id that is already present just has the fonts added to the
// font set for that result without adding a new entry in the vector.
// NOTE: it is assumed that the results are given to this function in order
// of decreasing rating.
// The unichar_map vector indicates the index of the results entry containing
// each unichar, or -1 if the unichar is not yet included in results.
void ShapeTable::AddShapeToResults(const ShapeRating &shape_rating, std::vector<int> *unichar_map,
                                   std::vector<UnicharRating> *results) const {
  // Joined/broken flags are reported as pseudo-unichars.
  if (shape_rating.joined) {
    AddUnicharToResults(UNICHAR_JOINED, shape_rating.rating, unichar_map, results);
  }
  if (shape_rating.broken) {
    AddUnicharToResults(UNICHAR_BROKEN, shape_rating.rating, unichar_map, results);
  }
  const Shape &shape = GetShape(shape_rating.shape_id);
  for (int u = 0; u < shape.size(); ++u) {
    int result_index =
        AddUnicharToResults(shape[u].unichar_id, shape_rating.rating, unichar_map, results);
    // Attach every font of this unichar, with the rating scaled to int16.
    for (int font_id : shape[u].font_ids) {
      (*results)[result_index].fonts.emplace_back(font_id,
                                                  IntCastRounded(shape_rating.rating * INT16_MAX));
    }
  }
}
// Adds the given unichar_id to the results if needed, updating unichar_map
// and returning the index of unichar in results.
int ShapeTable::AddUnicharToResults(int unichar_id, float rating, std::vector<int> *unichar_map,
                                    std::vector<UnicharRating> *results) const {
  int index = unichar_map->at(unichar_id);
  if (index < 0) {
    // First sighting of this unichar: append a new entry and record it.
    index = results->size();
    results->push_back(UnicharRating(unichar_id, rating));
    (*unichar_map)[unichar_id] = index;
  }
  return index;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/shapetable.cpp
|
C++
|
apache-2.0
| 25,430
|
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: shapetable.h
// Description: Class to map a classifier shape index to unicharset
// indices and font indices.
// Author: Ray Smith
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CLASSIFY_SHAPETABLE_H_
#define TESSERACT_CLASSIFY_SHAPETABLE_H_
#include "bitvector.h"
#include "fontinfo.h"
#include "genericheap.h"
#include "intmatcher.h"
namespace tesseract {
class UNICHARSET;
class ShapeTable;
// Simple struct to hold a single classifier unichar selection, a corresponding
// rating, and a list of appropriate fonts.
struct UnicharRating {
  UnicharRating() : unichar_id(0), rating(0.0f), adapted(false), config(0), feature_misses(0) {}
  UnicharRating(int u, float r)
      : unichar_id(u), rating(r), adapted(false), config(0), feature_misses(0) {}

  // Print debug info to stdout via tprintf.
  void Print() const {
    tprintf(
        "Unichar-id=%d, rating=%g, adapted=%d, config=%d, misses=%u,"
        " %zu fonts\n",
        unichar_id, static_cast<double>(rating), adapted, config, feature_misses, fonts.size());
  }

  // Helper function to get the index of the first result with the required
  // unichar_id. If the results are sorted by rating, this will also be the
  // best result with the required unichar_id.
  // Returns -1 if the unichar_id is not found
  static int FirstResultWithUnichar(const std::vector<UnicharRating> &results,
                                    UNICHAR_ID unichar_id);

  // Index into some UNICHARSET table indicates the class of the answer.
  UNICHAR_ID unichar_id;
  // Rating from classifier with 1.0 perfect and 0.0 impossible.
  // Call it a probability if you must.
  float rating;
  // True if this result is from the adaptive classifier.
  bool adapted;
  // Index of best matching font configuration of result.
  uint8_t config;
  // Number of features that were total misses - were liked by no classes.
  uint16_t feature_misses;
  // Unsorted collection of fontinfo ids and scores. Note that a raw result
  // from the IntegerMatch will contain config ids, that require transforming
  // to fontinfo ids via fontsets and (possibly) shapetable.
  std::vector<ScoredFont> fonts;
};
// Classifier result from a low-level classification is an index into some
// ShapeTable and a rating.
struct ShapeRating {
  ShapeRating() : shape_id(0), rating(0.0f), raw(0.0f), font(0.0f), joined(false), broken(false) {}
  ShapeRating(int s, float r)
      : shape_id(s), rating(r), raw(1.0f), font(0.0f), joined(false), broken(false) {}

  // Helper function to get the index of the first result with the required
  // unichar_id. If the results are sorted by rating, this will also be the
  // best result with the required unichar_id.
  // Returns -1 if the unichar_id is not found
  static int FirstResultWithUnichar(const std::vector<ShapeRating> &results,
                                    const ShapeTable &shape_table, UNICHAR_ID unichar_id);

  // Index into some shape table indicates the class of the answer.
  int shape_id;
  // Rating from classifier with 1.0 perfect and 0.0 impossible.
  // Call it a probability if you must.
  float rating;
  // Subsidiary rating that a classifier may use internally.
  float raw;
  // Subsidiary rating that a classifier may use internally.
  float font;
  // Flag indicating that the input may be joined.
  bool joined;
  // Flag indicating that the input may be broken (a fragment).
  bool broken;
};
// Simple struct to hold an entry for a heap-based priority queue of
// ShapeRating.
struct ShapeQueueEntry {
  ShapeQueueEntry() : result(ShapeRating(0, 0.0f)), level(0) {}
  ShapeQueueEntry(const ShapeRating &rating, int level0) : result(rating), level(level0) {}

  // Orders by decreasing rating, with ties broken by decreasing level.
  bool operator<(const ShapeQueueEntry &other) const {
    if (result.rating != other.result.rating) {
      return result.rating > other.result.rating;
    }
    return level > other.level;
  }

  // Output from classifier.
  ShapeRating result;
  // Which level in the tree did this come from?
  int level;
};
using ShapeQueue = GenericHeap<ShapeQueueEntry>;
// Simple struct to hold a set of fonts associated with a single unichar-id.
// A vector of UnicharAndFonts makes a shape.
struct UnicharAndFonts {
  UnicharAndFonts() : unichar_id(0) {}
  // Starts the font list with a single font_id.
  UnicharAndFonts(int uni_id, int font_id) : unichar_id(uni_id) {
    font_ids.push_back(font_id);
  }

  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE *fp) const;
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(TFile *fp);

  // Sort function to sort a pair of UnicharAndFonts by unichar_id.
  static int SortByUnicharId(const void *v1, const void *v2);
  // std::sort-compatible comparator ordering by unichar_id.
  static bool StdSortByUnicharId(const UnicharAndFonts &v1, const UnicharAndFonts &v2);

  std::vector<int32_t> font_ids;
  int32_t unichar_id;
};
// A Shape is a collection of unichar-ids and a list of fonts associated with
// each, organized as a vector of UnicharAndFonts. Conceptually a Shape is
// a classifiable unit, and represents a group of characters or parts of
// characters that have a similar or identical shape. Shapes/ShapeTables may
// be organized hierarchically from identical shapes at the leaves to vaguely
// similar shapes near the root.
class TESS_API Shape {
public:
  Shape() = default;
  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE *fp) const;
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(TFile *fp);

  // Index of the shape this one was merged into; negative means unmerged.
  int destination_index() const {
    return destination_index_;
  }
  void set_destination_index(int index) {
    destination_index_ = index;
  }
  // Number of distinct unichars in this shape.
  int size() const {
    return unichars_.size();
  }
  // Returns a UnicharAndFonts entry for the given index, which must be
  // in the range [0, size()).
  const UnicharAndFonts &operator[](int index) const {
    return unichars_[index];
  }
  // Sets the unichar_id of the given index to the new unichar_id.
  void SetUnicharId(int index, int unichar_id) {
    unichars_[index].unichar_id = unichar_id;
  }
  // Adds a font_id for the given unichar_id. If the unichar_id is not
  // in the shape, it is added.
  void AddToShape(int unichar_id, int font_id);
  // Adds everything in other to this.
  void AddShape(const Shape &other);
  // Returns true if the shape contains the given unichar_id, font_id pair.
  bool ContainsUnicharAndFont(int unichar_id, int font_id) const;
  // Returns true if the shape contains the given unichar_id, ignoring font.
  bool ContainsUnichar(int unichar_id) const;
  // Returns true if the shape contains the given font, ignoring unichar_id.
  bool ContainsFont(int font_id) const;
  // Returns true if the shape contains the given font properties, ignoring
  // unichar_id.
  bool ContainsFontProperties(const FontInfoTable &font_table, uint32_t properties) const;
  // Returns true if the shape contains multiple different font properties,
  // ignoring unichar_id.
  bool ContainsMultipleFontProperties(const FontInfoTable &font_table) const;
  // Returns true if this shape is equal to other (ignoring order of unichars
  // and fonts).
  bool operator==(const Shape &other) const;
  // Returns true if this is a subset (including equal) of other.
  bool IsSubsetOf(const Shape &other) const;
  // Returns true if the lists of unichar ids are the same in this and other,
  // ignoring fonts.
  // NOT const, as it will sort the unichars on demand.
  bool IsEqualUnichars(Shape *other);

private:
  // Sorts the unichars_ vector by unichar.
  void SortUnichars();

  // Flag indicates that the unichars are sorted, allowing faster set
  // operations with another shape.
  bool unichars_sorted_ = false;
  // If this Shape is part of a ShapeTable the destination_index_ is the index
  // of some other shape in the ShapeTable with which this shape is merged.
  // Negative means unmerged. (Previously the in-class initializer was 0 while
  // the constructor used -1; -1 is the sentinel the rest of the code tests.)
  int destination_index_ = -1;
  // Array of unichars, each with a set of fonts. Each unichar has at most
  // one entry in the vector.
  std::vector<UnicharAndFonts> unichars_;
};
// ShapeTable is a class to encapsulate the triple indirection that is
// used here.
// ShapeTable is a vector of shapes.
// Each shape is a vector of UnicharAndFonts representing the set of unichars
// that the shape represents.
// Each UnicharAndFonts also lists the fonts of the unichar_id that were
// mapped to the shape during training.
class TESS_API ShapeTable {
public:
  ShapeTable();
  // The UNICHARSET reference supplied here, or in set_unicharset below must
  // exist for the entire life of the ShapeTable. It is used only by DebugStr.
  explicit ShapeTable(const UNICHARSET &unicharset);
  // Deletes the owned Shape objects.
  ~ShapeTable() {
    for (auto data : shape_table_) {
      delete data;
    }
  }

  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE *fp) const;
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(TFile *fp);

  // Accessors.
  unsigned NumShapes() const {
    return shape_table_.size();
  }
  const UNICHARSET &unicharset() const {
    return *unicharset_;
  }
  // Returns the number of fonts used in this ShapeTable, computing it if
  // necessary.
  int NumFonts() const;
  // Shapetable takes a pointer to the UNICHARSET, so it must persist for the
  // entire life of the ShapeTable.
  void set_unicharset(const UNICHARSET &unicharset) {
    unicharset_ = &unicharset;
  }
  // Re-indexes the class_ids in the shapetable according to the given map.
  // Useful in conjunction with set_unicharset.
  void ReMapClassIds(const std::vector<int> &unicharset_map);
  // Returns a string listing the classes/fonts in a shape.
  std::string DebugStr(unsigned shape_id) const;
  // Returns a debug string summarizing the table.
  std::string SummaryStr() const;

  // Adds a new shape starting with the given unichar_id and font_id.
  // Returns the assigned index.
  unsigned AddShape(int unichar_id, int font_id);
  // Adds a copy of the given shape unless it is already present.
  // Returns the assigned index or index of existing shape if already present.
  unsigned AddShape(const Shape &other);
  // Removes the shape given by the shape index. All indices above are changed!
  void DeleteShape(unsigned shape_id);
  // Adds a font_id to the given existing shape index for the given
  // unichar_id. If the unichar_id is not in the shape, it is added.
  void AddToShape(unsigned shape_id, int unichar_id, int font_id);
  // Adds the given shape to the existing shape with the given index.
  void AddShapeToShape(unsigned shape_id, const Shape &other);
  // Returns the id of the shape that contains the given unichar and font.
  // If not found, returns -1.
  // If font_id < 0, the font_id is ignored and the first shape that matches
  // the unichar_id is returned.
  int FindShape(int unichar_id, int font_id) const;
  // Returns the first unichar_id and font_id in the given shape.
  void GetFirstUnicharAndFont(unsigned shape_id, int *unichar_id, int *font_id) const;

  // Accessors for the Shape with the given shape_id.
  const Shape &GetShape(unsigned shape_id) const {
    return *shape_table_[shape_id];
  }
  Shape *MutableShape(unsigned shape_id) {
    return shape_table_[shape_id];
  }

  // Expands all the classes/fonts in the shape individually to build
  // a ShapeTable.
  int BuildFromShape(const Shape &shape, const ShapeTable &master_shapes);

  // Returns true if the shapes are already merged.
  bool AlreadyMerged(unsigned shape_id1, unsigned shape_id2) const;
  // Returns true if any shape contains multiple unichars.
  bool AnyMultipleUnichars() const;
  // Returns the maximum number of unichars over all shapes.
  int MaxNumUnichars() const;
  // Merges shapes with a common unichar over the [start, end) interval.
  // Assumes single unichar per shape.
  void ForceFontMerges(unsigned start, unsigned end);
  // Returns the number of unichars in the master shape.
  unsigned MasterUnicharCount(unsigned shape_id) const;
  // Returns the sum of the font counts in the master shape.
  int MasterFontCount(unsigned shape_id) const;
  // Returns the number of unichars that would result from merging the shapes.
  int MergedUnicharCount(unsigned shape_id1, unsigned shape_id2) const;
  // Merges two shape_ids, leaving shape_id2 marked as merged.
  void MergeShapes(unsigned shape_id1, unsigned shape_id2);
  // Swaps two shape_ids.
  void SwapShapes(unsigned shape_id1, unsigned shape_id2);
  // Appends the master shapes from other to this.
  // Used to create a clean ShapeTable from a merged one, or to create a
  // copy of a ShapeTable.
  // If not nullptr, shape_map is set to map other shape_ids to this's
  // shape_ids.
  void AppendMasterShapes(const ShapeTable &other, std::vector<int> *shape_map);
  // Returns the number of master shapes remaining after merging.
  int NumMasterShapes() const;
  // Returns the destination of this shape, (if merged), taking into account
  // the fact that the destination may itself have been merged.
  // For a non-merged shape, returns the input shape_id.
  unsigned MasterDestinationIndex(unsigned shape_id) const;

  // Returns false if the unichars in neither shape is a subset of the other..
  bool SubsetUnichar(unsigned shape_id1, unsigned shape_id2) const;
  // Returns false if the unichars in neither shape is a subset of the other..
  bool MergeSubsetUnichar(int merge_id1, int merge_id2, unsigned shape_id) const;
  // Returns true if the unichar sets are equal between the shapes.
  bool EqualUnichars(unsigned shape_id1, unsigned shape_id2) const;
  bool MergeEqualUnichars(int merge_id1, int merge_id2, unsigned shape_id) const;
  // Returns true if there is a common unichar between the shapes.
  bool CommonUnichars(unsigned shape_id1, unsigned shape_id2) const;
  // Returns true if there is a common font id between the shapes.
  bool CommonFont(unsigned shape_id1, unsigned shape_id2) const;

  // Adds the unichars of the given shape_id to the vector of results. Any
  // unichar_id that is already present just has the fonts added to the
  // font set for that result without adding a new entry in the vector.
  // NOTE: it is assumed that the results are given to this function in order
  // of decreasing rating.
  // The unichar_map vector indicates the index of the results entry containing
  // each unichar, or -1 if the unichar is not yet included in results.
  void AddShapeToResults(const ShapeRating &shape_rating, std::vector<int> *unichar_map,
                         std::vector<UnicharRating> *results) const;

private:
  // Adds the given unichar_id to the results if needed, updating unichar_map
  // and returning the index of unichar in results.
  int AddUnicharToResults(int unichar_id, float rating, std::vector<int> *unichar_map,
                          std::vector<UnicharRating> *results) const;

  // Pointer to a provided unicharset used only by the Debugstr member.
  const UNICHARSET *unicharset_;
  // Vector of pointers to the Shapes in this ShapeTable. Owned: deleted in
  // the destructor and in DeleteShape.
  std::vector<Shape *> shape_table_;

  // Cached data calculated on demand (lazily by NumFonts; reset to 0 by
  // operations that may change the font set).
  mutable int num_fonts_;
};
} // namespace tesseract.
#endif // TESSERACT_CLASSIFY_SHAPETABLE_H_
|
2301_81045437/tesseract
|
src/classify/shapetable.h
|
C++
|
apache-2.0
| 16,053
|
///////////////////////////////////////////////////////////////////////
// File: tessclassifier.cpp
// Description: Tesseract implementation of a ShapeClassifier.
// Author: Ray Smith
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "tessclassifier.h"
#include "classify.h"
#include "trainingsample.h"
namespace tesseract {
// Classifies the given [training] sample, writing to results.
// See ShapeClassifier for a full description.
int TessClassifier::UnicharClassifySample(const TrainingSample &sample, Image page_pix, int debug,
                                          UNICHAR_ID keep_this,
                                          std::vector<UnicharRating> *results) {
  // Remember the current debug settings so they can be restored afterwards.
  const int saved_matcher_level = classify_->matcher_debug_level;
  const int saved_matcher_flags = classify_->matcher_debug_flags;
  const int saved_classify_level = classify_->classify_debug_level;
  if (debug) {
    // Temporarily force the control parameters to values that make the
    // classifier emit verbose debug output.
    classify_->matcher_debug_level.set_value(2);
    classify_->matcher_debug_flags.set_value(25);
    classify_->classify_debug_level.set_value(3);
  }
  classify_->CharNormTrainingSample(pruner_only_, keep_this, sample, results);
  if (debug) {
    // Put the original debug settings back.
    classify_->matcher_debug_level.set_value(saved_matcher_level);
    classify_->matcher_debug_flags.set_value(saved_matcher_flags);
    classify_->classify_debug_level.set_value(saved_classify_level);
  }
  return results->size();
}
// Provides access to the ShapeTable that this classifier works with.
const ShapeTable *TessClassifier::GetShapeTable() const {
  const ShapeTable *table = classify_->shape_table();
  return table;
}
// Provides access to the UNICHARSET that this classifier works with.
// Only needs to be overridden if GetShapeTable() can return nullptr.
const UNICHARSET &TessClassifier::GetUnicharset() const {
  const UNICHARSET &charset = classify_->unicharset;
  return charset;
}
// Displays classification as the given shape_id. Creates as many windows
// as it feels fit, using index as a guide for placement. Adds any created
// windows to the windows output and returns a new index that may be used
// by any subsequent classifiers. Caller waits for the user to view and
// then destroys the windows by clearing the vector.
int TessClassifier::DisplayClassifyAs(const TrainingSample &sample, Image page_pix, int unichar_id,
                                      int index, std::vector<ScrollView *> &windows) {
  // TODO(rays) Fix this so it works with both flat and real shapetables.
  // if (GetShapeTable() != nullptr)
  //   shape_id = BestShapeForUnichar(sample, page_pix, unichar_id, nullptr);
  int shape_id = unichar_id;
  if (shape_id >= 0 && !UnusedClassIdIn(classify_->PreTrainedTemplates, shape_id)) {
#ifndef GRAPHICS_DISABLED
    classify_->ShowBestMatchFor(shape_id, sample.features(), sample.num_features());
#endif
  } else if (shape_id >= 0) {
    // Valid id but no templates to display for it.
    tprintf("No built-in templates for class/shape %d\n", shape_id);
  }
  return index;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/tessclassifier.cpp
|
C++
|
apache-2.0
| 3,659
|
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: tessclassifier.h
// Description: Tesseract implementation of a ShapeClassifier.
// Author: Ray Smith
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef THIRD_PARTY_TESSERACT_CLASSIFY_TESSCLASSIFIER_H_
#define THIRD_PARTY_TESSERACT_CLASSIFY_TESSCLASSIFIER_H_
#include "shapeclassifier.h"
namespace tesseract {
class Classify;
class TrainingSample;
// Tesseract implementation of a ShapeClassifier.
// Due to limitations in the content of TrainingSample, this currently
// only works for the static classifier and only works if the ShapeTable
// in classify is not nullptr.
class TESS_API TessClassifier : public ShapeClassifier {
public:
  // Neither argument is owned. classify must outlive this object; see the
  // pruner_only_ member below for the meaning of pruner_only.
  TessClassifier(bool pruner_only, tesseract::Classify *classify)
      : pruner_only_(pruner_only), classify_(classify) {}
  ~TessClassifier() override = default;

  // Classifies the given [training] sample, writing to results.
  // See ShapeClassifier for a full description.
  int UnicharClassifySample(const TrainingSample &sample, Image page_pix, int debug,
                            UNICHAR_ID keep_this, std::vector<UnicharRating> *results) override;
  // Provides access to the ShapeTable that this classifier works with.
  const ShapeTable *GetShapeTable() const override;
  // Provides access to the UNICHARSET that this classifier works with.
  // Only needs to be overridden if GetShapeTable() can return nullptr.
  const UNICHARSET &GetUnicharset() const override;
  // Displays classification as the given shape_id. Creates as many windows
  // as it feels fit, using index as a guide for placement. Adds any created
  // windows to the windows output and returns a new index that may be used
  // by any subsequent classifiers. Caller waits for the user to view and
  // then destroys the windows by clearing the vector.
  int DisplayClassifyAs(const TrainingSample &sample, Image page_pix, int unichar_id, int index,
                        std::vector<ScrollView *> &windows) override;

private:
  // Indicates that this classifier is to use just the ClassPruner, or the
  // full classifier if false.
  bool pruner_only_;
  // Borrowed pointer to the actual Tesseract classifier.
  tesseract::Classify *classify_;
};
} // namespace tesseract
#endif /* THIRD_PARTY_TESSERACT_CLASSIFY_TESSCLASSIFIER_H_ */
|
2301_81045437/tesseract
|
src/classify/tessclassifier.h
|
C++
|
apache-2.0
| 3,068
|
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#define _USE_MATH_DEFINES // for M_PI
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "trainingsample.h"
#include "helpers.h"
#include "intfeaturespace.h"
#include "normfeat.h"
#include "shapetable.h"
#include <allheaders.h>
#include <cmath> // for M_PI
namespace tesseract {
// Center of randomizing operations (feature coordinates are scaled about
// this point in RandomizedCopy).
const int kRandomizingCenter = 128;

// Randomizing factors.
// Vertical shifts applied to feature Y coordinates when randomizing.
const int TrainingSample::kYShiftValues[kSampleYShiftSize] = {6, 3, -3, -6, 0};
// Scale factors applied about kRandomizingCenter when randomizing.
const double TrainingSample::kScaleValues[kSampleScaleSize] = {1.0625, 0.9375, 1.0};
// Releases the feature arrays owned by this sample.
TrainingSample::~TrainingSample() {
  delete[] features_;
  delete[] micro_features_;
}
// WARNING! Serialize/DeSerialize do not save/restore the "cache" data
// members, which is mostly the mapped features, and the weight.
// It is assumed these can all be reconstructed from what is saved.
// Writes to the given file. Returns false in case of error.
bool TrainingSample::Serialize(FILE *fp) const {
  // Writes one fixed-size item, reporting success. || short-circuiting below
  // preserves the exact write order of the original field sequence.
  const auto write1 = [fp](const void *data, size_t size) {
    return fwrite(data, size, 1, fp) == 1;
  };
  if (!write1(&class_id_, sizeof(class_id_)) ||
      !write1(&font_id_, sizeof(font_id_)) ||
      !write1(&page_num_, sizeof(page_num_))) {
    return false;
  }
  if (!bounding_box_.Serialize(fp)) {
    return false;
  }
  if (!write1(&num_features_, sizeof(num_features_)) ||
      !write1(&num_micro_features_, sizeof(num_micro_features_)) ||
      !write1(&outline_length_, sizeof(outline_length_))) {
    return false;
  }
  // Variable-length arrays, preceded above by their element counts.
  if (fwrite(features_, sizeof(*features_), num_features_, fp) != num_features_) {
    return false;
  }
  if (fwrite(micro_features_, sizeof(*micro_features_), num_micro_features_, fp) !=
      num_micro_features_) {
    return false;
  }
  if (fwrite(cn_feature_, sizeof(*cn_feature_), kNumCNParams, fp) != kNumCNParams) {
    return false;
  }
  return fwrite(geo_feature_, sizeof(*geo_feature_), GeoCount, fp) == GeoCount;
}
// Creates from the given file. Returns nullptr in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
TrainingSample *TrainingSample::DeSerializeCreate(bool swap, FILE *fp) {
  auto *sample = new TrainingSample;
  if (!sample->DeSerialize(swap, fp)) {
    // Reading failed part-way: discard the partially filled sample.
    delete sample;
    sample = nullptr;
  }
  return sample;
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool TrainingSample::DeSerialize(bool swap, FILE *fp) {
  // Fields must be read in exactly the order written by Serialize().
  if (fread(&class_id_, sizeof(class_id_), 1, fp) != 1) {
    return false;
  }
  if (fread(&font_id_, sizeof(font_id_), 1, fp) != 1) {
    return false;
  }
  if (fread(&page_num_, sizeof(page_num_), 1, fp) != 1) {
    return false;
  }
  if (!bounding_box_.DeSerialize(swap, fp)) {
    return false;
  }
  if (fread(&num_features_, sizeof(num_features_), 1, fp) != 1) {
    return false;
  }
  if (fread(&num_micro_features_, sizeof(num_micro_features_), 1, fp) != 1) {
    return false;
  }
  if (fread(&outline_length_, sizeof(outline_length_), 1, fp) != 1) {
    return false;
  }
  if (swap) {
    // NOTE(review): font_id_ and page_num_ are multi-byte ints but are not
    // byte-swapped here -- presumably an oversight; confirm whether any
    // cross-endian readers depend on these fields.
    ReverseN(&class_id_, sizeof(class_id_));
    ReverseN(&num_features_, sizeof(num_features_));
    ReverseN(&num_micro_features_, sizeof(num_micro_features_));
    ReverseN(&outline_length_, sizeof(outline_length_));
  }
  // Arbitrarily limit the number of elements to protect against bad data.
  if (num_features_ > UINT16_MAX) {
    return false;
  }
  if (num_micro_features_ > UINT16_MAX) {
    return false;
  }
  // Replace any existing feature arrays with freshly read data. On a short
  // read the object is left with allocated but partially filled arrays;
  // callers treat the sample as dead on a false return.
  delete[] features_;
  features_ = new INT_FEATURE_STRUCT[num_features_];
  if (fread(features_, sizeof(*features_), num_features_, fp) != num_features_) {
    return false;
  }
  delete[] micro_features_;
  micro_features_ = new MicroFeature[num_micro_features_];
  if (fread(micro_features_, sizeof(*micro_features_), num_micro_features_, fp) !=
      num_micro_features_) {
    return false;
  }
  if (fread(cn_feature_, sizeof(*cn_feature_), kNumCNParams, fp) != kNumCNParams) {
    return false;
  }
  if (fread(geo_feature_, sizeof(*geo_feature_), GeoCount, fp) != GeoCount) {
    return false;
  }
  return true;
}
// Saves the given features into a TrainingSample. The features are copied,
// so may be deleted afterwards. Caller owns (and deletes) the result.
TrainingSample *TrainingSample::CopyFromFeatures(const INT_FX_RESULT_STRUCT &fx_info,
                                                 const TBOX &bounding_box,
                                                 const INT_FEATURE_STRUCT *features,
                                                 int num_features) {
  auto *result = new TrainingSample;
  // Deep-copy the int feature array.
  result->num_features_ = num_features;
  result->features_ = new INT_FEATURE_STRUCT[num_features];
  memcpy(result->features_, features, num_features * sizeof(features[0]));
  result->outline_length_ = fx_info.Length;
  // Geometric features come straight from the bounding box.
  result->geo_feature_[GeoBottom] = bounding_box.bottom();
  result->geo_feature_[GeoTop] = bounding_box.top();
  result->geo_feature_[GeoWidth] = bounding_box.width();
  // Generate the cn_feature_ from the fx_info.
  result->cn_feature_[CharNormY] = MF_SCALE_FACTOR * (fx_info.Ymean - kBlnBaselineOffset);
  result->cn_feature_[CharNormLength] = MF_SCALE_FACTOR * fx_info.Length / LENGTH_COMPRESSION;
  result->cn_feature_[CharNormRx] = MF_SCALE_FACTOR * fx_info.Rx;
  result->cn_feature_[CharNormRy] = MF_SCALE_FACTOR * fx_info.Ry;
  // The fresh copy has no indexed/mapped feature caches yet.
  result->features_are_indexed_ = false;
  result->features_are_mapped_ = false;
  return result;
}
// Returns the cn_feature as a FEATURE_STRUCT* needed by cntraining.
// Caller takes ownership of the returned feature.
FEATURE_STRUCT *TrainingSample::GetCNFeature() const {
  auto *feature = new FEATURE_STRUCT(&CharNormDesc);
  int param = 0;
  while (param < kNumCNParams) {
    feature->Params[param] = cn_feature_[param];
    ++param;
  }
  return feature;
}
// Constructs and returns a copy "randomized" by the method given by
// the randomizer index. If index is out of [0, kSampleRandomSize) then
// an exact copy is returned.
TrainingSample *TrainingSample::RandomizedCopy(int index) const {
  TrainingSample *copy = Copy();
  if (index < 0 || index >= kSampleRandomSize) {
    return copy; // Out-of-range index: exact copy.
  }
  ++index; // Skip the first shift/scale combination.
  const int y_shift = kYShiftValues[index / kSampleScaleSize];
  const double scale = kScaleValues[index % kSampleScaleSize];
  for (uint32_t f = 0; f < num_features_; ++f) {
    // Scale each coordinate about the randomizing center, then clip back
    // to the valid uint8 coordinate range; Y additionally gets shifted.
    double x_val = (features_[f].X - kRandomizingCenter) * scale + kRandomizingCenter;
    copy->features_[f].X = ClipToRange<int>(x_val + 0.5, 0, UINT8_MAX);
    double y_val =
        (features_[f].Y - kRandomizingCenter) * scale + (kRandomizingCenter + y_shift);
    copy->features_[f].Y = ClipToRange<int>(y_val + 0.5, 0, UINT8_MAX);
  }
  return copy;
}
// Constructs and returns an exact copy.
// NOTE: page_num_, bounding_box_ and outline_length_ are not copied here
// and keep their default-constructed values in the returned sample.
TrainingSample *TrainingSample::Copy() const {
  auto *clone = new TrainingSample;
  clone->class_id_ = class_id_;
  clone->font_id_ = font_id_;
  clone->weight_ = weight_;
  clone->sample_index_ = sample_index_;
  // Deep-copy the int feature array, if any.
  clone->num_features_ = num_features_;
  if (num_features_ > 0) {
    clone->features_ = new INT_FEATURE_STRUCT[num_features_];
    memcpy(clone->features_, features_, num_features_ * sizeof(features_[0]));
  }
  // Deep-copy the micro feature array, if any.
  clone->num_micro_features_ = num_micro_features_;
  if (num_micro_features_ > 0) {
    clone->micro_features_ = new MicroFeature[num_micro_features_];
    memcpy(clone->micro_features_, micro_features_,
           num_micro_features_ * sizeof(micro_features_[0]));
  }
  // Fixed-size feature arrays are always copied.
  memcpy(clone->cn_feature_, cn_feature_, sizeof(*cn_feature_) * kNumCNParams);
  memcpy(clone->geo_feature_, geo_feature_, sizeof(*geo_feature_) * GeoCount);
  return clone;
}
// Extracts the needed information from the CHAR_DESC_STRUCT.
// Each *_type argument is an index into char_desc->FeatureSets selecting
// the feature set of the corresponding kind. Missing sets are reported via
// tprintf and leave the corresponding members empty/unchanged.
void TrainingSample::ExtractCharDesc(int int_feature_type, int micro_type, int cn_type,
                                     int geo_type, CHAR_DESC_STRUCT *char_desc) {
  // Extract the INT features.
  delete[] features_;
  FEATURE_SET_STRUCT *char_features = char_desc->FeatureSets[int_feature_type];
  if (char_features == nullptr) {
    tprintf("Error: no features to train on of type %s\n", kIntFeatureType);
    num_features_ = 0;
    features_ = nullptr;
  } else {
    num_features_ = char_features->NumFeatures;
    features_ = new INT_FEATURE_STRUCT[num_features_];
    for (uint32_t f = 0; f < num_features_; ++f) {
      features_[f].X = static_cast<uint8_t>(char_features->Features[f]->Params[IntX]);
      features_[f].Y = static_cast<uint8_t>(char_features->Features[f]->Params[IntY]);
      features_[f].Theta = static_cast<uint8_t>(char_features->Features[f]->Params[IntDir]);
      features_[f].CP_misses = 0;
    }
  }
  // Extract the Micro features.
  delete[] micro_features_;
  char_features = char_desc->FeatureSets[micro_type];
  if (char_features == nullptr) {
    tprintf("Error: no features to train on of type %s\n", kMicroFeatureType);
    num_micro_features_ = 0;
    micro_features_ = nullptr;
  } else {
    num_micro_features_ = char_features->NumFeatures;
    micro_features_ = new MicroFeature[num_micro_features_];
    for (uint32_t f = 0; f < num_micro_features_; ++f) {
      for (int d = 0; d < (int)MicroFeatureParameter::MFCount; ++d) {
        micro_features_[f][d] = char_features->Features[f]->Params[d];
      }
    }
  }
  // Extract the CN feature. Exactly one CN feature is expected per sample.
  char_features = char_desc->FeatureSets[cn_type];
  if (char_features == nullptr) {
    tprintf("Error: no CN feature to train on.\n");
  } else {
    ASSERT_HOST(char_features->NumFeatures == 1);
    cn_feature_[CharNormY] = char_features->Features[0]->Params[CharNormY];
    cn_feature_[CharNormLength] = char_features->Features[0]->Params[CharNormLength];
    cn_feature_[CharNormRx] = char_features->Features[0]->Params[CharNormRx];
    cn_feature_[CharNormRy] = char_features->Features[0]->Params[CharNormRy];
  }
  // Extract the Geo feature. Exactly one Geo feature is expected per sample.
  char_features = char_desc->FeatureSets[geo_type];
  if (char_features == nullptr) {
    tprintf("Error: no Geo feature to train on.\n");
  } else {
    ASSERT_HOST(char_features->NumFeatures == 1);
    geo_feature_[GeoBottom] = char_features->Features[0]->Params[GeoBottom];
    geo_feature_[GeoTop] = char_features->Features[0]->Params[GeoTop];
    geo_feature_[GeoWidth] = char_features->Features[0]->Params[GeoWidth];
  }
  // Freshly extracted features invalidate any indexed/mapped caches.
  features_are_indexed_ = false;
  features_are_mapped_ = false;
}
// Sets the mapped_features_ from the features_ using the provided
// feature_space to the indexed versions of the features.
void TrainingSample::IndexFeatures(const IntFeatureSpace &feature_space) {
  // IndexAndSortFeatures writes the sorted index list directly into
  // mapped_features_. (Removed a dead local vector that was never used
  // and misleadingly suggested a different output target.)
  feature_space.IndexAndSortFeatures(features_, num_features_, &mapped_features_);
  features_are_indexed_ = true;
  features_are_mapped_ = false;
}
// Returns a pix representing the sample. (Int features only.)
// The caller owns the returned pix. If unicharset is non-null, the pix is
// labelled with the sample's character for debug viewers.
Image TrainingSample::RenderToPix(const UNICHARSET *unicharset) const {
  Image pix = pixCreate(kIntFeatureExtent, kIntFeatureExtent, 1);
  for (uint32_t f = 0; f < num_features_; ++f) {
    int start_x = features_[f].X;
    int start_y = kIntFeatureExtent - features_[f].Y;
    // Theta is an 8-bit quantized angle; map it to [-pi, pi) and draw a
    // 6-pixel direction stroke from the feature position.
    double dx = cos((features_[f].Theta / 256.0) * 2.0 * M_PI - M_PI);
    double dy = -sin((features_[f].Theta / 256.0) * 2.0 * M_PI - M_PI);
    for (int i = 0; i <= 5; ++i) {
      int x = static_cast<int>(start_x + dx * i);
      int y = static_cast<int>(start_y + dy * i);
      // Clip against kIntFeatureExtent (was hard-coded 256) so the bounds
      // always match the dimensions passed to pixCreate above.
      if (x >= 0 && x < kIntFeatureExtent && y >= 0 && y < kIntFeatureExtent) {
        pixSetPixel(pix, x, y, 1);
      }
    }
  }
  if (unicharset != nullptr) {
    pixSetText(pix, unicharset->id_to_unichar(class_id_));
  }
  return pix;
}
#ifndef GRAPHICS_DISABLED
// Displays the features in the given window with the given color.
// Rendering of each individual int feature is delegated to RenderIntFeature.
void TrainingSample::DisplayFeatures(ScrollView::Color color, ScrollView *window) const {
  for (uint32_t f = 0; f < num_features_; ++f) {
    RenderIntFeature(window, &features_[f], color);
  }
}
#endif // !GRAPHICS_DISABLED
// Returns a pix of the original sample image. The pix is padded all round
// by padding wherever possible.
// The returned Pix must be pixDestroyed after use.
// If the input page_pix is nullptr, nullptr is returned.
Image TrainingSample::GetSamplePix(int padding, Image page_pix) const {
  if (page_pix == nullptr) {
    return nullptr;
  }
  int page_width = pixGetWidth(page_pix);
  int page_height = pixGetHeight(page_pix);
  TBOX padded_box = bounding_box();
  padded_box.pad(padding, padding);
  // Clip the padded_box to the limits of the page.
  TBOX page_box(0, 0, page_width, page_height);
  padded_box &= page_box;
  // BUG FIX: build the clip rectangle from padded_box, not page_box. The
  // previous code used page_box here, which returned the entire page
  // instead of the padded sample region. Note the top-left y flip: TBOX is
  // bottom-left origin, leptonica Box is top-left origin.
  Box *box = boxCreate(padded_box.left(), page_height - padded_box.top(), padded_box.width(),
                       padded_box.height());
  Image sample_pix = pixClipRectangle(page_pix, box, nullptr);
  boxDestroy(&box);
  return sample_pix;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/trainingsample.cpp
|
C++
|
apache-2.0
| 13,503
|
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TRAINING_TRAININGSAMPLE_H_
#define TESSERACT_TRAINING_TRAININGSAMPLE_H_
#include "elst.h"
#include "featdefs.h"
#include "intfx.h"
#include "intmatcher.h"
#include "matrix.h"
#include "mf.h"
#include "mfdefs.h"
#include "picofeat.h"
#include "shapetable.h"
#include "unicharset.h"
struct Pix;
namespace tesseract {
class IntFeatureMap;
class IntFeatureSpace;
class ShapeTable;
// Number of elements of cn_feature_.
static const int kNumCNParams = 4;
// Number of ways to shift the features when randomizing.
static const int kSampleYShiftSize = 5;
// Number of ways to scale the features when randomizing.
static const int kSampleScaleSize = 3;
// Total number of different ways to manipulate the features when randomizing.
// The first and last combinations are removed to avoid an excessive
// top movement (first) and an identity transformation (last).
// WARNING: To avoid patterned duplication of samples, be sure to keep
// kSampleRandomSize prime!
// Eg with current values (kSampleYShiftSize = 5 and TkSampleScaleSize = 3)
// kSampleRandomSize is 13, which is prime.
static const int kSampleRandomSize = kSampleYShiftSize * kSampleScaleSize - 2;
// ASSERT_IS_PRIME(kSampleRandomSize) !!
// A single character sample with its extracted features, as used by the
// training tools. The sample owns its feature arrays.
class TESS_API TrainingSample : public ELIST_LINK {
public:
  // Default-constructs an empty sample with no features.
  TrainingSample()
      : class_id_(INVALID_UNICHAR_ID)
      , font_id_(0)
      , page_num_(0)
      , num_features_(0)
      , num_micro_features_(0)
      , outline_length_(0)
      , features_(nullptr)
      , micro_features_(nullptr)
      , weight_(1.0)
      , max_dist_(0.0)
      , sample_index_(0)
      , features_are_indexed_(false)
      , features_are_mapped_(false)
      , is_error_(false) {}
  // Frees the owned feature arrays.
  ~TrainingSample();

  // Saves the given features into a TrainingSample. The features are copied,
  // so may be deleted afterwards. Delete the return value after use.
  static TrainingSample *CopyFromFeatures(const INT_FX_RESULT_STRUCT &fx_info,
                                          const TBOX &bounding_box,
                                          const INT_FEATURE_STRUCT *features, int num_features);
  // Returns the cn_feature as a FEATURE_STRUCT* needed by cntraining.
  FEATURE_STRUCT *GetCNFeature() const;
  // Constructs and returns a copy "randomized" by the method given by
  // the randomizer index. If index is out of [0, kSampleRandomSize) then
  // an exact copy is returned.
  TrainingSample *RandomizedCopy(int index) const;
  // Constructs and returns an exact copy.
  TrainingSample *Copy() const;

  // WARNING! Serialize/DeSerialize do not save/restore the "cache" data
  // members, which is mostly the mapped features, and the weight.
  // It is assumed these can all be reconstructed from what is saved.
  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE *fp) const;
  // Creates from the given file. Returns nullptr in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  static TrainingSample *DeSerializeCreate(bool swap, FILE *fp);
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE *fp);

  // Extracts the needed information from the CHAR_DESC_STRUCT.
  void ExtractCharDesc(int feature_type, int micro_type, int cn_type, int geo_type,
                       CHAR_DESC_STRUCT *char_desc);

  // Sets the mapped_features_ from the features_ using the provided
  // feature_space to the indexed versions of the features.
  void IndexFeatures(const IntFeatureSpace &feature_space);

  // Returns a pix representing the sample. (Int features only.)
  Image RenderToPix(const UNICHARSET *unicharset) const;
  // Displays the features in the given window with the given color.
  void DisplayFeatures(ScrollView::Color color, ScrollView *window) const;

  // Returns a pix of the original sample image. The pix is padded all round
  // by padding wherever possible.
  // The returned Pix must be pixDestroyed after use.
  // If the input page_pix is nullptr, nullptr is returned.
  Image GetSamplePix(int padding, Image page_pix) const;

  // Accessors.
  UNICHAR_ID class_id() const {
    return class_id_;
  }
  void set_class_id(int id) {
    class_id_ = id;
  }
  int font_id() const {
    return font_id_;
  }
  void set_font_id(int id) {
    font_id_ = id;
  }
  int page_num() const {
    return page_num_;
  }
  void set_page_num(int page) {
    page_num_ = page;
  }
  const TBOX &bounding_box() const {
    return bounding_box_;
  }
  void set_bounding_box(const TBOX &box) {
    bounding_box_ = box;
  }
  uint32_t num_features() const {
    return num_features_;
  }
  const INT_FEATURE_STRUCT *features() const {
    return features_;
  }
  uint32_t num_micro_features() const {
    return num_micro_features_;
  }
  const MicroFeature *micro_features() const {
    return micro_features_;
  }
  int outline_length() const {
    return outline_length_;
  }
  float cn_feature(int index) const {
    return cn_feature_[index];
  }
  int geo_feature(int index) const {
    return geo_feature_[index];
  }
  double weight() const {
    return weight_;
  }
  void set_weight(double value) {
    weight_ = value;
  }
  double max_dist() const {
    return max_dist_;
  }
  void set_max_dist(double value) {
    max_dist_ = value;
  }
  int sample_index() const {
    return sample_index_;
  }
  void set_sample_index(int value) {
    sample_index_ = value;
  }
  bool features_are_mapped() const {
    return features_are_mapped_;
  }
  // Requires features_are_mapped(); asserts otherwise.
  const std::vector<int> &mapped_features() const {
    ASSERT_HOST(features_are_mapped_);
    return mapped_features_;
  }
  // Requires a prior successful IndexFeatures() call; asserts otherwise.
  const std::vector<int> &indexed_features() const {
    ASSERT_HOST(features_are_indexed_);
    return mapped_features_;
  }
  bool is_error() const {
    return is_error_;
  }
  void set_is_error(bool value) {
    is_error_ = value;
  }

private:
  // Unichar id that this sample represents. There obviously must be a
  // reference UNICHARSET somewhere. Usually in TrainingSampleSet.
  UNICHAR_ID class_id_;
  // Font id in which this sample was printed. Refers to a fontinfo_table_ in
  // MasterTrainer.
  int font_id_;
  // Number of page that the sample came from.
  int page_num_;
  // Bounding box of sample in original image.
  TBOX bounding_box_;
  // Number of INT_FEATURE_STRUCT in features_ array.
  uint32_t num_features_;
  // Number of MicroFeature in micro_features_ array.
  uint32_t num_micro_features_;
  // Total length of outline in the baseline normalized coordinate space.
  // See comment in WERD_RES class definition for a discussion of coordinate
  // spaces.
  int outline_length_;
  // Array of features. Owned.
  INT_FEATURE_STRUCT *features_;
  // Array of features. Owned.
  MicroFeature *micro_features_;
  // The one and only CN feature. Indexed by NORM_PARAM_NAME enum.
  float cn_feature_[kNumCNParams];
  // The one and only geometric feature. (Aims at replacing cn_feature_).
  // Indexed by GeoParams enum in picofeat.h
  int geo_feature_[GeoCount];

  // Non-serialized cache data.
  // Weight used for boosting training.
  double weight_;
  // Maximum distance to other samples of same class/font used in computing
  // the canonical sample.
  double max_dist_;
  // Global index of this sample.
  int sample_index_;

public:
  // both are used in training tools
  // hide after refactoring
  // Indexed/mapped features, as indicated by the bools below.
  std::vector<int> mapped_features_;
  bool features_are_indexed_;
  bool features_are_mapped_;

private:
  // True if the last classification was an error by the current definition.
  bool is_error_;

  // Randomizing factors.
  static const int kYShiftValues[kSampleYShiftSize];
  static const double kScaleValues[kSampleScaleSize];
};
ELISTIZEH(TrainingSample)
} // namespace tesseract
#endif // TESSERACT_TRAINING_TRAININGSAMPLE_H_
|
2301_81045437/tesseract
|
src/classify/trainingsample.h
|
C++
|
apache-2.0
| 8,605
|
/******************************************************************************
** Filename: bitvec.h
** Purpose: Routines for manipulating bit vectors
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef BITVEC_H
#define BITVEC_H
#include <cstddef> // for size_t
#include <cstdint> // for uint32_t
/*-----------------------------------------------------------------------------
          Include Files and Type Defines
-----------------------------------------------------------------------------*/
// A bit vector is stored as an array of 32-bit words; the caller tracks its
// length (in words) separately.
using BIT_VECTOR = uint32_t *;

//< no of bits in a BIT_VECTOR element
const size_t BITSINLONG = 8 * sizeof(uint32_t);

/*-----------------------------------------------------------------------------
            Public Function Prototypes
-----------------------------------------------------------------------------*/
// Clears all bits in the first `length` words of `array`.
static inline void zero_all_bits(BIT_VECTOR array, size_t length) {
  for (size_t index = 0; index < length; index++) {
    array[index] = 0;
  }
}

// Sets all bits in the first `length` words of `array`.
static inline void set_all_bits(BIT_VECTOR array, size_t length) {
  for (size_t index = 0; index < length; index++) {
    array[index] = ~0;
  }
}

// Copies `length` words of bits from `source` to `dest`.
static inline void copy_all_bits(BIT_VECTOR source, BIT_VECTOR dest, size_t length) {
  for (size_t index = 0; index < length; index++) {
    dest[index] = source[index];
  }
}

// Single-bit accessors. The literal is unsigned (1u) so that forming the
// mask for bit 31 of a word never shifts into the sign bit of an int.
#define SET_BIT(array, bit) (array[bit / BITSINLONG] |= 1u << (bit & (BITSINLONG - 1)))
#define reset_bit(array, bit) (array[bit / BITSINLONG] &= ~(1u << (bit & (BITSINLONG - 1))))
#define test_bit(array, bit) (array[bit / BITSINLONG] & (1u << (bit & (BITSINLONG - 1))))

// Returns the number of 32-bit words needed to hold NumBits bits.
static inline size_t WordsInVectorOfSize(size_t NumBits) {
  return (NumBits + BITSINLONG - 1) / BITSINLONG;
}

/**
 * This routine frees a bit vector.
 *
 * @param BitVector bit vector to be freed
 *
 */
static inline void FreeBitVector(BIT_VECTOR BitVector) {
  delete[] BitVector;
}

/*---------------------------------------------------------------------------*/
/**
 * Allocate and return a new bit vector large enough to
 * hold the specified number of bits. The contents are uninitialized;
 * call zero_all_bits() or set_all_bits() before use.
 *
 * @param NumBits number of bits in new bit vector
 *
 * @return New bit vector (free with FreeBitVector).
 */
static inline BIT_VECTOR NewBitVector(size_t NumBits) {
  return new uint32_t[WordsInVectorOfSize(NumBits)];
}
#endif
|
2301_81045437/tesseract
|
src/cutil/bitvec.h
|
C++
|
apache-2.0
| 2,934
|
/******************************************************************************
#
# File: oldlist.cpp
# Description: List processing procedures.
# Author: Mark Seaman, Software Productivity
#
# (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
#
###############################################################################
This file contains a set of general purpose list manipulation routines.
These routines can be used in a wide variety of ways to provide several
different popular data structures. A new list can be created by declaring
a variable of type 'LIST', and can be initialized with the value 'NIL_LIST'.
All of these routines check for the NIL_LIST condition before dereferencing
pointers. NOTE: There is a users' manual available in printed form from
Mark Seaman at (303) 350-4492 at Greeley Hard Copy.
To implement a STACK use:
push to add to the Stack l = push(l, (LIST)"jim");
pop to remove items from the Stack l = pop(l);
first_node to access the head name = (char *)first_node(l);
To implement a QUEUE use:
push_last to add to the Queue l = push_last(l, (LIST)"x");
pop remove items from the Queue l = pop(l);
first_node to access the head name = (char *)first_node (l);
To implement LISP like functions use:
first_node CAR x = (int)first_node(l);
rest CDR l = list_rest (l);
push CONS l = push(l, (LIST)this);
last LAST x = last(l);
concat APPEND l = concat(r, s);
count LENGTH x = count(l);
search MEMBER if (search(l, x, nullptr))
The following rules of closure exist for the functions provided.
a = first_node (push (a, b))
b = list_rest (push (a, b))
a = push (pop (a), a)) For all a <> NIL_LIST
a = reverse (reverse (a))
******************************************************************************/
#include "oldlist.h"
#include "errcode.h" // for ASSERT_HOST
#include <cstdio>
#include <cstring> // for strcmp
namespace tesseract {
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**********************************************************************
 *  i s   s a m e
 *
 *  Default comparison routine: treat both list items as C strings and
 *  return non-zero (true) when they are equal, zero otherwise.
 **********************************************************************/
static int is_same(void *item1, void *item2) {
  auto *str1 = static_cast<char *>(item1);
  auto *str2 = static_cast<char *>(item2);
  return strcmp(str1, str2) == 0;
}
/**********************************************************************
* d e l e t e d
*
* Delete all the elements out of the current list that match the key.
* This operation destroys the original list. The caller will supply a
* routine that will compare each node to the
* key, and return a non-zero value when they match.
**********************************************************************/
LIST delete_d(LIST list, void *key, int_compare is_equal) {
  LIST result = NIL_LIST;   // head of the list of surviving cells
  LIST last_one = NIL_LIST; // tail cell of the surviving list
  if (is_equal == nullptr) {
    is_equal = is_same; // default: compare node pointers as C strings
  }
  while (list != NIL_LIST) {
    if (!(*is_equal)(list->first_node(), key)) {
      // No match: detach this cell from the input and append it to result.
      if (last_one == NIL_LIST) {
        // First surviving cell becomes the head of the result list.
        last_one = list;
        list = list->list_rest();
        result = last_one;
        set_rest(last_one, NIL_LIST);
      } else {
        // Link the cell after the current tail, then terminate the tail.
        set_rest(last_one, list);
        last_one = list;
        list = list->list_rest();
        set_rest(last_one, NIL_LIST);
      }
    } else {
      // Match: pop() frees the cell and advances to the rest of the list.
      list = pop(list);
    }
  }
  return (result);
}
/**********************************************************************
 *  d e s t r o y
 *
 *  Free every cell of the given list back to the heap (the nodes the
 *  cells point at are NOT freed).  Returns NIL_LIST so the caller can
 *  reset its list variable in one statement.
 **********************************************************************/
LIST destroy(LIST list) {
  while (list != NIL_LIST) {
    LIST rest = list->list_rest();
    delete list;
    list = rest;
  }
  return NIL_LIST;
}
/**********************************************************************
 *  d e s t r o y   n o d e s
 *
 *  Apply the given destructor to every non-null node in the list and
 *  free the list cells themselves.  The destructor must not be null.
 **********************************************************************/
void destroy_nodes(LIST list, void_dest destructor) {
  ASSERT_HOST(destructor != nullptr);
  for (; list != NIL_LIST; list = pop(list)) {
    void *item = list->first_node();
    if (item != nullptr) {
      (*destructor)(item);
    }
  }
}
/**********************************************************************
 *  l a s t
 *
 *  Return the final cell of the list (not the node it holds).
 *  Precondition: the list must be non-empty.
 **********************************************************************/
LIST last(LIST var_list) {
  LIST tail = var_list;
  while (tail->list_rest() != NIL_LIST) {
    tail = tail->list_rest();
  }
  return tail;
}
/**********************************************************************
* p o p
*
* Return the list with the first element removed. Destroy the space
* that it occupied in the list.
**********************************************************************/
LIST pop(LIST list) {
  LIST temp = list->list_rest(); // remember the remainder before freeing
  delete list;                   // frees the cell only, not the node it holds
  return temp;
}
/**********************************************************************
 *  p u s h
 *
 *  Allocate a fresh list cell holding `element` and prepend it to
 *  `list`.  Returns the new head of the list.
 **********************************************************************/
LIST push(LIST list, void *element) {
  LIST cell = new list_rec;
  cell->node = static_cast<LIST>(element);
  set_rest(cell, list);
  return cell;
}
/**********************************************************************
 *  p u s h   l a s t
 *
 *  Append `item` in a new cell at the end of `list` and return the
 *  (possibly new) head of the list.
 **********************************************************************/
LIST push_last(LIST list, void *item) {
  if (list == NIL_LIST) {
    // Empty list: the new cell is the whole list.
    return push(NIL_LIST, item);
  }
  LIST tail = last(list);
  tail->next = push(NIL_LIST, item);
  return list;
}
/**********************************************************************
 *  s e a r c h
 *
 *  Scan the list for a cell whose node matches `key` according to
 *  `is_equal` (defaults to C-string comparison when null).  Returns
 *  the sublist starting at the matching cell, or NIL_LIST if no cell
 *  matches.
 **********************************************************************/
LIST search(LIST list, void *key, int_compare is_equal) {
  if (is_equal == nullptr) {
    is_equal = is_same;
  }
  for (; list != NIL_LIST; list = list->list_rest()) {
    if ((*is_equal)(list->first_node(), key)) {
      return list;
    }
  }
  return NIL_LIST;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/cutil/oldlist.cpp
|
C++
|
apache-2.0
| 7,511
|
/******************************************************************************
*
* File: oldlist.h (Formerly list.h)
* Description: List processing procedures declarations.
* Author: Mark Seaman, SW Productivity
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
******************************************************************************
*
* This file contains the interface for a set of general purpose list
* manipulation routines. For the implementation of these routines see
* the file "list.c".
*
******************************************************************************
*
* INDEX
* =======
*
* BASICS:
* -------
* first_node - Macro to return the first list node (not the cell).
* list_rest - Macro the return the second list cell
* pop - Destroy one list cell
* push - Create one list cell and set the node and next fields
*
* ITERATION:
* -----------------
* iterate - Macro to create a for loop to visit each cell.
*
* LIST CELL COUNTS:
* -----------------
* count - Returns the number of list cells in the list.
* last - Returns the last list cell.
*
* TRANSFORMS: (Note: These functions all modify the input list.)
* ----------
* delete_d - Removes the requested elements from the list.
* push_last - Add a new element onto the end of a list.
*
* SETS:
* -----
* search - Return the pointer to the list cell whose node matches.
*
* CELL OPERATIONS:
* -----------------
* destroy - Return all list cells in a list.
* destroy_nodes - Apply a function to each list cell and destroy the list.
* set_rest - Assign the next field in a list cell.
*
***********************************************************************/
#ifndef LIST_H
#define LIST_H
#include <tesseract/export.h>
#include <cstddef> // for size_t
namespace tesseract {
/*----------------------------------------------------------------------
T y p e s
----------------------------------------------------------------------*/
#define NIL_LIST static_cast<LIST>(nullptr)
using int_compare = int (*)(void *, void *);
using void_dest = void (*)(void *);
/*----------------------------------------------------------------------
M a c r o s
----------------------------------------------------------------------*/
/**********************************************************************
* i t e r a t e
*
* Visit each node in the list. Replace the old list with the list
* minus the head. Continue until the list is NIL_LIST.
**********************************************************************/
#define iterate(l) for (; (l) != nullptr; (l) = (l)->list_rest())
/**********************************************************************
* s e t r e s t
*
* Change the "next" field of a list element to point to a desired place.
*
* #define set_rest(l,node) l->next = node;
**********************************************************************/
#define set_rest(l, cell) ((l)->next = (cell))
struct list_rec {
  list_rec *node; // payload of this cell (cast to/from the stored item)
  list_rec *next; // remainder of the list; nullptr marks the end

  // Head of this cell (LISP CAR).
  list_rec *first_node() {
    return node;
  }
  // Remainder of the list after this cell (LISP CDR).
  list_rec *list_rest() {
    return next;
  }

  //********************************************************************
  // Walk the chain starting at this cell and return the cell count.
  //********************************************************************
  size_t size() {
    size_t count = 0;
    for (auto *cell = this; cell != nullptr; cell = cell->list_rest()) {
      ++count;
    }
    return count;
  }
};
using LIST = list_rec *;
/*----------------------------------------------------------------------
Public Function Prototypes
----------------------------------------------------------------------*/
LIST delete_d(LIST list, void *key, int_compare is_equal);
TESS_API
LIST destroy(LIST list);
void destroy_nodes(LIST list, void_dest destructor);
LIST last(LIST var_list);
LIST pop(LIST list);
TESS_API
LIST push(LIST list, void *element);
TESS_API
LIST push_last(LIST list, void *item);
LIST search(LIST list, void *key, int_compare is_equal);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/cutil/oldlist.h
|
C++
|
apache-2.0
| 4,827
|
/******************************************************************************
*
* File: context.cpp (Formerly context.c)
* Description: Context checking functions
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1990, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#include "dict.h"
#include "unicharset.h"
namespace tesseract {
static const int kMinAbsoluteGarbageWordLength = 10;
static const float kMinAbsoluteGarbageAlphanumFrac = 0.5f;
// State machine used by Dict::case_ok() to validate the capitalization
// pattern of a word.  Rows are states, columns are character classes in
// the order: P (punctuation/other), U (upper case), L (lower case),
// D (digit).  A transition to -1 marks an invalid case pattern.
const int case_state_table[6][4] = {
    {/* 0. Beginning of word */
     /*    P   U   L   D */
     /* -1. Error on case */
     0, 1, 5, 4},
    {/* 1. After initial capital */
     0, 3, 2, 4},
    {/* 2. After lower case */
     0, -1, 2, -1},
    {/* 3. After upper case */
     0, 3, -1, 4},
    {/* 4. After a digit */
     0, -1, -1, 4},
    {/* 5. After initial lower case */
     5, -1, 2, -1},
};
// Runs the word through case_state_table and returns non-zero when the
// capitalization pattern of the word is acceptable.  Each character is
// classified as upper/lower/digit/other, the table is consulted, and a
// transition to -1 rejects the word immediately.
int Dict::case_ok(const WERD_CHOICE &word) const {
  const UNICHARSET *unicharset = word.unicharset();
  int state = 0;
  for (unsigned x = 0; x < word.length(); ++x) {
    UNICHAR_ID ch_id = word.unichar_id(x);
    int column = 0; // default column: punctuation / other
    if (unicharset->get_isupper(ch_id)) {
      column = 1;
    } else if (unicharset->get_islower(ch_id)) {
      column = 2;
    } else if (unicharset->get_isdigit(ch_id)) {
      column = 3;
    }
    state = case_state_table[state][column];
    if (state == -1) {
      return false;
    }
  }
  return state != 5; // a word that is only an initial lower case is bad
}
bool Dict::absolute_garbage(const WERD_CHOICE &word, const UNICHARSET &unicharset) {
if (word.length() < kMinAbsoluteGarbageWordLength) {
return false;
}
int num_alphanum = 0;
for (unsigned x = 0; x < word.length(); ++x) {
num_alphanum +=
(unicharset.get_isalpha(word.unichar_id(x)) || unicharset.get_isdigit(word.unichar_id(x)));
}
return (static_cast<float>(num_alphanum) / static_cast<float>(word.length()) <
kMinAbsoluteGarbageAlphanumFrac);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/dict/context.cpp
|
C++
|
apache-2.0
| 2,745
|
/********************************************************************************
*
* File: dawg.cpp (Formerly dawg.c)
* Description: Use a Directed Acyclic Word Graph
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*********************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "dawg.h"
#include "dict.h"
#include "helpers.h"
#include "tprintf.h"
#include <memory>
/*----------------------------------------------------------------------
F u n c t i o n s f o r D a w g
----------------------------------------------------------------------*/
namespace tesseract {
// Destructor.
// It is defined out-of-line here (rather than defaulted in the header) so
// the compiler can create a single vtable for Dawg in this translation
// unit instead of weak vtables in every compilation unit.
Dawg::~Dawg() = default;
// Returns true if the given word prefix is not contraindicated by this dawg:
// every character must have a matching outgoing edge from the node reached
// so far.  When requires_complete is true the edge for the final character
// must also be marked as a word ending, i.e. the exact complete word must
// be present.  The empty word is accepted as a prefix but not as a
// complete word.
bool Dawg::prefix_in_dawg(const WERD_CHOICE &word,
                          bool requires_complete) const {
  if (word.empty()) {
    return !requires_complete;
  }
  NODE_REF node = 0; // traversal always starts at the root node
  int end_index = word.length() - 1;
  for (int i = 0; i < end_index; i++) {
    EDGE_REF edge = edge_char_of(node, word.unichar_id(i), false);
    if (edge == NO_EDGE) {
      return false;
    }
    if ((node = next_node(edge)) == 0) {
      // This only happens if all words following this edge terminate --
      // there are no larger words.  See Trie::add_word_to_dawg()
      return false;
    }
  }
  // Now check the last character.
  return edge_char_of(node, word.unichar_id(end_index), requires_complete) !=
         NO_EDGE;
}
// Returns true if the given word is present as a complete word in the Dawg
// (a prefix match alone is not sufficient).
bool Dawg::word_in_dawg(const WERD_CHOICE &word) const {
  return prefix_in_dawg(word, true);
}
// Checks the Dawg for the words listed (one per line) in the given file.
// Returns the number of words from the file that are missing from the Dawg,
// logging each miss via tprintf.  When enable_wildcard is true, occurrences
// of kWildcard ("*") in a word match any unichar; otherwise the wildcard
// character is looked up literally.  Returns 0 if filename is nullptr;
// aborts (ASSERT_HOST) if the file cannot be opened.
int Dawg::check_for_words(const char *filename, const UNICHARSET &unicharset,
                          bool enable_wildcard) const {
  if (filename == nullptr) {
    return 0;
  }
  FILE *word_file;
  char string[CHARS_PER_LINE];
  int misses = 0;
  UNICHAR_ID wildcard = unicharset.unichar_to_id(kWildcard);
  word_file = fopen(filename, "r");
  if (word_file == nullptr) {
    tprintf("Error: Could not open file %s\n", filename);
    ASSERT_HOST(word_file); // unrecoverable: abort with a diagnostic
  }
  while (fgets(string, CHARS_PER_LINE, word_file) != nullptr) {
    chomp_string(string); // remove newline
    WERD_CHOICE word(string, unicharset);
    if (word.length() > 0 && !word.contains_unichar_id(INVALID_UNICHAR_ID)) {
      if (!match_words(&word, 0, 0,
                       enable_wildcard ? wildcard : INVALID_UNICHAR_ID)) {
        tprintf("Missing word: %s\n", string);
        ++misses;
      }
    } else {
      // Line contained characters unknown to the unicharset (or was empty).
      tprintf("Failed to create a valid word from %s\n", string);
    }
  }
  fclose(word_file);
  // Report the total number of missing words when debugging is enabled.
  if (debug_level_) {
    tprintf("Number of lost words=%d\n", misses);
  }
  return misses;
}
// For each word in the Dawg, calls the given (permanent) callback with the
// WERD_CHOICE of that word.  Iteration starts from the root node.
void Dawg::iterate_words(const UNICHARSET &unicharset,
                         std::function<void(const WERD_CHOICE *)> cb) const {
  WERD_CHOICE word(&unicharset);
  iterate_words_rec(word, 0, cb);
}
// Helper: converts the WERD_CHOICE to its UTF-8 string form and forwards
// that text to the given callback.
static void CallWithUTF8(const std::function<void(const char *)> &cb,
                         const WERD_CHOICE *wc) {
  std::string s;
  wc->string_and_lengths(&s, nullptr);
  cb(s.c_str());
}
void Dawg::iterate_words(const UNICHARSET &unicharset,
const std::function<void(const char *)> &cb) const {
using namespace std::placeholders; // for _1
std::function<void(const WERD_CHOICE *)> shim(
std::bind(CallWithUTF8, cb, _1));
WERD_CHOICE word(&unicharset);
iterate_words_rec(word, 0, shim);
}
// Recursive worker for iterate_words(): depth-first traversal of the dawg.
// word_so_far holds the characters accumulated on the path to to_explore;
// for every outgoing edge the extended word is reported via cb if the edge
// ends a word, and traversal continues into the edge's target node.
void Dawg::iterate_words_rec(
    const WERD_CHOICE &word_so_far, NODE_REF to_explore,
    const std::function<void(const WERD_CHOICE *)> &cb) const {
  NodeChildVector children;
  this->unichar_ids_of(to_explore, &children, false);
  for (auto &i : children) {
    WERD_CHOICE next_word(word_so_far);
    next_word.append_unichar_id(i.unichar_id, 1, 0.0, 0.0);
    if (this->end_of_word(i.edge_ref)) {
      cb(&next_word); // a complete word ends on this edge
    }
    NODE_REF next = next_node(i.edge_ref);
    if (next != 0) { // 0 means no further words along this edge
      iterate_words_rec(next_word, next, cb);
    }
  }
}
// Returns true if the tail of word (from index onwards) can be matched
// starting at the given node.  If the character at index equals wildcard,
// every child unichar of the node is substituted in turn and matching is
// retried recursively with the SAME index and node (the recursive call then
// takes the non-wildcard branch); the wildcard is restored afterwards.
// Otherwise the edge for the current character is followed, succeeding when
// the final character lands on a word-ending edge.
bool Dawg::match_words(WERD_CHOICE *word, uint32_t index, NODE_REF node,
                       UNICHAR_ID wildcard) const {
  if (wildcard != INVALID_UNICHAR_ID && word->unichar_id(index) == wildcard) {
    bool any_matched = false;
    NodeChildVector vec;
    this->unichar_ids_of(node, &vec, false);
    for (auto &i : vec) {
      // Temporarily replace the wildcard with this child's unichar.
      word->set_unichar_id(i.unichar_id, index);
      if (match_words(word, index, node, wildcard)) {
        any_matched = true;
      }
    }
    // Restore the wildcard so the caller sees the word unchanged.
    word->set_unichar_id(wildcard, index);
    return any_matched;
  } else {
    auto word_end = index == word->length() - 1;
    auto edge = edge_char_of(node, word->unichar_id(index), word_end);
    if (edge != NO_EDGE) { // normal edge in DAWG
      node = next_node(edge);
      if (word_end) {
        if (debug_level_ > 1) {
          word->print("match_words() found: ");
        }
        return true;
      } else if (node != 0) {
        return match_words(word, index + 1, node, wildcard);
      }
    }
  }
  return false;
}
// Initializes the bit fields and masks used to pack an EDGE_RECORD, based
// on the unicharset size.  Layout (low to high bits): letter bits, then
// NUM_FLAG_BITS flag bits, then the next-node bits.
void Dawg::init(int unicharset_size) {
  ASSERT_HOST(unicharset_size > 0);
  unicharset_size_ = unicharset_size;
  // Set bit masks. We will use the value unicharset_size_ as a null char, so
  // the actual number of unichars is unicharset_size_ + 1.
  flag_start_bit_ = ceil(log(unicharset_size_ + 1.0) / log(2.0));
  next_node_start_bit_ = flag_start_bit_ + NUM_FLAG_BITS;
  letter_mask_ = ~(~0ull << flag_start_bit_);
  next_node_mask_ = ~0ull << (flag_start_bit_ + NUM_FLAG_BITS);
  flags_mask_ = ~(letter_mask_ | next_node_mask_);
}
/*----------------------------------------------------------------------
F u n c t i o n s f o r S q u i s h e d D a w g
----------------------------------------------------------------------*/
// Releases the edge array owned by this SquishedDawg.
SquishedDawg::~SquishedDawg() {
  delete[] edges_;
}
// Returns the edge out of the given node that carries unichar_id (and, when
// word_end is true, that is also marked as a word ending), or NO_EDGE.
// The root node (0) has its forward edges sorted, so it is searched with a
// binary search; all other nodes are scanned linearly until the node's
// last edge.
EDGE_REF SquishedDawg::edge_char_of(NODE_REF node, UNICHAR_ID unichar_id,
                                    bool word_end) const {
  EDGE_REF edge = node;
  if (node == 0) { // binary search
    EDGE_REF start = 0;
    EDGE_REF end = num_forward_edges_in_node0 - 1;
    int compare;
    while (start <= end) {
      edge = (start + end) >> 1; // (start + end) / 2
      compare = given_greater_than_edge_rec(NO_EDGE, word_end, unichar_id,
                                            edges_[edge]);
      if (compare == 0) { // given == vec[k]
        return edge;
      } else if (compare == 1) { // given > vec[k]
        start = edge + 1;
      } else { // given < vec[k]
        end = edge - 1;
      }
    }
  } else { // linear search
    if (edge != NO_EDGE && edge_occupied(edge)) {
      do {
        if ((unichar_id_from_edge_rec(edges_[edge]) == unichar_id) &&
            (!word_end || end_of_word_from_edge_rec(edges_[edge]))) {
          return (edge);
        }
      } while (!last_edge(edge++));
    }
  }
  return (NO_EDGE); // not found
}
// Counts the forward edges of the given node: edges are stored
// contiguously starting at the node's index, and the run ends at the
// first edge flagged as "last".  Returns 0 if the node has no forward
// edges.
int32_t SquishedDawg::num_forward_edges(NODE_REF node) const {
  int32_t count = 0;
  EDGE_REF edge = node;
  if (forward_edge(edge)) {
    bool more = true;
    while (more) {
      ++count;
      more = !last_edge(edge);
      ++edge;
    }
  }
  return count;
}
/**
 * Prints (for debugging) the edges of the given node: first the run of
 * forward edges starting at the node, then any following run of backward
 * edges.  Printing stops once more than max_num_edges edges past the node
 * have been shown.
 *
 * Fix: the backward-edge loop previously truncated at the unrelated
 * constant MAX_NODE_EDGES_DISPLAY instead of honoring the max_num_edges
 * parameter like the forward-edge loop; both loops now use max_num_edges.
 *
 * @param node          the node whose edges should be printed
 * @param max_num_edges limit on the number of edges printed
 */
void SquishedDawg::print_node(NODE_REF node, int max_num_edges) const {
  if (node == NO_EDGE) {
    return; // nothing to print
  }
  EDGE_REF edge = node;
  const char *forward_string = "FORWARD";
  const char *backward_string = " ";
  const char *last_string = "LAST";
  const char *not_last_string = " ";
  const char *eow_string = "EOW";
  const char *not_eow_string = " ";
  const char *direction;
  const char *is_last;
  const char *eow;
  UNICHAR_ID unichar_id;
  if (edge_occupied(edge)) {
    do { // print the forward edges of this node
      direction = forward_edge(edge) ? forward_string : backward_string;
      is_last = last_edge(edge) ? last_string : not_last_string;
      eow = end_of_word(edge) ? eow_string : not_eow_string;
      unichar_id = edge_letter(edge);
      tprintf(REFFORMAT " : next = " REFFORMAT ", unichar_id = %d, %s %s %s\n",
              edge, next_node(edge), unichar_id, direction, is_last, eow);
      if (edge - node > max_num_edges) {
        return;
      }
    } while (!last_edge(edge++));
    if (edge < num_edges_ && edge_occupied(edge) && backward_edge(edge)) {
      do { // print the backward edges that follow
        direction = forward_edge(edge) ? forward_string : backward_string;
        is_last = last_edge(edge) ? last_string : not_last_string;
        eow = end_of_word(edge) ? eow_string : not_eow_string;
        unichar_id = edge_letter(edge);
        tprintf(REFFORMAT " : next = " REFFORMAT
                          ", unichar_id = %d, %s %s %s\n",
                edge, next_node(edge), unichar_id, direction, is_last, eow);
        if (edge - node > max_num_edges) {
          return;
        }
      } while (!last_edge(edge++));
    }
  } else {
    tprintf(REFFORMAT " : no edges in this node\n", node);
  }
  tprintf("\n");
}
// Prints (for debugging) a single edge: its index, target node, unichar id,
// and the FORWARD/LAST/EOW flags.  Prints "NO_EDGE" for the sentinel value.
void SquishedDawg::print_edge(EDGE_REF edge) const {
  if (edge == NO_EDGE) {
    tprintf("NO_EDGE\n");
  } else {
    tprintf(REFFORMAT " : next = " REFFORMAT ", unichar_id = '%d', %s %s %s\n",
            edge, next_node(edge), edge_letter(edge),
            (forward_edge(edge) ? "FORWARD" : " "),
            (last_edge(edge) ? "LAST" : " "),
            (end_of_word(edge) ? "EOW" : ""));
  }
}
// Reads a SquishedDawg from the given file: magic number, unicharset size,
// edge count, then the packed edge array.  Returns false on any read error
// or if the magic number does not match.  On success the bit-field layout
// is (re)initialized via Dawg::init() and edges_ is allocated and filled.
bool SquishedDawg::read_squished_dawg(TFile *file) {
  if (debug_level_) {
    tprintf("Reading squished dawg\n");
  }
  // Read the magic number and check that it matches kDawgMagicNumber, as
  // auto-endian fixing should make sure it is always correct.
  int16_t magic;
  if (!file->DeSerialize(&magic)) {
    return false;
  }
  if (magic != kDawgMagicNumber) {
    tprintf("Bad magic number on dawg: %d vs %d\n", magic, kDawgMagicNumber);
    return false;
  }
  int32_t unicharset_size;
  if (!file->DeSerialize(&unicharset_size)) {
    return false;
  }
  if (!file->DeSerialize(&num_edges_)) {
    return false;
  }
  ASSERT_HOST(num_edges_ > 0); // DAWG should not be empty
  Dawg::init(unicharset_size);
  edges_ = new EDGE_RECORD[num_edges_];
  if (!file->DeSerialize(&edges_[0], num_edges_)) {
    return false;
  }
  if (debug_level_ > 2) {
    // Dump the whole dawg at the highest debug level.
    tprintf("type: %d lang: %s perm: %d unicharset_size: %d num_edges: %d\n",
            type_, lang_.c_str(), perm_, unicharset_size_, num_edges_);
    for (EDGE_REF edge = 0; edge < num_edges_; ++edge) {
      print_edge(edge);
    }
  }
  return true;
}
// Builds a map from edge index to the node number that the node starting at
// that edge will have after the dawg is written out.  Node 0 keeps number 0;
// subsequent nodes are numbered by the running count of forward edges seen
// so far.  Entries that do not start a node are left as -1.  The number of
// nodes found is returned through num_nodes.
std::unique_ptr<EDGE_REF[]> SquishedDawg::build_node_map(
    int32_t *num_nodes) const {
  EDGE_REF edge;
  std::unique_ptr<EDGE_REF[]> node_map(new EDGE_REF[num_edges_]);
  int32_t node_counter;
  int32_t num_edges;
  for (edge = 0; edge < num_edges_; edge++) { // init all slots
    node_map[edge] = -1;
  }
  // Nodes after the root start numbering from the root's edge count.
  node_counter = num_forward_edges(0);
  *num_nodes = 0;
  for (edge = 0; edge < num_edges_; edge++) { // search all slots
    if (forward_edge(edge)) {
      (*num_nodes)++; // count nodes links
      node_map[edge] = (edge ? node_counter : 0);
      num_edges = num_forward_edges(edge);
      if (edge != 0) {
        node_counter += num_edges;
      }
      edge += num_edges; // skip the forward edges just counted
      if (edge >= num_edges_) {
        break;
      }
      if (backward_edge(edge)) {
        // Skip the run of backward edges following the forward run.
        while (!last_edge(edge++)) {
          ;
        }
      }
      edge--; // compensate for the loop increment
    }
  }
  return node_map;
}
// Writes this dawg to the given file in the format read_squished_dawg()
// expects: magic number, unicharset size, forward-edge count, then each
// forward edge with its next-node field remapped through build_node_map().
// Backward edges are skipped.  The in-memory edges are restored after each
// record is written.  Returns false on any serialization error.
bool SquishedDawg::write_squished_dawg(TFile *file) {
  EDGE_REF edge;
  int32_t num_edges;
  int32_t node_count = 0;
  EDGE_REF old_index;
  EDGE_RECORD temp_record;
  if (debug_level_) {
    tprintf("write_squished_dawg\n");
  }
  std::unique_ptr<EDGE_REF[]> node_map(build_node_map(&node_count));
  // Write the magic number to help detecting a change in endianness.
  int16_t magic = kDawgMagicNumber;
  if (!file->Serialize(&magic)) {
    return false;
  }
  if (!file->Serialize(&unicharset_size_)) {
    return false;
  }
  // Count the number of edges in this Dawg.
  num_edges = 0;
  for (edge = 0; edge < num_edges_; edge++) {
    if (forward_edge(edge)) {
      num_edges++;
    }
  }
  // Write edge count to file.
  if (!file->Serialize(&num_edges)) {
    return false;
  }
  if (debug_level_) {
    tprintf("%d nodes in DAWG\n", node_count);
    tprintf("%d edges in DAWG\n", num_edges);
  }
  for (edge = 0; edge < num_edges_; edge++) {
    if (forward_edge(edge)) { // write forward edges
      do {
        // Temporarily patch the next-node field to the remapped number,
        // serialize a copy, then restore the original in-memory record.
        old_index = next_node_from_edge_rec(edges_[edge]);
        set_next_node(edge, node_map[old_index]);
        temp_record = edges_[edge];
        if (!file->Serialize(&temp_record)) {
          return false;
        }
        set_next_node(edge, old_index);
      } while (!last_edge(edge++));
      if (edge >= num_edges_) {
        break;
      }
      if (backward_edge(edge)) { // skip back links
        while (!last_edge(edge++)) {
          ;
        }
      }
      edge--; // compensate for the loop increment
    }
  }
  return true;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/dict/dawg.cpp
|
C++
|
apache-2.0
| 13,683
|
/******************************************************************************
*
* File: dawg.h
* Description: Definition of a class that represents Directed Acyclic Word
* Graph (DAWG), functions to build and manipulate the DAWG.
* Author: Mark Seaman, SW Productivity
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#ifndef DICT_DAWG_H_
#define DICT_DAWG_H_
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include <cinttypes> // for PRId64
#include <functional> // for std::function
#include <memory>
#include "elst.h"
#include "params.h"
#include "ratngs.h"
#ifndef __GNUC__
# ifdef _WIN32
# define NO_EDGE static_cast<int64_t>(0xffffffffffffffffi64)
# endif /*_WIN32*/
#else
# define NO_EDGE static_cast<int64_t>(0xffffffffffffffffll)
#endif /*__GNUC__*/
namespace tesseract {
class UNICHARSET;
using EDGE_RECORD = uint64_t;
using EDGE_ARRAY = EDGE_RECORD *;
using EDGE_REF = int64_t;
using NODE_REF = int64_t;
using NODE_MAP = EDGE_REF *;
// One outgoing edge of a dawg node: the unichar id carried on the edge
// paired with the EDGE_REF identifying the edge itself.
struct NodeChild {
  UNICHAR_ID unichar_id;
  EDGE_REF edge_ref;
  NodeChild(UNICHAR_ID id, EDGE_REF ref) : unichar_id(id), edge_ref(ref) {}
  // Default-constructed children are invalid: no unichar, no edge.
  NodeChild() : unichar_id(INVALID_UNICHAR_ID), edge_ref(NO_EDGE) {}
};
using NodeChildVector = std::vector<NodeChild>;
using SuccessorList = std::vector<int>;
using SuccessorListsVector = std::vector<SuccessorList *>;
// The kinds of dawgs the dictionary code distinguishes (used, e.g., to
// index kDawgSuccessors below).
enum DawgType {
  DAWG_TYPE_PUNCTUATION,
  DAWG_TYPE_WORD,
  DAWG_TYPE_NUMBER,
  DAWG_TYPE_PATTERN,

  DAWG_TYPE_COUNT // number of enum entries
};
/*----------------------------------------------------------------------
C o n s t a n t s
----------------------------------------------------------------------*/
#define FORWARD_EDGE static_cast<int32_t>(0)
#define BACKWARD_EDGE static_cast<int32_t>(1)
#define MAX_NODE_EDGES_DISPLAY static_cast<int64_t>(100)
#define MARKER_FLAG static_cast<int64_t>(1)
#define DIRECTION_FLAG static_cast<int64_t>(2)
#define WERD_END_FLAG static_cast<int64_t>(4)
#define LETTER_START_BIT 0
#define NUM_FLAG_BITS 3
#define REFFORMAT "%" PRId64
// kDawgSuccessors[t1][t2] is true when a dawg of type t2 may follow a dawg
// of type t1: words and numbers may follow punctuation and vice versa;
// patterns have no successors.
static const bool kDawgSuccessors[DAWG_TYPE_COUNT][DAWG_TYPE_COUNT] = {
    {false, true, true, false}, // for DAWG_TYPE_PUNCTUATION
    {true, false, false, false}, // for DAWG_TYPE_WORD
    {true, false, false, false}, // for DAWG_TYPE_NUMBER
    {false, false, false, false}, // for DAWG_TYPE_PATTERN
};
static const char kWildcard[] = "*";
/*----------------------------------------------------------------------
C l a s s e s a n d S t r u c t s
----------------------------------------------------------------------*/
//
/// Abstract class (an interface) that declares methods needed by the
/// various tesseract classes to operate on SquishedDawg and Trie objects.
///
/// This class initializes all the edge masks (since their usage by
/// SquishedDawg and Trie is identical) and implements simple accessors
/// for each of the fields encoded in an EDGE_RECORD.
/// This class also implements word_in_dawg() and check_for_words()
/// (since they use only the public methods of SquishedDawg and Trie
/// classes that are inherited from the Dawg base class).
//
class TESS_API Dawg {
public:
/// Magic number to determine endianness when reading the Dawg from file.
static const int16_t kDawgMagicNumber = 42;
/// A special unichar id that indicates that any appropriate pattern
/// (e.g.dictionary word, 0-9 digit, etc) can be inserted instead
/// Used for expressing patterns in punctuation and number Dawgs.
static const UNICHAR_ID kPatternUnicharID = 0;
inline DawgType type() const {
return type_;
}
inline const std::string &lang() const {
return lang_;
}
inline PermuterType permuter() const {
return perm_;
}
/// Virtual destructor: Dawg instances are deleted through base pointers.
virtual ~Dawg();
/// Returns true if the given word is in the Dawg.
bool word_in_dawg(const WERD_CHOICE &word) const;
// Returns true if the given word prefix is not contraindicated by the dawg.
// If requires_complete is true, then the exact complete word must be present.
bool prefix_in_dawg(const WERD_CHOICE &prefix, bool requires_complete) const;
/// Checks the Dawg for the words that are listed in the requested file.
/// Returns the number of words in the given file missing from the Dawg.
int check_for_words(const char *filename, const UNICHARSET &unicharset,
                    bool enable_wildcard) const;
// For each word in the Dawg, call the given (permanent) callback with the
// text (UTF-8) version of the word.
void iterate_words(const UNICHARSET &unicharset,
                   std::function<void(const WERD_CHOICE *)> cb) const;
// For each word in the Dawg, call the given (permanent) callback with the
// text (UTF-8) version of the word.
void iterate_words(const UNICHARSET &unicharset,
                   const std::function<void(const char *)> &cb) const;
// Pure virtual functions that must be implemented by the derived classes.
/// Returns the edge that corresponds to the letter out of this node.
virtual EDGE_REF edge_char_of(NODE_REF node, UNICHAR_ID unichar_id,
                              bool word_end) const = 0;
/// Fills the given NodeChildVector with all the unichar ids (and the
/// corresponding EDGE_REFs) for which there is an edge out of this node.
virtual void unichar_ids_of(NODE_REF node, NodeChildVector *vec,
                            bool word_end) const = 0;
/// Returns the next node visited by following the edge
/// indicated by the given EDGE_REF.
virtual NODE_REF next_node(EDGE_REF edge_ref) const = 0;
/// Returns true if the edge indicated by the given EDGE_REF
/// marks the end of a word.
virtual bool end_of_word(EDGE_REF edge_ref) const = 0;
/// Returns UNICHAR_ID stored in the edge indicated by the given EDGE_REF.
virtual UNICHAR_ID edge_letter(EDGE_REF edge_ref) const = 0;
/// Prints the contents of the node indicated by the given NODE_REF.
/// At most max_num_edges will be printed.
virtual void print_node(NODE_REF node, int max_num_edges) const = 0;
/// Fills vec with unichar ids that represent the character classes
/// of the given unichar_id.
/// Base-class default: does nothing; pattern-capable derived dawgs are
/// expected to override this.
virtual void unichar_id_to_patterns(UNICHAR_ID unichar_id,
                                    const UNICHARSET &unicharset,
                                    std::vector<UNICHAR_ID> *vec) const {
  // Parameters deliberately unused in the default implementation.
  (void)unichar_id;
  (void)unicharset;
  (void)vec;
}
/// Returns the given EDGE_REF if the EDGE_RECORD that it points to has
/// a self loop and the given unichar_id matches the unichar_id stored in the
/// EDGE_RECORD, returns NO_EDGE otherwise.
/// Base-class default: non-pattern dawgs have no pattern loops, so it
/// always reports "no edge"; pattern dawgs override this.
virtual EDGE_REF pattern_loop_edge(EDGE_REF edge_ref, UNICHAR_ID unichar_id,
                                   bool word_end) const {
  (void)edge_ref;
  (void)unichar_id;
  (void)word_end;
  // Fix: the documented contract is to return NO_EDGE. The previous code
  // returned false (i.e. 0), which is itself a valid edge reference and
  // could be mistaken for a real loop edge by callers comparing != NO_EDGE.
  return NO_EDGE;
}
protected:
/// Non-public constructor: Dawg is an abstract base, so only concrete
/// subclasses (e.g. SquishedDawg) construct it. The edge-mask members are
/// filled in later by init() once the unicharset size is known.
Dawg(DawgType type, const std::string &lang, PermuterType perm,
     int debug_level)
    : lang_(lang),
      type_(type),
      perm_(perm),
      unicharset_size_(0),
      debug_level_(debug_level) {}
// Read accessors for the bit-packed fields of an EDGE_RECORD. Field
// positions come from the mask/shift members initialized by init().
/// Returns the next node visited by following this edge.
inline NODE_REF next_node_from_edge_rec(const EDGE_RECORD &edge_rec) const {
  return ((edge_rec & next_node_mask_) >> next_node_start_bit_);
}
/// Returns the marker flag of this edge.
inline bool marker_flag_from_edge_rec(const EDGE_RECORD &edge_rec) const {
  return (edge_rec & (MARKER_FLAG << flag_start_bit_)) != 0;
}
/// Returns the direction flag of this edge.
inline int direction_from_edge_rec(const EDGE_RECORD &edge_rec) const {
  return ((edge_rec & (DIRECTION_FLAG << flag_start_bit_))) ? BACKWARD_EDGE
                                                            : FORWARD_EDGE;
}
/// Returns true if this edge marks the end of a word.
inline bool end_of_word_from_edge_rec(const EDGE_RECORD &edge_rec) const {
  return (edge_rec & (WERD_END_FLAG << flag_start_bit_)) != 0;
}
/// Returns UNICHAR_ID recorded in this edge.
inline UNICHAR_ID unichar_id_from_edge_rec(
    const EDGE_RECORD &edge_rec) const {
  return ((edge_rec & letter_mask_) >> LETTER_START_BIT);
}
/// Overwrites the next-node field of *edge_rec with the given value,
/// leaving every other bit of the record untouched.
inline void set_next_node_in_edge_rec(EDGE_RECORD *edge_rec, EDGE_REF value) {
  const EDGE_RECORD new_field = (value << next_node_start_bit_) & next_node_mask_;
  *edge_rec = (*edge_rec & ~next_node_mask_) | new_field;
}
/// Marks *edge_rec as the last edge record in its sequence of edges.
inline void set_marker_flag_in_edge_rec(EDGE_RECORD *edge_rec) {
  *edge_rec = *edge_rec | (MARKER_FLAG << flag_start_bit_);
}
/// Compares (unichar_id, next_node, word_end) against the fields stored in
/// edge_rec, in that priority order.
/// Returns: 0 if edge_rec_match() reports a match,
///          1 if the first differing given value exceeds the stored one,
///         -1 otherwise.
inline int given_greater_than_edge_rec(NODE_REF next_node, bool word_end,
                                       UNICHAR_ID unichar_id,
                                       const EDGE_RECORD &edge_rec) const {
  const UNICHAR_ID rec_unichar_id = unichar_id_from_edge_rec(edge_rec);
  const NODE_REF rec_next_node = next_node_from_edge_rec(edge_rec);
  const bool rec_word_end = end_of_word_from_edge_rec(edge_rec);
  if (edge_rec_match(next_node, word_end, unichar_id, rec_next_node,
                     rec_word_end, rec_unichar_id)) {
    return 0;
  }
  // Compare field by field; the first mismatch decides the ordering.
  if (unichar_id != rec_unichar_id) {
    return unichar_id > rec_unichar_id ? 1 : -1;
  }
  if (next_node != rec_next_node) {
    return next_node > rec_next_node ? 1 : -1;
  }
  // Only "given true vs stored false" counts as greater for the end flag.
  return (word_end && !rec_word_end) ? 1 : -1;
}
/// Tests the given (next_node, word_end, unichar_id) triple against the
/// stored one. next_node == NO_EDGE acts as a wildcard for the node, and
/// word_end == false acts as a wildcard for the end-of-word flag.
inline bool edge_rec_match(NODE_REF next_node, bool word_end,
                           UNICHAR_ID unichar_id, NODE_REF other_next_node,
                           bool other_word_end,
                           UNICHAR_ID other_unichar_id) const {
  if (unichar_id != other_unichar_id) {
    return false;
  }
  if (next_node != NO_EDGE && next_node != other_next_node) {
    return false;
  }
  return !word_end || other_word_end;
}
/// Sets unicharset_size_.
/// Initializes the values of various masks from unicharset_size_.
void init(int unicharset_size);
/// Matches all of the words that are represented by this string.
/// If wildcard is set to something other than INVALID_UNICHAR_ID,
/// the *'s in this string are interpreted as wildcards.
/// WERD_CHOICE param is not passed by const so that wildcard searches
/// can modify it and work without having to copy WERD_CHOICEs.
bool match_words(WERD_CHOICE *word, uint32_t index, NODE_REF node,
                 UNICHAR_ID wildcard) const;
// Recursively iterate over all words in a dawg (see public iterate_words).
void iterate_words_rec(
    const WERD_CHOICE &word_so_far, NODE_REF to_explore,
    const std::function<void(const WERD_CHOICE *)> &cb) const;
// Member Variables.
std::string lang_;  // language this dawg was built for
DawgType type_;     // word / punctuation / number / pattern dawg
/// Permuter code that should be used if the word is found in this Dawg.
PermuterType perm_;
// Variables to construct various edge masks. Formerly:
// #define NEXT_EDGE_MASK (int64_t) 0xfffffff800000000i64
// #define FLAGS_MASK (int64_t) 0x0000000700000000i64
// #define LETTER_MASK (int64_t) 0x00000000ffffffffi64
uint64_t next_node_mask_ = 0;
uint64_t flags_mask_ = 0;
uint64_t letter_mask_ = 0;
// Size of the unicharset; init() derives the masks above from it.
int unicharset_size_;
int flag_start_bit_ = 0;
int next_node_start_bit_ = 0;
// Level of debug statements to print to stdout.
int debug_level_;
};
//
// DawgPosition keeps track of where we are in the primary dawg we're searching
// as well as where we may be in the "punctuation dawg" which may provide
// surrounding context.
//
// Example:
// punctuation dawg -- space is the "pattern character"
// " "     // no punctuation
// "' '"   // leading and trailing apostrophes
// " '"    // trailing apostrophe
// word dawg:
// "cat"
// "cab"
// "cat's"
//
// DawgPosition(dawg_index, dawg_ref, punc_index, punc_ref, rtp)
//
// DawgPosition(-1, NO_EDGE, p, pe, false)
// We're in the punctuation dawg, no other dawg has been started.
// (1) If there's a pattern edge as a punc dawg child of us,
// for each punc-following dawg starting with ch, produce:
// Result: DawgPosition(k, w, p', false)
// (2) If there's a valid continuation in the punc dawg, produce:
// Result: DawgPosition(-k, NO_EDGE, p', false)
//
// DawgPosition(k, w, -1, NO_EDGE, false)
// We're in dawg k. Going back to punctuation dawg is not an option.
// Follow ch in dawg k.
//
// DawgPosition(k, w, p, pe, false)
// We're in dawg k. Continue in dawg k and/or go back to the punc dawg.
// If ending, check that the punctuation dawg is also ok to end here.
//
// DawgPosition(k, w, p, pe, true)
// We're back in the punctuation dawg. Continuing there is the only option.
struct DawgPosition {
  DawgPosition() = default;
  DawgPosition(int dawg_idx, EDGE_REF dawgref, int punc_idx, EDGE_REF puncref,
               bool backtopunc)
      : dawg_ref(dawgref),
        punc_ref(puncref),
        dawg_index(dawg_idx),
        punc_index(punc_idx),
        back_to_punc(backtopunc) {}
  bool operator==(const DawgPosition &other) const {
    return dawg_index == other.dawg_index && dawg_ref == other.dawg_ref &&
           punc_index == other.punc_index && punc_ref == other.punc_ref &&
           back_to_punc == other.back_to_punc;
  }
  // Current edge in the primary dawg (NO_EDGE if none started yet).
  EDGE_REF dawg_ref = NO_EDGE;
  // Current edge in the punctuation dawg (NO_EDGE if not applicable).
  EDGE_REF punc_ref = NO_EDGE;
  // Index of the primary dawg in the dawgs_ vector, or -1 if none.
  int8_t dawg_index = -1;
  // Index of the punctuation dawg in the dawgs_ vector, or -1 if none.
  int8_t punc_index = -1;
  // Have we returned to the punc dawg at the end of the word?
  bool back_to_punc = false;
};
class DawgPositionVector : public std::vector<DawgPosition> {
public:
/// Adds an entry for the given dawg_index with the given node to the vec.
/// Returns false if the same entry already exists in the vector,
/// true otherwise.
inline bool add_unique(const DawgPosition &new_pos, bool debug,
const char *debug_msg) {
for (auto &&position : *this) {
if (position == new_pos) {
return false;
}
}
push_back(new_pos);
if (debug) {
tprintf("%s[%d, " REFFORMAT "] [punc: " REFFORMAT "%s]\n", debug_msg,
new_pos.dawg_index, new_pos.dawg_ref, new_pos.punc_ref,
new_pos.back_to_punc ? " returned" : "");
}
return true;
}
};
//
/// Concrete class that can operate on a compacted (squished) Dawg (read,
/// search and write to file). This class is read-only in the sense that
/// new words cannot be added to an instance of SquishedDawg.
/// The underlying representation of the nodes and edges in SquishedDawg
/// is stored as a contiguous EDGE_ARRAY (read from file or given as an
/// argument to the constructor).
//
class TESS_API SquishedDawg : public Dawg {
public:
  /// Constructs an empty dawg; the edge array must be supplied later
  /// via Load().
  SquishedDawg(DawgType type, const std::string &lang, PermuterType perm,
               int debug_level)
      : Dawg(type, lang, perm, debug_level) {}
  /// Reads the squished representation from the given file; aborts
  /// (ASSERT_HOST) if the file cannot be opened or deserialized.
  SquishedDawg(const char *filename, DawgType type, const std::string &lang,
               PermuterType perm, int debug_level)
      : Dawg(type, lang, perm, debug_level) {
    TFile file;
    ASSERT_HOST(file.Open(filename, nullptr));
    ASSERT_HOST(read_squished_dawg(&file));
    num_forward_edges_in_node0 = num_forward_edges(0);
  }
  /// Wraps an already-built edge array (the array is not copied).
  SquishedDawg(EDGE_ARRAY edges, int num_edges, DawgType type,
               const std::string &lang, PermuterType perm, int unicharset_size,
               int debug_level)
      : Dawg(type, lang, perm, debug_level),
        edges_(edges),
        num_edges_(num_edges) {
    init(unicharset_size);
    num_forward_edges_in_node0 = num_forward_edges(0);
    if (debug_level > 3) {
      print_all("SquishedDawg:");
    }
  }
  ~SquishedDawg() override;
  // Loads using the given TFile. Returns false on failure.
  bool Load(TFile *fp) {
    if (!read_squished_dawg(fp)) {
      return false;
    }
    // Cache the out-degree of the root node; it is queried frequently.
    num_forward_edges_in_node0 = num_forward_edges(0);
    return true;
  }
  /// Returns the total number of edge records in this dawg.
  int NumEdges() {
    return num_edges_;
  }
  /// Returns the edge that corresponds to the letter out of this node.
  EDGE_REF edge_char_of(NODE_REF node, UNICHAR_ID unichar_id,
                        bool word_end) const override;
  /// Fills the given NodeChildVector with all the unichar ids (and the
  /// corresponding EDGE_REFs) for which there is an edge out of this node.
  void unichar_ids_of(NODE_REF node, NodeChildVector *vec,
                      bool word_end) const override {
    // In the squished representation a NODE_REF is the index of the node's
    // first outgoing edge record.
    EDGE_REF edge = node;
    if (!edge_occupied(edge) || edge == NO_EDGE) {
      return;
    }
    assert(forward_edge(edge)); // we don't expect any backward edges to
    do {                        // be present when this function is called
      if (!word_end || end_of_word_from_edge_rec(edges_[edge])) {
        vec->push_back(NodeChild(unichar_id_from_edge_rec(edges_[edge]), edge));
      }
    } while (!last_edge(edge++));
  }
  /// Returns the next node visited by following the edge
  /// indicated by the given EDGE_REF.
  NODE_REF next_node(EDGE_REF edge) const override {
    return next_node_from_edge_rec((edges_[edge]));
  }
  /// Returns true if the edge indicated by the given EDGE_REF
  /// marks the end of a word.
  bool end_of_word(EDGE_REF edge_ref) const override {
    return end_of_word_from_edge_rec((edges_[edge_ref]));
  }
  /// Returns UNICHAR_ID stored in the edge indicated by the given EDGE_REF.
  UNICHAR_ID edge_letter(EDGE_REF edge_ref) const override {
    return unichar_id_from_edge_rec((edges_[edge_ref]));
  }
  /// Prints the contents of the node indicated by the given NODE_REF.
  /// At most max_num_edges will be printed.
  void print_node(NODE_REF node, int max_num_edges) const override;
  /// Writes the squished/reduced Dawg to a file.
  bool write_squished_dawg(TFile *file);
  /// Opens the file with the given filename and writes the
  /// squished/reduced Dawg to the file.
  bool write_squished_dawg(const char *filename) {
    TFile file;
    file.OpenWrite(nullptr);
    if (!this->write_squished_dawg(&file)) {
      tprintf("Error serializing %s\n", filename);
      return false;
    }
    if (!file.CloseWrite(filename, nullptr)) {
      tprintf("Error writing file %s\n", filename);
      return false;
    }
    return true;
  }
private:
  /// Sets the next node link for this edge.
  inline void set_next_node(EDGE_REF edge_ref, EDGE_REF value) {
    set_next_node_in_edge_rec(&(edges_[edge_ref]), value);
  }
  /// Sets the edge to be empty.
  /// An "empty" record holds exactly next_node_mask_; see edge_occupied().
  inline void set_empty_edge(EDGE_REF edge_ref) {
    (edges_[edge_ref] = next_node_mask_);
  }
  /// Goes through all the edges and clears each one out.
  inline void clear_all_edges() {
    for (int edge = 0; edge < num_edges_; edge++) {
      set_empty_edge(edge);
    }
  }
  /// Clears the last flag of this edge.
  inline void clear_marker_flag(EDGE_REF edge_ref) {
    (edges_[edge_ref] &= ~(MARKER_FLAG << flag_start_bit_));
  }
  /// Returns true if this edge is in the forward direction.
  inline bool forward_edge(EDGE_REF edge_ref) const {
    return (edge_occupied(edge_ref) &&
            (FORWARD_EDGE == direction_from_edge_rec(edges_[edge_ref])));
  }
  /// Returns true if this edge is in the backward direction.
  inline bool backward_edge(EDGE_REF edge_ref) const {
    return (edge_occupied(edge_ref) &&
            (BACKWARD_EDGE == direction_from_edge_rec(edges_[edge_ref])));
  }
  /// Returns true if the edge spot in this location is occupied.
  inline bool edge_occupied(EDGE_REF edge_ref) const {
    return (edges_[edge_ref] != next_node_mask_);
  }
  /// Returns true if this edge is the last edge in a sequence.
  inline bool last_edge(EDGE_REF edge_ref) const {
    return (edges_[edge_ref] & (MARKER_FLAG << flag_start_bit_)) != 0;
  }
  /// Counts and returns the number of forward edges in this node.
  int32_t num_forward_edges(NODE_REF node) const;
  /// Reads SquishedDawg from a file.
  bool read_squished_dawg(TFile *file);
  /// Prints the contents of an edge indicated by the given EDGE_REF.
  void print_edge(EDGE_REF edge) const;
  /// Prints the contents of the SquishedDawg.
  void print_all(const char *msg) {
    tprintf("\n__________________________\n%s\n", msg);
    for (int i = 0; i < num_edges_; ++i) {
      print_edge(i);
    }
    tprintf("__________________________\n");
  }
  /// Constructs a mapping from the memory node indices to disk node indices.
  std::unique_ptr<EDGE_REF[]> build_node_map(int32_t *num_nodes) const;
  // Member variables.
  EDGE_ARRAY edges_ = nullptr;  // contiguous array of edge records
  int32_t num_edges_ = 0;       // number of entries in edges_
  // Cached count of forward edges out of the root node (node 0).
  int num_forward_edges_in_node0 = 0;
};
} // namespace tesseract
#endif // DICT_DAWG_H_
|
2301_81045437/tesseract
|
src/dict/dawg.h
|
C++
|
apache-2.0
| 21,859
|
///////////////////////////////////////////////////////////////////////
// File: dawg_cache.cpp
// Description: A class that knows about loading and caching dawgs.
// Author: David Eger
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "dawg_cache.h"
#include "dawg.h"
#include "object_cache.h"
#include "tessdatamanager.h"
namespace tesseract {
// Loader functor handed to the ObjectCache: loads one dawg of the given
// tessdata type for the given language on first request.
struct DawgLoader {
  DawgLoader(const std::string &lang, TessdataType tessdata_dawg_type, int dawg_debug_level,
             TessdataManager *data_file)
      : lang_(lang)
      , data_file_(data_file)
      , tessdata_dawg_type_(tessdata_dawg_type)
      , dawg_debug_level_(dawg_debug_level) {}
  // Loads the dawg from data_file_; returns nullptr on failure.
  Dawg *Load();
  std::string lang_;                  // language the dawg belongs to
  TessdataManager *data_file_;        // source traineddata (not owned)
  TessdataType tessdata_dawg_type_;   // which dawg section to load
  int dawg_debug_level_;              // debug verbosity passed to the dawg
};
// Returns the cached dawg for (data file, tessdata type), loading it via
// DawgLoader on first request. Returns nullptr if the component is missing
// or fails to deserialize.
Dawg *DawgCache::GetSquishedDawg(const std::string &lang, TessdataType tessdata_dawg_type,
                                 int debug_level, TessdataManager *data_file) {
  // Cache key: traineddata file name + the per-type suffix, so identical
  // dawgs are shared between instances reading the same data file.
  std::string data_id = data_file->GetDataFileName();
  data_id += kTessdataFileSuffixes[tessdata_dawg_type];
  DawgLoader loader(lang, tessdata_dawg_type, debug_level, data_file);
  // A lambda is clearer than std::bind for a nullary call; loader is only
  // captured by reference for the duration of this synchronous Get().
  return dawgs_.Get(data_id, [&loader] { return loader.Load(); });
}
// Deserializes the requested dawg section from the traineddata file.
// Returns a newly allocated SquishedDawg (caller/cache owns it), or nullptr
// if the component is absent, the type is unsupported, or loading fails.
Dawg *DawgLoader::Load() {
  TFile fp;
  if (!data_file_->GetComponent(tessdata_dawg_type_, &fp)) {
    return nullptr;
  }
  DawgType dawg_type;
  PermuterType perm_type;
  // Map the tessdata section onto the dawg type and the permuter code that
  // words found in that dawg should report.
  switch (tessdata_dawg_type_) {
    case TESSDATA_PUNC_DAWG:
    case TESSDATA_LSTM_PUNC_DAWG:
      dawg_type = DAWG_TYPE_PUNCTUATION;
      perm_type = PUNC_PERM;
      break;
    case TESSDATA_SYSTEM_DAWG:
    case TESSDATA_LSTM_SYSTEM_DAWG:
      dawg_type = DAWG_TYPE_WORD;
      perm_type = SYSTEM_DAWG_PERM;
      break;
    case TESSDATA_NUMBER_DAWG:
    case TESSDATA_LSTM_NUMBER_DAWG:
      dawg_type = DAWG_TYPE_NUMBER;
      perm_type = NUMBER_PERM;
      break;
    case TESSDATA_BIGRAM_DAWG:
      dawg_type = DAWG_TYPE_WORD;  // doesn't actually matter
      perm_type = COMPOUND_PERM;   // doesn't actually matter
      break;
    case TESSDATA_UNAMBIG_DAWG:
      dawg_type = DAWG_TYPE_WORD;
      perm_type = SYSTEM_DAWG_PERM;
      break;
    case TESSDATA_FREQ_DAWG:
      dawg_type = DAWG_TYPE_WORD;
      perm_type = FREQ_DAWG_PERM;
      break;
    default:
      // Not a dawg section; nothing to load.
      return nullptr;
  }
  auto *retval = new SquishedDawg(dawg_type, lang_, perm_type, dawg_debug_level_);
  if (retval->Load(&fp)) {
    return retval;
  }
  delete retval;
  return nullptr;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/dict/dawg_cache.cpp
|
C++
|
apache-2.0
| 3,111
|
///////////////////////////////////////////////////////////////////////
// File: dawg_cache.h
// Description: A class that knows about loading and caching dawgs.
// Author: David Eger
// Created: Fri Jan 27 12:08:00 PST 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_DICT_DAWG_CACHE_H_
#define TESSERACT_DICT_DAWG_CACHE_H_
#include "dawg.h"
#include "object_cache.h"
#include "tessdatamanager.h"
namespace tesseract {
// Reference-counted cache of loaded Dawg objects, keyed by traineddata file
// name + tessdata suffix, so multiple Tesseract instances can share them.
class DawgCache {
public:
  // Returns the cached dawg for the given language/type, loading it from
  // data_file on first request; nullptr if loading fails.
  Dawg *GetSquishedDawg(const std::string &lang, TessdataType tessdata_dawg_type, int debug_level,
                        TessdataManager *data_file);
  // If we manage the given dawg, decrement its count,
  // and possibly delete it if the count reaches zero.
  // If dawg is unknown to us, return false.
  bool FreeDawg(Dawg *dawg) {
    return dawgs_.Free(dawg);
  }
  // Free up any currently unused dawgs.
  void DeleteUnusedDawgs() {
    dawgs_.DeleteUnusedObjects();
  }
private:
  ObjectCache<Dawg> dawgs_;  // counted storage of the loaded dawgs
};
} // namespace tesseract
#endif // TESSERACT_DICT_DAWG_CACHE_H_
|
2301_81045437/tesseract
|
src/dict/dawg_cache.h
|
C++
|
apache-2.0
| 1,686
|
///////////////////////////////////////////////////////////////////////
// File: dict.cpp
// Description: dict class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "dict.h"
#include "tprintf.h"
#include <cstdio>
namespace tesseract {
class Image;
// Constructs a Dict bound to the given CCUtil (which provides the unicharset
// and the parameter registry). The initializer list registers every
// dictionary-related runtime parameter with ccutil's params(); the body then
// zero-initializes runtime state that SetupForLoad()/Load() fill in later.
Dict::Dict(CCUtil *ccutil)
    : letter_is_okay_(&tesseract::Dict::def_letter_is_okay)
    , probability_in_context_(&tesseract::Dict::def_probability_in_context)
    , ccutil_(ccutil)
    // Special-character ids are resolved in SetupForLoad() once the
    // unicharset is available.
    , wildcard_unichar_id_(INVALID_UNICHAR_ID)
    , apostrophe_unichar_id_(INVALID_UNICHAR_ID)
    , question_unichar_id_(INVALID_UNICHAR_ID)
    , slash_unichar_id_(INVALID_UNICHAR_ID)
    , hyphen_unichar_id_(INVALID_UNICHAR_ID)
    // Runtime parameters registered with the CCUtil parameter registry.
    , STRING_MEMBER(user_words_file, "", "A filename of user-provided words.",
                    getCCUtil()->params())
    , STRING_INIT_MEMBER(user_words_suffix, "",
                         "A suffix of user-provided words located in tessdata.",
                         getCCUtil()->params())
    , STRING_MEMBER(user_patterns_file, "", "A filename of user-provided patterns.",
                    getCCUtil()->params())
    , STRING_INIT_MEMBER(user_patterns_suffix, "",
                         "A suffix of user-provided patterns located in "
                         "tessdata.",
                         getCCUtil()->params())
    , BOOL_INIT_MEMBER(load_system_dawg, true, "Load system word dawg.", getCCUtil()->params())
    , BOOL_INIT_MEMBER(load_freq_dawg, true, "Load frequent word dawg.", getCCUtil()->params())
    , BOOL_INIT_MEMBER(load_unambig_dawg, true, "Load unambiguous word dawg.",
                       getCCUtil()->params())
    , BOOL_INIT_MEMBER(load_punc_dawg, true,
                       "Load dawg with punctuation"
                       " patterns.",
                       getCCUtil()->params())
    , BOOL_INIT_MEMBER(load_number_dawg, true,
                       "Load dawg with number"
                       " patterns.",
                       getCCUtil()->params())
    , BOOL_INIT_MEMBER(load_bigram_dawg, true,
                       "Load dawg with special word "
                       "bigrams.",
                       getCCUtil()->params())
    , double_MEMBER(xheight_penalty_subscripts, 0.125,
                    "Score penalty (0.1 = 10%) added if there are subscripts "
                    "or superscripts in a word, but it is otherwise OK.",
                    getCCUtil()->params())
    , double_MEMBER(xheight_penalty_inconsistent, 0.25,
                    "Score penalty (0.1 = 10%) added if an xheight is "
                    "inconsistent.",
                    getCCUtil()->params())
    , double_MEMBER(segment_penalty_dict_frequent_word, 1.0,
                    "Score multiplier for word matches which have good case and"
                    " are frequent in the given language (lower is better).",
                    getCCUtil()->params())
    , double_MEMBER(segment_penalty_dict_case_ok, 1.1,
                    "Score multiplier for word matches that have good case "
                    "(lower is better).",
                    getCCUtil()->params())
    , double_MEMBER(segment_penalty_dict_case_bad, 1.3125,
                    "Default score multiplier for word matches, which may have "
                    "case issues (lower is better).",
                    getCCUtil()->params())
    , double_MEMBER(segment_penalty_dict_nonword, 1.25,
                    "Score multiplier for glyph fragment segmentations which "
                    "do not match a dictionary word (lower is better).",
                    getCCUtil()->params())
    , double_MEMBER(segment_penalty_garbage, 1.50,
                    "Score multiplier for poorly cased strings that are not in"
                    " the dictionary and generally look like garbage (lower is"
                    " better).",
                    getCCUtil()->params())
    , STRING_MEMBER(output_ambig_words_file, "",
                    "Output file for ambiguities found in the dictionary", getCCUtil()->params())
    , INT_MEMBER(dawg_debug_level, 0,
                 "Set to 1 for general debug info"
                 ", to 2 for more details, to 3 to see all the debug messages",
                 getCCUtil()->params())
    , INT_MEMBER(hyphen_debug_level, 0, "Debug level for hyphenated words.", getCCUtil()->params())
    , BOOL_MEMBER(use_only_first_uft8_step, false,
                  "Use only the first UTF8 step of the given string"
                  " when computing log probabilities.",
                  getCCUtil()->params())
    , double_MEMBER(certainty_scale, 20.0, "Certainty scaling factor", getCCUtil()->params())
    , double_MEMBER(stopper_nondict_certainty_base, -2.50, "Certainty threshold for non-dict words",
                    getCCUtil()->params())
    , double_MEMBER(stopper_phase2_certainty_rejection_offset, 1.0, "Reject certainty offset",
                    getCCUtil()->params())
    , INT_MEMBER(stopper_smallword_size, 2, "Size of dict word to be treated as non-dict word",
                 getCCUtil()->params())
    , double_MEMBER(stopper_certainty_per_char, -0.50,
                    "Certainty to add"
                    " for each dict char above small word size.",
                    getCCUtil()->params())
    , double_MEMBER(stopper_allowable_character_badness, 3.0,
                    "Max certainty variation allowed in a word (in sigma)", getCCUtil()->params())
    , INT_MEMBER(stopper_debug_level, 0, "Stopper debug level", getCCUtil()->params())
    , BOOL_MEMBER(stopper_no_acceptable_choices, false,
                  "Make AcceptableChoice() always return false. Useful"
                  " when there is a need to explore all segmentations",
                  getCCUtil()->params())
    , INT_MEMBER(tessedit_truncate_wordchoice_log, 10, "Max words to keep in list",
                 getCCUtil()->params())
    , STRING_MEMBER(word_to_debug, "",
                    "Word for which stopper debug"
                    " information should be printed to stdout",
                    getCCUtil()->params())
    , BOOL_MEMBER(segment_nonalphabetic_script, false,
                  "Don't use any alphabetic-specific tricks."
                  " Set to true in the traineddata config file for"
                  " scripts that are cursive or inherently fixed-pitch",
                  getCCUtil()->params())
    , BOOL_MEMBER(save_doc_words, 0, "Save Document Words", getCCUtil()->params())
    , double_MEMBER(doc_dict_pending_threshold, 0.0, "Worst certainty for using pending dictionary",
                    getCCUtil()->params())
    , double_MEMBER(doc_dict_certainty_threshold, -2.25,
                    "Worst certainty for words that can be inserted into the"
                    " document dictionary",
                    getCCUtil()->params())
    , INT_MEMBER(max_permuter_attempts, 10000,
                 "Maximum number of different"
                 " character choices to consider during permutation."
                 " This limit is especially useful when user patterns"
                 " are specified, since overly generic patterns can result in"
                 " dawg search exploring an overly large number of options.",
                 getCCUtil()->params()) {
  // Runtime state: dawg/trie pointers are populated by Load()/LoadLSTM().
  reject_offset_ = 0.0;
  go_deeper_fxn_ = nullptr;
  hyphen_word_ = nullptr;
  last_word_on_line_ = false;
  document_words_ = nullptr;
  dawg_cache_ = nullptr;
  dawg_cache_is_ours_ = false;
  pending_words_ = nullptr;
  bigram_dawg_ = nullptr;
  freq_dawg_ = nullptr;
  punc_dawg_ = nullptr;
  unambig_dawg_ = nullptr;
  wordseg_rating_adjust_factor_ = -1.0f;
  output_ambig_words_file_ = nullptr;
}
// Destructor: tears down dawgs/caches via End() (a no-op if already called),
// then releases the remaining per-instance state.
Dict::~Dict() {
  End();
  delete hyphen_word_;
  // Close the ambiguity-output file if one was ever opened.
  if (output_ambig_words_file_) {
    fclose(output_ambig_words_file_);
  }
}
// Returns the process-wide shared dawg cache.
DawgCache *Dict::GlobalDawgCache() {
  // This global cache (a singleton) will outlive every Tesseract instance
  // (even those that someone else might declare as global static variables).
  // Function-local static: constructed on first use, destroyed at exit.
  static DawgCache cache;
  return &cache;
}
// Sets up ready for a Load or LoadLSTM: releases any previously loaded
// dawgs, resolves the unichar ids of special characters, and installs the
// dawg cache (caller-supplied shared cache, or a private one we own).
void Dict::SetupForLoad(DawgCache *dawg_cache) {
  // Idiom fix: empty() instead of size() != 0.
  if (!dawgs_.empty()) {
    this->End();
  }
  apostrophe_unichar_id_ = getUnicharset().unichar_to_id(kApostropheSymbol);
  question_unichar_id_ = getUnicharset().unichar_to_id(kQuestionSymbol);
  slash_unichar_id_ = getUnicharset().unichar_to_id(kSlashSymbol);
  hyphen_unichar_id_ = getUnicharset().unichar_to_id(kHyphenSymbol);
  if (dawg_cache != nullptr) {
    // Shared cache owned by the caller; End() must not delete it.
    dawg_cache_ = dawg_cache;
    dawg_cache_is_ours_ = false;
  } else {
    dawg_cache_ = new DawgCache();
    dawg_cache_is_ours_ = true;
  }
}
// Loads the dawgs needed by Tesseract. Call FinishLoad() after.
// Populates dawgs_ with the dawg sections enabled by the load_* parameters,
// plus user word/pattern tries and the document-words trie.
void Dict::Load(const std::string &lang, TessdataManager *data_file) {
  // Load dawgs_.
  if (load_punc_dawg) {
    punc_dawg_ =
        dawg_cache_->GetSquishedDawg(lang, TESSDATA_PUNC_DAWG, dawg_debug_level, data_file);
    if (punc_dawg_) {
      dawgs_.push_back(punc_dawg_);
    }
  }
  if (load_system_dawg) {
    Dawg *system_dawg =
        dawg_cache_->GetSquishedDawg(lang, TESSDATA_SYSTEM_DAWG, dawg_debug_level, data_file);
    if (system_dawg) {
      dawgs_.push_back(system_dawg);
    }
  }
  if (load_number_dawg) {
    Dawg *number_dawg =
        dawg_cache_->GetSquishedDawg(lang, TESSDATA_NUMBER_DAWG, dawg_debug_level, data_file);
    if (number_dawg) {
      dawgs_.push_back(number_dawg);
    }
  }
  if (load_bigram_dawg) {
    bigram_dawg_ =
        dawg_cache_->GetSquishedDawg(lang, TESSDATA_BIGRAM_DAWG, dawg_debug_level, data_file);
    // The bigram_dawg_ is NOT used like the other dawgs! DO NOT add to the
    // dawgs_!!
  }
  if (load_freq_dawg) {
    freq_dawg_ =
        dawg_cache_->GetSquishedDawg(lang, TESSDATA_FREQ_DAWG, dawg_debug_level, data_file);
    if (freq_dawg_) {
      dawgs_.push_back(freq_dawg_);
    }
  }
  if (load_unambig_dawg) {
    unambig_dawg_ =
        dawg_cache_->GetSquishedDawg(lang, TESSDATA_UNAMBIG_DAWG, dawg_debug_level, data_file);
    if (unambig_dawg_) {
      dawgs_.push_back(unambig_dawg_);
    }
  }
  // Optional user-provided word list: an explicit file path takes
  // precedence over a suffix resolved relative to the tessdata prefix.
  std::string name;
  if (!user_words_suffix.empty() || !user_words_file.empty()) {
    Trie *trie_ptr =
        new Trie(DAWG_TYPE_WORD, lang, USER_DAWG_PERM, getUnicharset().size(), dawg_debug_level);
    if (!user_words_file.empty()) {
      name = user_words_file;
    } else {
      name = getCCUtil()->language_data_path_prefix;
      name += user_words_suffix;
    }
    if (!trie_ptr->read_and_add_word_list(name.c_str(), getUnicharset(),
                                          Trie::RRP_REVERSE_IF_HAS_RTL)) {
      tprintf("Error: failed to load %s\n", name.c_str());
      delete trie_ptr;
    } else {
      dawgs_.push_back(trie_ptr);
    }
  }
  // Optional user-provided patterns, resolved the same way.
  if (!user_patterns_suffix.empty() || !user_patterns_file.empty()) {
    Trie *trie_ptr = new Trie(DAWG_TYPE_PATTERN, lang, USER_PATTERN_PERM, getUnicharset().size(),
                              dawg_debug_level);
    trie_ptr->initialize_patterns(&(getUnicharset()));
    if (!user_patterns_file.empty()) {
      name = user_patterns_file;
    } else {
      name = getCCUtil()->language_data_path_prefix;
      name += user_patterns_suffix;
    }
    if (!trie_ptr->read_pattern_list(name.c_str(), getUnicharset())) {
      tprintf("Error: failed to load %s\n", name.c_str());
      delete trie_ptr;
    } else {
      dawgs_.push_back(trie_ptr);
    }
  }
  // Trie collecting words seen in the current document.
  document_words_ =
      new Trie(DAWG_TYPE_WORD, lang, DOC_DAWG_PERM, getUnicharset().size(), dawg_debug_level);
  dawgs_.push_back(document_words_);
  // This dawg is temporary and should not be searched by letter_is_ok.
  pending_words_ =
      new Trie(DAWG_TYPE_WORD, lang, NO_PERM, getUnicharset().size(), dawg_debug_level);
}
// Loads the dawgs needed by the LSTM model. Call FinishLoad() after.
// Same shape as Load(), but reads the LSTM-specific dawg sections and skips
// the bigram/freq/unambig/document dawgs.
void Dict::LoadLSTM(const std::string &lang, TessdataManager *data_file) {
  // Load dawgs_.
  if (load_punc_dawg) {
    punc_dawg_ =
        dawg_cache_->GetSquishedDawg(lang, TESSDATA_LSTM_PUNC_DAWG, dawg_debug_level, data_file);
    if (punc_dawg_) {
      dawgs_.push_back(punc_dawg_);
    }
  }
  if (load_system_dawg) {
    Dawg *system_dawg =
        dawg_cache_->GetSquishedDawg(lang, TESSDATA_LSTM_SYSTEM_DAWG, dawg_debug_level, data_file);
    if (system_dawg) {
      dawgs_.push_back(system_dawg);
    }
  }
  if (load_number_dawg) {
    Dawg *number_dawg =
        dawg_cache_->GetSquishedDawg(lang, TESSDATA_LSTM_NUMBER_DAWG, dawg_debug_level, data_file);
    if (number_dawg) {
      dawgs_.push_back(number_dawg);
    }
  }
  // stolen from Dict::Load (but needs params_ from Tesseract
  // langdata/config/api):
  // User word list: an explicit file path wins over a tessdata suffix.
  std::string name;
  if (!user_words_suffix.empty() || !user_words_file.empty()) {
    Trie *trie_ptr =
        new Trie(DAWG_TYPE_WORD, lang, USER_DAWG_PERM, getUnicharset().size(), dawg_debug_level);
    if (!user_words_file.empty()) {
      name = user_words_file;
    } else {
      name = getCCUtil()->language_data_path_prefix;
      name += user_words_suffix;
    }
    if (!trie_ptr->read_and_add_word_list(name.c_str(), getUnicharset(),
                                          Trie::RRP_REVERSE_IF_HAS_RTL)) {
      tprintf("Error: failed to load %s\n", name.c_str());
      delete trie_ptr;
    } else {
      dawgs_.push_back(trie_ptr);
    }
  }
  // User-provided patterns, resolved the same way.
  if (!user_patterns_suffix.empty() || !user_patterns_file.empty()) {
    Trie *trie_ptr = new Trie(DAWG_TYPE_PATTERN, lang, USER_PATTERN_PERM, getUnicharset().size(),
                              dawg_debug_level);
    trie_ptr->initialize_patterns(&(getUnicharset()));
    if (!user_patterns_file.empty()) {
      name = user_patterns_file;
    } else {
      name = getCCUtil()->language_data_path_prefix;
      name += user_patterns_suffix;
    }
    if (!trie_ptr->read_pattern_list(name.c_str(), getUnicharset())) {
      tprintf("Error: failed to load %s\n", name.c_str());
      delete trie_ptr;
    } else {
      dawgs_.push_back(trie_ptr);
    }
  }
}
// Completes the loading process after Load() and/or LoadLSTM().
// Returns false if no dictionaries were loaded.
bool Dict::FinishLoad() {
if (dawgs_.empty()) {
return false;
}
// Construct a list of corresponding successors for each dawg. Each entry, i,
// in the successors_ vector is a vector of integers that represent the
// indices into the dawgs_ vector of the successors for dawg i.
successors_.reserve(dawgs_.size());
for (auto dawg : dawgs_) {
auto *lst = new SuccessorList();
for (unsigned j = 0; j < dawgs_.size(); ++j) {
const Dawg *other = dawgs_[j];
if (dawg != nullptr && other != nullptr && (dawg->lang() == other->lang()) &&
kDawgSuccessors[dawg->type()][other->type()]) {
lst->push_back(j);
}
}
successors_.push_back(lst);
}
return true;
}
// Releases everything Load()/LoadLSTM() acquired: returns cached dawgs to
// the dawg cache (or deletes un-cached ones such as the locally built
// tries), frees the successor lists and the pending-words trie.
// Safe to call when nothing is loaded (returns early).
void Dict::End() {
  if (dawgs_.empty()) {
    return; // Not safe to call twice.
  }
  for (auto &dawg : dawgs_) {
    // FreeDawg() returns false for dawgs the cache does not manage
    // (user/document tries), which we own and must delete ourselves.
    if (!dawg_cache_->FreeDawg(dawg)) {
      delete dawg;
    }
  }
  dawg_cache_->FreeDawg(bigram_dawg_);
  // NOTE(review): bigram_dawg_ is not reset to nullptr here; it appears to
  // be overwritten by the next Load() — confirm no use-after-free path.
  if (dawg_cache_is_ours_) {
    delete dawg_cache_;
    dawg_cache_ = nullptr;
  }
  for (auto successor : successors_) {
    delete successor;
  }
  dawgs_.clear();
  successors_.clear();
  document_words_ = nullptr;
  delete pending_words_;
  pending_words_ = nullptr;
}
// Returns true if in light of the current state unichar_id is allowed
// according to at least one of the dawgs in the dawgs_ vector.
// See more extensive comments in dict.h where this function is declared.
int Dict::def_letter_is_okay(void *void_dawg_args, const UNICHARSET &unicharset,
                             UNICHAR_ID unichar_id, bool word_end) const {
  auto *dawg_args = static_cast<DawgArgs *>(void_dawg_args);

  ASSERT_HOST(unicharset.contains_unichar_id(unichar_id));

  if (dawg_debug_level >= 3) {
    tprintf(
        "def_letter_is_okay: current unichar=%s word_end=%d"
        " num active dawgs=%zu\n",
        getUnicharset().debug_str(unichar_id).c_str(), word_end, dawg_args->active_dawgs->size());
  }

  // Do not accept words that contain kPatternUnicharID.
  // (otherwise pattern dawgs would not function correctly).
  // Do not accept words containing INVALID_UNICHAR_IDs.
  if (unichar_id == Dawg::kPatternUnicharID || unichar_id == INVALID_UNICHAR_ID) {
    dawg_args->permuter = NO_PERM;
    return NO_PERM;
  }

  // Initialization. curr_perm accumulates the best permuter code found for
  // this letter across all active dawg positions.
  PermuterType curr_perm = NO_PERM;
  dawg_args->updated_dawgs->clear();
  dawg_args->valid_end = false;

  // Go over the active_dawgs vector and insert DawgPosition records
  // with the updated ref (an edge with the corresponding unichar id) into
  // dawg_args->updated_pos.
  for (unsigned a = 0; a < dawg_args->active_dawgs->size(); ++a) {
    const DawgPosition &pos = (*dawg_args->active_dawgs)[a];
    // Negative indices mean "no dawg chosen for this role".
    const Dawg *punc_dawg = pos.punc_index >= 0 ? dawgs_[pos.punc_index] : nullptr;
    const Dawg *dawg = pos.dawg_index >= 0 ? dawgs_[pos.dawg_index] : nullptr;

    if (!dawg && !punc_dawg) {
      // shouldn't happen.
      tprintf("Received DawgPosition with no dawg or punc_dawg. wth?\n");
      continue;
    }
    if (!dawg) {
      // We're in the punctuation dawg. A core dawg has not been chosen.
      NODE_REF punc_node = GetStartingNode(punc_dawg, pos.punc_ref);
      // kPatternUnicharID in the punc dawg marks where a core word may start.
      EDGE_REF punc_transition_edge =
          punc_dawg->edge_char_of(punc_node, Dawg::kPatternUnicharID, word_end);
      if (punc_transition_edge != NO_EDGE) {
        // Find all successors, and see which can transition.
        const SuccessorList &slist = *(successors_[pos.punc_index]);
        for (int sdawg_index : slist) {
          const Dawg *sdawg = dawgs_[sdawg_index];
          UNICHAR_ID ch = char_for_dawg(unicharset, unichar_id, sdawg);
          EDGE_REF dawg_edge = sdawg->edge_char_of(0, ch, word_end);
          if (dawg_edge != NO_EDGE) {
            if (dawg_debug_level >= 3) {
              tprintf("Letter found in dawg %d\n", sdawg_index);
            }
            dawg_args->updated_dawgs->add_unique(
                DawgPosition(sdawg_index, dawg_edge, pos.punc_index, punc_transition_edge, false),
                dawg_debug_level > 0, "Append transition from punc dawg to current dawgs: ");
            if (sdawg->permuter() > curr_perm) {
              curr_perm = sdawg->permuter();
            }
            if (sdawg->end_of_word(dawg_edge) && punc_dawg->end_of_word(punc_transition_edge)) {
              dawg_args->valid_end = true;
            }
          }
        }
      }
      // The letter may also extend the punctuation itself.
      EDGE_REF punc_edge = punc_dawg->edge_char_of(punc_node, unichar_id, word_end);
      if (punc_edge != NO_EDGE) {
        if (dawg_debug_level >= 3) {
          tprintf("Letter found in punctuation dawg\n");
        }
        dawg_args->updated_dawgs->add_unique(
            DawgPosition(-1, NO_EDGE, pos.punc_index, punc_edge, false), dawg_debug_level > 0,
            "Extend punctuation dawg: ");
        if (PUNC_PERM > curr_perm) {
          curr_perm = PUNC_PERM;
        }
        if (punc_dawg->end_of_word(punc_edge)) {
          dawg_args->valid_end = true;
        }
      }
      continue;
    }

    if (punc_dawg && dawg->end_of_word(pos.dawg_ref)) {
      // We can end the main word here.
      // If we can continue on the punc ref, add that possibility.
      NODE_REF punc_node = GetStartingNode(punc_dawg, pos.punc_ref);
      EDGE_REF punc_edge =
          punc_node == NO_EDGE ? NO_EDGE : punc_dawg->edge_char_of(punc_node, unichar_id, word_end);
      if (punc_edge != NO_EDGE) {
        dawg_args->updated_dawgs->add_unique(
            DawgPosition(pos.dawg_index, pos.dawg_ref, pos.punc_index, punc_edge, true),
            dawg_debug_level > 0, "Return to punctuation dawg: ");
        if (dawg->permuter() > curr_perm) {
          curr_perm = dawg->permuter();
        }
        if (punc_dawg->end_of_word(punc_edge)) {
          dawg_args->valid_end = true;
        }
      }
    }

    // Once we returned to trailing punctuation, the core word is closed.
    if (pos.back_to_punc) {
      continue;
    }

    // If we are dealing with the pattern dawg, look up all the
    // possible edges, not only for the exact unichar_id, but also
    // for all its character classes (alpha, digit, etc).
    if (dawg->type() == DAWG_TYPE_PATTERN) {
      ProcessPatternEdges(dawg, pos, unichar_id, word_end, dawg_args, &curr_perm);
      // There can't be any successors to dawg that is of type
      // DAWG_TYPE_PATTERN, so we are done examining this DawgPosition.
      continue;
    }

    // Find the edge out of the node for the unichar_id.
    NODE_REF node = GetStartingNode(dawg, pos.dawg_ref);
    EDGE_REF edge =
        (node == NO_EDGE)
            ? NO_EDGE
            : dawg->edge_char_of(node, char_for_dawg(unicharset, unichar_id, dawg), word_end);

    if (dawg_debug_level >= 3) {
      tprintf("Active dawg: [%d, " REFFORMAT "] edge=" REFFORMAT "\n", pos.dawg_index, node, edge);
    }

    if (edge != NO_EDGE) { // the unichar was found in the current dawg
      if (dawg_debug_level >= 3) {
        tprintf("Letter found in dawg %d\n", pos.dawg_index);
      }
      if (word_end && punc_dawg && !punc_dawg->end_of_word(pos.punc_ref)) {
        if (dawg_debug_level >= 3) {
          tprintf("Punctuation constraint not satisfied at end of word.\n");
        }
        continue;
      }
      if (dawg->permuter() > curr_perm) {
        curr_perm = dawg->permuter();
      }
      if (dawg->end_of_word(edge) &&
          (punc_dawg == nullptr || punc_dawg->end_of_word(pos.punc_ref))) {
        dawg_args->valid_end = true;
      }
      dawg_args->updated_dawgs->add_unique(
          DawgPosition(pos.dawg_index, edge, pos.punc_index, pos.punc_ref, false),
          dawg_debug_level > 0, "Append current dawg to updated active dawgs: ");
    }
  } // end for

  // Update dawg_args->permuter if it used to be NO_PERM or became NO_PERM
  // or if we found the current letter in a non-punctuation dawg. This
  // allows preserving information on which dawg the "core" word came from.
  // Keep the old value of dawg_args->permuter if it is COMPOUND_PERM.
  if (dawg_args->permuter == NO_PERM || curr_perm == NO_PERM ||
      (curr_perm != PUNC_PERM && dawg_args->permuter != COMPOUND_PERM)) {
    dawg_args->permuter = curr_perm;
  }

  if (dawg_debug_level >= 2) {
    tprintf("Returning %d for permuter code for this character.\n", dawg_args->permuter);
  }
  return dawg_args->permuter;
}
void Dict::ProcessPatternEdges(const Dawg *dawg, const DawgPosition &pos, UNICHAR_ID unichar_id,
bool word_end, DawgArgs *dawg_args, PermuterType *curr_perm) const {
NODE_REF node = GetStartingNode(dawg, pos.dawg_ref);
// Try to find the edge corresponding to the exact unichar_id and to all the
// edges corresponding to the character class of unichar_id.
std::vector<UNICHAR_ID> unichar_id_patterns;
unichar_id_patterns.push_back(unichar_id);
dawg->unichar_id_to_patterns(unichar_id, getUnicharset(), &unichar_id_patterns);
for (int unichar_id_pattern : unichar_id_patterns) {
// On the first iteration check all the outgoing edges.
// On the second iteration check all self-loops.
for (int k = 0; k < 2; ++k) {
EDGE_REF edge = (k == 0)
? dawg->edge_char_of(node, unichar_id_pattern, word_end)
: dawg->pattern_loop_edge(pos.dawg_ref, unichar_id_pattern, word_end);
if (edge == NO_EDGE) {
continue;
}
if (dawg_debug_level >= 3) {
tprintf("Pattern dawg: [%d, " REFFORMAT "] edge=" REFFORMAT "\n", pos.dawg_index, node,
edge);
tprintf("Letter found in pattern dawg %d\n", pos.dawg_index);
}
if (dawg->permuter() > *curr_perm) {
*curr_perm = dawg->permuter();
}
if (dawg->end_of_word(edge)) {
dawg_args->valid_end = true;
}
dawg_args->updated_dawgs->add_unique(
DawgPosition(pos.dawg_index, edge, pos.punc_index, pos.punc_ref, pos.back_to_punc),
dawg_debug_level > 0, "Append current dawg to updated active dawgs: ");
}
}
}
// Fill the given active_dawgs vector with dawgs that could contain the
// beginning of the word. If hyphenated() returns true, copy the entries
// from hyphen_active_dawgs_ instead.
void Dict::init_active_dawgs(DawgPositionVector *active_dawgs, bool ambigs_mode) const {
  if (!hyphenated()) {
    default_dawgs(active_dawgs, ambigs_mode);
    return;
  }
  // Resume from the dawg positions recorded with the first half of the
  // hyphenated word.
  *active_dawgs = hyphen_active_dawgs_;
  if (dawg_debug_level >= 3) {
    for (const auto &position : hyphen_active_dawgs_) {
      tprintf("Adding hyphen beginning dawg [%d, " REFFORMAT "]\n", position.dawg_index,
              position.dawg_ref);
    }
  }
}
// Fills dawg_pos_vec with the default set of starting dawg positions:
// every punctuation dawg, plus any dawg that cannot be reached through
// the punctuation dawg. Pattern dawgs are skipped when suppress_patterns.
void Dict::default_dawgs(DawgPositionVector *dawg_pos_vec, bool suppress_patterns) const {
  const bool punc_dawg_available =
      (punc_dawg_ != nullptr) &&
      punc_dawg_->edge_char_of(0, Dawg::kPatternUnicharID, true) != NO_EDGE;
  for (unsigned i = 0; i < dawgs_.size(); i++) {
    const Dawg *candidate = dawgs_[i];
    if (candidate == nullptr) {
      continue;
    }
    const int dawg_type = candidate->type();
    if (suppress_patterns && dawg_type == DAWG_TYPE_PATTERN) {
      continue;
    }
    if (dawg_type == DAWG_TYPE_PUNCTUATION) {
      // Punctuation dawgs start with no core dawg chosen yet.
      dawg_pos_vec->push_back(DawgPosition(-1, NO_EDGE, i, NO_EDGE, false));
      if (dawg_debug_level >= 3) {
        tprintf("Adding beginning punc dawg [%d, " REFFORMAT "]\n", i, NO_EDGE);
      }
    } else if (!punc_dawg_available || !kDawgSuccessors[DAWG_TYPE_PUNCTUATION][dawg_type]) {
      // Only add dawgs not already reachable via the punctuation dawg.
      dawg_pos_vec->push_back(DawgPosition(i, NO_EDGE, -1, NO_EDGE, false));
      if (dawg_debug_level >= 3) {
        tprintf("Adding beginning dawg [%d, " REFFORMAT "]\n", i, NO_EDGE);
      }
    }
  }
}
void Dict::add_document_word(const WERD_CHOICE &best_choice) {
// Do not add hyphenated word parts to the document dawg.
// hyphen_word_ will be non-nullptr after the set_hyphen_word() is
// called when the first part of the hyphenated word is
// discovered and while the second part of the word is recognized.
// hyphen_word_ is cleared in cc_recg() before the next word on
// the line is recognized.
if (hyphen_word_) {
return;
}
int stringlen = best_choice.length();
if (valid_word(best_choice) || stringlen < 2) {
return;
}
// Discard words that contain >= kDocDictMaxRepChars repeating unichars.
if (best_choice.length() >= kDocDictMaxRepChars) {
int num_rep_chars = 1;
UNICHAR_ID uch_id = best_choice.unichar_id(0);
for (unsigned i = 1; i < best_choice.length(); ++i) {
if (best_choice.unichar_id(i) != uch_id) {
num_rep_chars = 1;
uch_id = best_choice.unichar_id(i);
} else {
++num_rep_chars;
if (num_rep_chars == kDocDictMaxRepChars) {
return;
}
}
}
}
if (best_choice.certainty() < doc_dict_certainty_threshold || stringlen == 2) {
if (best_choice.certainty() < doc_dict_pending_threshold) {
return;
}
if (!pending_words_->word_in_dawg(best_choice)) {
if (stringlen > 2 ||
(stringlen == 2 && getUnicharset().get_isupper(best_choice.unichar_id(0)) &&
getUnicharset().get_isupper(best_choice.unichar_id(1)))) {
pending_words_->add_word_to_dawg(best_choice);
}
return;
}
}
if (save_doc_words) {
std::string filename(getCCUtil()->imagefile);
filename += ".doc";
FILE *doc_word_file = fopen(filename.c_str(), "a");
if (doc_word_file == nullptr) {
tprintf("Error: Could not open file %s\n", filename.c_str());
ASSERT_HOST(doc_word_file);
}
fprintf(doc_word_file, "%s\n", best_choice.debug_string().c_str());
fclose(doc_word_file);
}
document_words_->add_word_to_dawg(best_choice);
}
// Adjusts the rating of *word according to dictionary membership, case,
// punctuation, and x-height consistency. The computed adjustment factor is
// always stored via set_adjust_factor(); the rating itself is only changed
// when modify_rating is true. Debug output is emitted when debug is true.
void Dict::adjust_word(WERD_CHOICE *word, bool nonword, XHeightConsistencyEnum xheight_consistency,
                       float additional_adjust, bool modify_rating, bool debug) {
  // Han script skips the case and punctuation checks below.
  bool is_han = (getUnicharset().han_sid() != getUnicharset().null_sid() &&
                 word->GetTopScriptID() == getUnicharset().han_sid());
  bool case_is_ok = (is_han || case_ok(*word));
  bool punc_is_ok = (is_han || !nonword || valid_punctuation(*word));

  float adjust_factor = additional_adjust;
  float new_rating = word->rating();
  // Pad the rating before scaling; the pad is subtracted again at the end.
  new_rating += kRatingPad;
  const char *xheight_triggered = "";
  if (word->length() > 1) {
    // Calculate x-height and y-offset consistency penalties.
    switch (xheight_consistency) {
      case XH_INCONSISTENT:
        adjust_factor += xheight_penalty_inconsistent;
        xheight_triggered = ", xhtBAD";
        break;
      case XH_SUBNORMAL:
        adjust_factor += xheight_penalty_subscripts;
        xheight_triggered = ", xhtSUB";
        break;
      case XH_GOOD:
        // leave the factor alone - all good!
        break;
    }
    // TODO(eger): if nonword is true, but there is a "core" that is a dict
    // word, negate nonword status.
  } else {
    if (debug) {
      tprintf("Consistency could not be calculated.\n");
    }
  }
  if (debug) {
    tprintf("%sWord: %s %4.2f%s", nonword ? "Non-" : "", word->unichar_string().c_str(),
            word->rating(), xheight_triggered);
  }

  // Pick the penalty matching the word's dictionary/case/punctuation status
  // and scale the padded rating by the accumulated factor. The debug markers
  // are: W = well-formed nonword, C = bad case, P = bad punctuation,
  // F = frequent dictionary word.
  if (nonword) { // non-dictionary word
    if (case_is_ok && punc_is_ok) {
      adjust_factor += segment_penalty_dict_nonword;
      new_rating *= adjust_factor;
      if (debug) {
        tprintf(", W");
      }
    } else {
      adjust_factor += segment_penalty_garbage;
      new_rating *= adjust_factor;
      if (debug) {
        if (!case_is_ok) {
          tprintf(", C");
        }
        if (!punc_is_ok) {
          tprintf(", P");
        }
      }
    }
  } else { // dictionary word
    if (case_is_ok) {
      if (!is_han && freq_dawg_ != nullptr && freq_dawg_->word_in_dawg(*word)) {
        word->set_permuter(FREQ_DAWG_PERM);
        adjust_factor += segment_penalty_dict_frequent_word;
        new_rating *= adjust_factor;
        if (debug) {
          tprintf(", F");
        }
      } else {
        adjust_factor += segment_penalty_dict_case_ok;
        new_rating *= adjust_factor;
        if (debug) {
          tprintf(", ");
        }
      }
    } else {
      adjust_factor += segment_penalty_dict_case_bad;
      new_rating *= adjust_factor;
      if (debug) {
        tprintf(", C");
      }
    }
  }
  new_rating -= kRatingPad;
  if (modify_rating) {
    word->set_rating(new_rating);
  }
  if (debug) {
    tprintf(" %4.2f --> %4.2f\n", adjust_factor, new_rating);
  }
  word->set_adjust_factor(adjust_factor);
}
// Runs letter_is_okay_ over every letter of the word (prefixed with any
// pending hyphenated base word) and returns the resulting permuter code,
// or NO_PERM if the word is not valid in any dawg.
int Dict::valid_word(const WERD_CHOICE &word, bool numbers_ok) const {
  const WERD_CHOICE *word_ptr = &word;
  WERD_CHOICE temp_word(word.unicharset());
  // If the first half of a hyphenated word is pending, validate
  // base-word + word as a single word.
  if (hyphenated() && hyphen_word_->unicharset() == word.unicharset()) {
    copy_hyphen_info(&temp_word);
    temp_word += word;
    word_ptr = &temp_word;
  }
  if (word_ptr->empty()) {
    return NO_PERM;
  }
  // Allocate vectors for holding current and updated
  // active_dawgs and initialize them.
  DawgPositionVector active_dawgs[2];
  init_active_dawgs(&(active_dawgs[0]), false);
  DawgArgs dawg_args(&(active_dawgs[0]), &(active_dawgs[1]), NO_PERM);
  int last_index = word_ptr->length() - 1;
  // Call letter_is_okay for each letter in the word.
  for (int i = hyphen_base_size(); i <= last_index; ++i) {
    if (!((this->*letter_is_okay_)(&dawg_args, *word_ptr->unicharset(), word_ptr->unichar_id(i),
                                   i == last_index))) {
      break;
    }
    // Swap active_dawgs, constraints with the corresponding updated vector.
    // The two vectors ping-pong between the "active" and "updated" roles;
    // ++ moves a pointer from &active_dawgs[0] to &active_dawgs[1].
    if (dawg_args.updated_dawgs == &(active_dawgs[1])) {
      dawg_args.updated_dawgs = &(active_dawgs[0]);
      ++(dawg_args.active_dawgs);
    } else {
      ++(dawg_args.updated_dawgs);
      dawg_args.active_dawgs = &(active_dawgs[0]);
    }
  }
  return valid_word_permuter(dawg_args.permuter, numbers_ok) ? dawg_args.permuter : NO_PERM;
}
bool Dict::valid_bigram(const WERD_CHOICE &word1, const WERD_CHOICE &word2) const {
if (bigram_dawg_ == nullptr) {
return false;
}
// Extract the core word from the middle of each word with any digits
// replaced with question marks.
unsigned w1start, w1end, w2start, w2end;
word1.punct_stripped(&w1start, &w1end);
word2.punct_stripped(&w2start, &w2end);
// We don't want to penalize a single guillemet, hyphen, etc.
// But our bigram list doesn't have any information about punctuation.
if (w1start >= w1end) {
return word1.length() < 3;
}
if (w2start >= w2end) {
return word2.length() < 3;
}
const UNICHARSET &uchset = getUnicharset();
std::vector<UNICHAR_ID> bigram_string;
bigram_string.reserve(w1end + w2end + 1);
for (auto i = w1start; i < w1end; i++) {
const auto &normed_ids = getUnicharset().normed_ids(word1.unichar_id(i));
if (normed_ids.size() == 1 && uchset.get_isdigit(normed_ids[0])) {
bigram_string.push_back(question_unichar_id_);
} else {
bigram_string.insert(bigram_string.end(), normed_ids.begin(), normed_ids.end());
}
}
bigram_string.push_back(UNICHAR_SPACE);
for (auto i = w2start; i < w2end; i++) {
const auto &normed_ids = getUnicharset().normed_ids(word2.unichar_id(i));
if (normed_ids.size() == 1 && uchset.get_isdigit(normed_ids[0])) {
bigram_string.push_back(question_unichar_id_);
} else {
bigram_string.insert(bigram_string.end(), normed_ids.begin(), normed_ids.end());
}
}
WERD_CHOICE normalized_word(&uchset, bigram_string.size());
for (int i : bigram_string) {
normalized_word.append_unichar_id_space_allocated(i, 1, 0.0f, 0.0f);
}
return bigram_dawg_->word_in_dawg(normalized_word);
}
bool Dict::valid_punctuation(const WERD_CHOICE &word) {
if (word.empty()) {
return NO_PERM;
}
WERD_CHOICE new_word(word.unicharset());
auto last_index = word.length() - 1;
int new_len;
for (unsigned i = 0; i <= last_index; ++i) {
UNICHAR_ID unichar_id = (word.unichar_id(i));
if (getUnicharset().get_ispunctuation(unichar_id)) {
new_word.append_unichar_id(unichar_id, 1, 0.0, 0.0);
} else if (!getUnicharset().get_isalpha(unichar_id) &&
!getUnicharset().get_isdigit(unichar_id)) {
return false; // neither punc, nor alpha, nor digit
} else if ((new_len = new_word.length()) == 0 ||
new_word.unichar_id(new_len - 1) != Dawg::kPatternUnicharID) {
new_word.append_unichar_id(Dawg::kPatternUnicharID, 1, 0.0, 0.0);
}
}
for (unsigned i = 0; i < dawgs_.size(); ++i) {
if (dawgs_[i] != nullptr && dawgs_[i]->type() == DAWG_TYPE_PUNCTUATION &&
dawgs_[i]->word_in_dawg(new_word)) {
return true;
}
}
return false;
}
/// Returns true if the language is space-delimited (not CJ, or T).
bool Dict::IsSpaceDelimitedLang() const {
const UNICHARSET &u_set = getUnicharset();
if (u_set.han_sid() > 0) {
return false;
}
if (u_set.katakana_sid() > 0) {
return false;
}
if (u_set.thai_sid() > 0) {
return false;
}
return true;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/dict/dict.cpp
|
C++
|
apache-2.0
| 35,739
|
///////////////////////////////////////////////////////////////////////
// File: dict.h
// Description: dict class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_DICT_DICT_H_
#define TESSERACT_DICT_DICT_H_
#ifdef HAVE_CONFIG_H
# include "config_auto.h" // DISABLED_LEGACY_ENGINE
#endif
#ifndef DISABLED_LEGACY_ENGINE
# include "ambigs.h"
#endif
#include "dawg.h"
#include "dawg_cache.h"
#include "ratngs.h"
#include "stopper.h"
#include "trie.h"
#include "unicharset.h"
#ifndef DISABLED_LEGACY_ENGINE
# include "params_training_featdef.h"
#endif // ndef DISABLED_LEGACY_ENGINE
namespace tesseract {
class MATRIX;
class WERD_RES;
#define CHARS_PER_LINE 500
#define MAX_WERD_LENGTH (int64_t)128
#define NO_RATING -1
/** Struct used to hold temporary information about fragments. */
struct CHAR_FRAGMENT_INFO {
  UNICHAR_ID unichar_id;         // unichar the fragment(s) belong to
  const CHAR_FRAGMENT *fragment; // current fragment (may be nullptr)
  int num_fragments;             // number of fragments seen so far
  float rating;                  // rating for the fragment sequence
  float certainty;               // certainty for the fragment sequence
};
using DawgVector = std::vector<Dawg *>;

//
// Constants
//

// Pad added to a word rating before multiplying by the adjustment factor
// (see Dict::adjust_word) and removed again afterwards.
static const int kRatingPad = 4;
static const int kDictMaxWildcards = 2; // max wildcards for a word
// TODO(daria): If hyphens are different in different languages and can be
// inferred from training data we should load their values dynamically.
static const char kHyphenSymbol[] = "-";
static const char kSlashSymbol[] = "/";
static const char kQuestionSymbol[] = "?";
static const char kApostropheSymbol[] = "'";
static const float kSimCertaintyScale = -10.0;  // similarity matcher scaling
static const float kSimCertaintyOffset = -10.0; // similarity matcher offset
static const float kSimilarityFloor = 100.0;    // worst E*L product to stop on
// Maximum identical repeated unichars before a word is rejected from the
// document dictionary (see Dict::add_document_word).
static const int kDocDictMaxRepChars = 4;

// Enum for describing whether the x-height for the word is consistent:
//  0 - everything is good.
//  1 - there are one or two secondary (but consistent) baselines
//      [think subscript and superscript], or there is an oversized
//      first character.
//  2 - the word is inconsistent.
enum XHeightConsistencyEnum { XH_GOOD, XH_SUBNORMAL, XH_INCONSISTENT };
// Bundles the per-letter state threaded through letter_is_okay_: the dawg
// positions active before the current letter, the positions valid after it,
// and the best permuter code found so far.
struct DawgArgs {
  DawgArgs(DawgPositionVector *d, DawgPositionVector *up, PermuterType p)
      : active_dawgs(d), updated_dawgs(up), permuter(p), valid_end(false) {}

  DawgPositionVector *active_dawgs;  // positions before the current letter
  DawgPositionVector *updated_dawgs; // positions after the current letter
  PermuterType permuter;             // best permuter code seen so far
  // True if the current position is a valid word end.
  bool valid_end;
};
class TESS_API Dict {
public:
// Constructs a Dict bound to the given CCUtil (owner of the unicharset
// and language parameters).
Dict(CCUtil *image_ptr);
~Dict();
const CCUtil *getCCUtil() const {
  return ccutil_;
}
CCUtil *getCCUtil() {
  return ccutil_;
}
const UNICHARSET &getUnicharset() const {
  return getCCUtil()->unicharset;
}
UNICHARSET &getUnicharset() {
  return getCCUtil()->unicharset;
}
#ifndef DISABLED_LEGACY_ENGINE
const UnicharAmbigs &getUnicharAmbigs() const {
  return getCCUtil()->unichar_ambigs;
}
#endif

// Returns true if unichar_id is a word compounding character like - or /.
inline bool compound_marker(UNICHAR_ID unichar_id) {
  const UNICHARSET &unicharset = getUnicharset();
  ASSERT_HOST(unicharset.contains_unichar_id(unichar_id));
  // Compare the normalized form so visually-distinct variants of
  // hyphen/slash are recognized too.
  const auto &normed_ids = unicharset.normed_ids(unichar_id);
  return normed_ids.size() == 1 &&
         (normed_ids[0] == hyphen_unichar_id_ || normed_ids[0] == slash_unichar_id_);
}
// Returns true if unichar_id is an apostrophe-like character that may
// separate prefix/suffix words from a main body word.
inline bool is_apostrophe(UNICHAR_ID unichar_id) {
  const UNICHARSET &unicharset = getUnicharset();
  ASSERT_HOST(unicharset.contains_unichar_id(unichar_id));
  const auto &normed_ids = unicharset.normed_ids(unichar_id);
  return normed_ids.size() == 1 && normed_ids[0] == apostrophe_unichar_id_;
}
/* hyphen.cpp ************************************************************/

/// Returns true if we've recorded the beginning of a hyphenated word.
inline bool hyphenated() const {
  return !last_word_on_line_ && hyphen_word_;
}
/// Size of the base word (the part on the line before) of a hyphenated word.
inline int hyphen_base_size() const {
  return this->hyphenated() ? hyphen_word_->length() : 0;
}
/// If this word is hyphenated copy the base word (the part on
/// the line before) of a hyphenated word into the given word.
/// This function assumes that word is not nullptr.
inline void copy_hyphen_info(WERD_CHOICE *word) const {
  if (this->hyphenated()) {
    *word = *hyphen_word_;
    if (hyphen_debug_level) {
      word->print("copy_hyphen_info: ");
    }
  }
}
/// Check whether the word has a hyphen at the end.
/// Only a non-initial hyphen in the last word on a line counts.
inline bool has_hyphen_end(const UNICHARSET *unicharset, UNICHAR_ID unichar_id,
                           bool first_pos) const {
  if (!last_word_on_line_ || first_pos) {
    return false;
  }
  ASSERT_HOST(unicharset->contains_unichar_id(unichar_id));
  const auto &normed_ids = unicharset->normed_ids(unichar_id);
  return normed_ids.size() == 1 && normed_ids[0] == hyphen_unichar_id_;
}
/// Same as above, but check the unichar at the end of the word.
inline bool has_hyphen_end(const WERD_CHOICE &word) const {
  int word_index = word.length() - 1;
  return has_hyphen_end(word.unicharset(), word.unichar_id(word_index), word_index == 0);
}
/// Unless the previous word was the last one on the line, and the current
/// one is not (thus it is the first one on the line), erase hyphen_word_,
/// clear hyphen_active_dawgs_, update last_word_on_line_.
void reset_hyphen_vars(bool last_word_on_line);
/// Update hyphen_word_, and copy the given DawgPositionVectors into
/// hyphen_active_dawgs_ .
void set_hyphen_word(const WERD_CHOICE &word, const DawgPositionVector &active_dawgs);
/* permdawg.cpp ************************************************************/
// Note: Functions in permdawg.cpp are only used by NoDangerousAmbig().
// When this function is refactored, permdawg.cpp can be removed.
/// Copies word into best_choice if its rating is smaller
/// than that of best_choice. (Lower ratings are better.)
inline void update_best_choice(const WERD_CHOICE &word, WERD_CHOICE *best_choice) {
  if (word.rating() < best_choice->rating()) {
    *best_choice = word;
  }
}
/// Fill the given active_dawgs vector with dawgs that could contain the
/// beginning of the word. If hyphenated() returns true, copy the entries
/// from hyphen_active_dawgs_ instead.
void init_active_dawgs(DawgPositionVector *active_dawgs, bool ambigs_mode) const;
// Fill the given vector with the default collection of any-length dawgs
void default_dawgs(DawgPositionVector *anylength_dawgs, bool suppress_patterns) const;
/// Recursively explore all the possible character combinations in
/// the given char_choices. Use go_deeper_dawg_fxn() to explore all the
/// dawgs in the dawgs_ vector in parallel and discard invalid words.
///
/// Allocate and return a WERD_CHOICE with the best valid word found.
WERD_CHOICE *dawg_permute_and_select(const BLOB_CHOICE_LIST_VECTOR &char_choices,
float rating_limit);
/// If the choice being composed so far could be a dictionary word
/// and we have not reached the end of the word keep exploring the
/// char_choices further.
void go_deeper_dawg_fxn(const char *debug, const BLOB_CHOICE_LIST_VECTOR &char_choices,
int char_choice_index, const CHAR_FRAGMENT_INFO *prev_char_frag_info,
bool word_ending, WERD_CHOICE *word, float certainties[], float *limit,
WERD_CHOICE *best_choice, int *attempts_left, void *void_more_args);
/// Pointer to go_deeper function.
void (Dict::*go_deeper_fxn_)(const char *debug, const BLOB_CHOICE_LIST_VECTOR &char_choices,
int char_choice_index, const CHAR_FRAGMENT_INFO *prev_char_frag_info,
bool word_ending, WERD_CHOICE *word, float certainties[],
float *limit, WERD_CHOICE *best_choice, int *attempts_left,
void *void_more_args);
//
// Helper functions for dawg_permute_and_select().
//
void permute_choices(const char *debug, const BLOB_CHOICE_LIST_VECTOR &char_choices,
int char_choice_index, const CHAR_FRAGMENT_INFO *prev_char_frag_info,
WERD_CHOICE *word, float certainties[], float *limit,
WERD_CHOICE *best_choice, int *attempts_left, void *more_args);
void append_choices(const char *debug, const BLOB_CHOICE_LIST_VECTOR &char_choices,
const BLOB_CHOICE &blob_choice, int char_choice_index,
const CHAR_FRAGMENT_INFO *prev_char_frag_info, WERD_CHOICE *word,
float certainties[], float *limit, WERD_CHOICE *best_choice,
int *attempts_left, void *more_args);
bool fragment_state_okay(UNICHAR_ID curr_unichar_id, float curr_rating, float curr_certainty,
const CHAR_FRAGMENT_INFO *prev_char_frag_info, const char *debug,
int word_ending, CHAR_FRAGMENT_INFO *char_frag_info);
/* stopper.cpp *************************************************************/
#if !defined(DISABLED_LEGACY_ENGINE)
bool NoDangerousAmbig(WERD_CHOICE *BestChoice, DANGERR *fixpt, bool fix_replaceable,
MATRIX *ratings);
#endif // !defined(DISABLED_LEGACY_ENGINE)
// Replaces the corresponding wrong ngram in werd_choice with the correct
// one. The whole correct n-gram is inserted into the ratings matrix and
// the werd_choice: no more fragments!. Rating and certainty of new entries
// in matrix and werd_choice are the sum and mean of the wrong ngram
// respectively.
// E.g. for werd_choice mystring'' and ambiguity ''->": werd_choice becomes
// mystring", with a new entry in the ratings matrix for ".
void ReplaceAmbig(int wrong_ngram_begin_index, int wrong_ngram_size, UNICHAR_ID correct_ngram_id,
WERD_CHOICE *werd_choice, MATRIX *ratings);
/// Returns the length of the shortest alpha run in WordChoice.
int LengthOfShortestAlphaRun(const WERD_CHOICE &WordChoice) const;
/// Returns true if the certainty of the BestChoice word is within a
/// reasonable range of the average certainties for the best choices for
/// each character in the segmentation. This test is used to catch words
/// in which one character is much worse than the other characters in the
/// word (i.e. false will be returned in that case). The algorithm computes
/// the mean and std deviation of the certainties in the word with the worst
/// certainty thrown out.
int UniformCertainties(const WERD_CHOICE &word);
/// Returns true if the given best_choice is good enough to stop.
bool AcceptableChoice(const WERD_CHOICE &best_choice, XHeightConsistencyEnum xheight_consistency);
/// Returns false if the best choice for the current word is questionable
/// and should be tried again on the second pass or should be flagged to
/// the user.
bool AcceptableResult(WERD_RES *word) const;
#if !defined(DISABLED_LEGACY_ENGINE)
void EndDangerousAmbigs();
#endif // !defined(DISABLED_LEGACY_ENGINE)
/// Prints the current choices for this word to stdout.
void DebugWordChoices();
/// Sets up stopper variables in preparation for the first pass.
void SettupStopperPass1();
/// Sets up stopper variables in preparation for the second pass.
void SettupStopperPass2();
/* context.cpp *************************************************************/
/// Check a string to see if it matches a set of lexical rules.
int case_ok(const WERD_CHOICE &word) const;
/// Returns true if the word looks like an absolute garbage
/// (e.g. image mistakenly recognized as text).
bool absolute_garbage(const WERD_CHOICE &word, const UNICHARSET &unicharset);
/* dict.cpp ****************************************************************/
/// Initialize Dict class - load dawgs from [lang].traineddata and
/// user-specified wordlist and parttern list.
static DawgCache *GlobalDawgCache();
// Sets up ready for a Load or LoadLSTM.
void SetupForLoad(DawgCache *dawg_cache);
// Loads the dawgs needed by Tesseract. Call FinishLoad() after.
void Load(const std::string &lang, TessdataManager *data_file);
// Loads the dawgs needed by the LSTM model. Call FinishLoad() after.
void LoadLSTM(const std::string &lang, TessdataManager *data_file);
// Completes the loading process after Load() and/or LoadLSTM().
// Returns false if no dictionaries were loaded.
bool FinishLoad();
void End();
// Resets the document dictionary analogous to ResetAdaptiveClassifier.
// Clears both the committed document words and those still pending a
// second sighting (see add_document_word).
void ResetDocumentDictionary() {
  if (pending_words_ != nullptr) {
    pending_words_->clear();
  }
  if (document_words_ != nullptr) {
    document_words_->clear();
  }
}
/**
* Returns the maximal permuter code (from ccstruct/ratngs.h) if in light
* of the current state the letter at word_index in the given word
* is allowed according to at least one of the dawgs in dawgs_,
* otherwise returns NO_PERM.
*
* The state is described by void_dawg_args, which are interpreted as
* DawgArgs and contain relevant active dawg positions.
* Each entry in the active_dawgs vector contains an index
* into the dawgs_ vector and an EDGE_REF that indicates the last edge
* followed in the dawg. It also may contain a position in the punctuation
* dawg which describes surrounding punctuation (see struct DawgPosition).
*
* Input:
* At word_index 0 dawg_args->active_dawgs should contain an entry for each
* dawg that may start at the beginning of a word, with punc_ref and edge_ref
* initialized to NO_EDGE. Since the punctuation dawg includes the empty
* pattern " " (meaning anything without surrounding punctuation), having a
* single entry for the punctuation dawg will cover all dawgs reachable
* there from -- that includes all number and word dawgs. The only dawg
* non-reachable from the punctuation_dawg is the pattern dawg.
* If hyphen state needs to be applied, initial dawg_args->active_dawgs can
* be copied from the saved hyphen state (maintained by Dict).
* For word_index > 0 the corresponding state (active_dawgs and punc position)
* can be obtained from dawg_args->updated_dawgs passed to
* def_letter_is_okay for word_index-1.
* Note: the function assumes that active_dawgs, and updated_dawgs
* member variables of dawg_args are not nullptr.
*
* Output:
* The function fills in dawg_args->updated_dawgs vector with the
* entries for dawgs that contain the word up to the letter at word_index.
*
*/
//
int def_letter_is_okay(void *void_dawg_args, const UNICHARSET &unicharset, UNICHAR_ID unichar_id,
bool word_end) const;
int (Dict::*letter_is_okay_)(void *void_dawg_args, const UNICHARSET &unicharset,
UNICHAR_ID unichar_id, bool word_end) const;
/// Calls letter_is_okay_ member function.
int LetterIsOkay(void *void_dawg_args, const UNICHARSET &unicharset, UNICHAR_ID unichar_id,
                 bool word_end) const {
  return (this->*letter_is_okay_)(void_dawg_args, unicharset, unichar_id, word_end);
}

/// Probability in context function used by the ngram permuter.
double (Dict::*probability_in_context_)(const char *lang, const char *context, int context_bytes,
                                        const char *character, int character_bytes);
/// Calls probability_in_context_ member function.
double ProbabilityInContext(const char *context, int context_bytes, const char *character,
                            int character_bytes) {
  return (this->*probability_in_context_)(getCCUtil()->lang.c_str(), context, context_bytes,
                                          character, character_bytes);
}

/// Default (no-op) implementation of probability in context function.
/// Always reports probability 0.0 regardless of the input.
double def_probability_in_context(const char *lang, const char *context, int context_bytes,
                                  const char *character, int character_bytes) {
  // The casts only silence unused-parameter warnings.
  (void)lang;
  (void)context;
  (void)context_bytes;
  (void)character;
  (void)character_bytes;
  return 0.0;
}
  /// Record the unichar id to be treated as the wildcard in dawg lookups.
  inline void SetWildcardID(UNICHAR_ID id) {
    wildcard_unichar_id_ = id;
  }
  /// Return the unichar id currently treated as the wildcard.
  inline UNICHAR_ID WildcardID() const {
    return wildcard_unichar_id_;
  }
  /// Return the number of dawgs in the dawgs_ vector.
  inline int NumDawgs() const {
    return dawgs_.size();
  }
  /// Return i-th dawg pointer recorded in the dawgs_ vector.
  inline const Dawg *GetDawg(int index) const {
    return dawgs_[index];
  }
  /// Return the pointer to the punctuation dawg.
  inline const Dawg *GetPuncDawg() const {
    return punc_dawg_;
  }
  /// Return the pointer to the unambiguous words dawg.
  inline const Dawg *GetUnambigDawg() const {
    return unambig_dawg_;
  }
/// Returns the appropriate next node given the EDGE_REF.
static inline NODE_REF GetStartingNode(const Dawg *dawg, EDGE_REF edge_ref) {
if (edge_ref == NO_EDGE) {
return 0; // beginning to explore the dawg
}
NODE_REF node = dawg->next_node(edge_ref);
if (node == 0) {
node = NO_EDGE; // end of word
}
return node;
}
// Given a unichar from a string and a given dawg, return the unichar
// we should use to match in that dawg type. (for example, in the number
// dawg, all numbers are transformed to kPatternUnicharId).
UNICHAR_ID char_for_dawg(const UNICHARSET &unicharset, UNICHAR_ID ch, const Dawg *dawg) const {
if (!dawg) {
return ch;
}
switch (dawg->type()) {
case DAWG_TYPE_NUMBER:
return unicharset.get_isdigit(ch) ? Dawg::kPatternUnicharID : ch;
default:
return ch;
}
}
/// For each of the character classes of the given unichar_id (and the
/// unichar_id itself) finds the corresponding outgoing node or self-loop
/// in the given dawg and (after checking that it is valid) records it in
/// dawg_args->updated_ative_dawgs. Updates current_permuter if any valid
/// edges were found.
void ProcessPatternEdges(const Dawg *dawg, const DawgPosition &info, UNICHAR_ID unichar_id,
bool word_end, DawgArgs *dawg_args,
PermuterType *current_permuter) const;
/// Read/Write/Access special purpose dawgs which contain words
/// only of a certain length (used for phrase search for
/// non-space-delimited languages).
/// Check all the DAWGs to see if this word is in any of them.
inline static bool valid_word_permuter(uint8_t perm, bool numbers_ok) {
return (perm == SYSTEM_DAWG_PERM || perm == FREQ_DAWG_PERM || perm == DOC_DAWG_PERM ||
perm == USER_DAWG_PERM || perm == USER_PATTERN_PERM || perm == COMPOUND_PERM ||
(numbers_ok && perm == NUMBER_PERM));
}
  /// Check all the DAWGs to see if this word is in any of them.
  /// Returns the permuter code of the dawg that matched, or NO_PERM.
  int valid_word(const WERD_CHOICE &word, bool numbers_ok) const;
  /// Strict variant: words containing digits are rejected.
  int valid_word(const WERD_CHOICE &word) const {
    return valid_word(word, false); // return NO_PERM for words with digits
  }
  /// Lenient variant: valid numbers are accepted as well.
  int valid_word_or_number(const WERD_CHOICE &word) const {
    return valid_word(word, true); // return NUMBER_PERM for valid numbers
  }
  /// This function is used by api/tesseract_cube_combiner.cpp
  /// Convenience overload: encodes the UTF-8 string with this dictionary's
  /// unicharset before checking.
  int valid_word(const char *string) const {
    WERD_CHOICE word(string, getUnicharset());
    return valid_word(word);
  }
// Do the two WERD_CHOICEs form a meaningful bigram?
bool valid_bigram(const WERD_CHOICE &word1, const WERD_CHOICE &word2) const;
/// Returns true if the word contains a valid punctuation pattern.
/// Note: Since the domains of punctuation symbols and symblos
/// used in numbers are not disjoint, a valid number might contain
/// an invalid punctuation pattern (e.g. .99).
bool valid_punctuation(const WERD_CHOICE &word);
/// Returns true if a good answer is found for the unknown blob rating.
int good_choice(const WERD_CHOICE &choice);
/// Adds a word found on this document to the document specific dictionary.
void add_document_word(const WERD_CHOICE &best_choice);
/// Adjusts the rating of the given word.
void adjust_word(WERD_CHOICE *word, bool nonword, XHeightConsistencyEnum xheight_consistency,
float additional_adjust, bool modify_rating, bool debug);
/// Set wordseg_rating_adjust_factor_ to the given value.
inline void SetWordsegRatingAdjustFactor(float f) {
wordseg_rating_adjust_factor_ = f;
}
/// Returns true if the language is space-delimited (not CJ, or T).
bool IsSpaceDelimitedLang() const;
private:
/** Private member variables. */
CCUtil *ccutil_;
/**
* Table that stores ambiguities computed during training
* (loaded when NoDangerousAmbigs() is called for the first time).
* Each entry i in the table stores a set of amibiguities whose
* wrong ngram starts with unichar id i.
*/
#ifndef DISABLED_LEGACY_ENGINE
UnicharAmbigs *dang_ambigs_table_ = nullptr;
/** Same as above, but for ambiguities with replace flag set. */
UnicharAmbigs *replace_ambigs_table_ = nullptr;
#endif
/** Additional certainty padding allowed before a word is rejected. */
float reject_offset_;
// Cached UNICHAR_IDs:
UNICHAR_ID wildcard_unichar_id_; // kDictWildcard.
UNICHAR_ID apostrophe_unichar_id_; // kApostropheSymbol.
UNICHAR_ID question_unichar_id_; // kQuestionSymbol.
UNICHAR_ID slash_unichar_id_; // kSlashSymbol.
UNICHAR_ID hyphen_unichar_id_; // kHyphenSymbol.
// Hyphen-related variables.
WERD_CHOICE *hyphen_word_;
DawgPositionVector hyphen_active_dawgs_;
bool last_word_on_line_;
// List of lists of "equivalent" UNICHAR_IDs for the purposes of dictionary
// matching. The first member of each list is taken as canonical. For
// example, the first list contains hyphens and dashes with the first symbol
// being the ASCII hyphen minus.
std::vector<std::vector<UNICHAR_ID>> equivalent_symbols_;
// Dawg Cache reference - this is who we ask to allocate/deallocate dawgs.
DawgCache *dawg_cache_;
bool dawg_cache_is_ours_; // we should delete our own dawg_cache_
// Dawgs.
DawgVector dawgs_;
SuccessorListsVector successors_;
Trie *pending_words_;
/// The following pointers are only cached for convenience.
/// The dawgs will be deleted when dawgs_ vector is destroyed.
// bigram_dawg_ points to a dawg of two-word bigrams which always supersede if
// any of them are present on the best choices list for a word pair.
// the bigrams are stored as space-separated words where:
// (1) leading and trailing punctuation has been removed from each word and
// (2) any digits have been replaced with '?' marks.
Dawg *bigram_dawg_;
// TODO(daria): need to support multiple languages in the future,
// so maybe will need to maintain a list of dawgs of each kind.
Dawg *freq_dawg_;
Dawg *unambig_dawg_;
Dawg *punc_dawg_;
Trie *document_words_;
/// Current segmentation cost adjust factor for word rating.
/// See comments in incorporate_segcost.
float wordseg_rating_adjust_factor_;
// File for recording ambiguities discovered during dictionary search.
FILE *output_ambig_words_file_;
public:
/// Variable members.
/// These have to be declared and initialized after image_ptr_, which contains
/// the pointer to the params vector - the member of its base CCUtil class.
STRING_VAR_H(user_words_file);
STRING_VAR_H(user_words_suffix);
STRING_VAR_H(user_patterns_file);
STRING_VAR_H(user_patterns_suffix);
BOOL_VAR_H(load_system_dawg);
BOOL_VAR_H(load_freq_dawg);
BOOL_VAR_H(load_unambig_dawg);
BOOL_VAR_H(load_punc_dawg);
BOOL_VAR_H(load_number_dawg);
BOOL_VAR_H(load_bigram_dawg);
double_VAR_H(xheight_penalty_subscripts);
double_VAR_H(xheight_penalty_inconsistent);
double_VAR_H(segment_penalty_dict_frequent_word);
double_VAR_H(segment_penalty_dict_case_ok);
double_VAR_H(segment_penalty_dict_case_bad);
double_VAR_H(segment_penalty_dict_nonword);
double_VAR_H(segment_penalty_garbage);
STRING_VAR_H(output_ambig_words_file);
INT_VAR_H(dawg_debug_level);
INT_VAR_H(hyphen_debug_level);
BOOL_VAR_H(use_only_first_uft8_step);
double_VAR_H(certainty_scale);
double_VAR_H(stopper_nondict_certainty_base);
double_VAR_H(stopper_phase2_certainty_rejection_offset);
INT_VAR_H(stopper_smallword_size);
double_VAR_H(stopper_certainty_per_char);
double_VAR_H(stopper_allowable_character_badness);
INT_VAR_H(stopper_debug_level);
BOOL_VAR_H(stopper_no_acceptable_choices);
INT_VAR_H(tessedit_truncate_wordchoice_log);
STRING_VAR_H(word_to_debug);
BOOL_VAR_H(segment_nonalphabetic_script);
BOOL_VAR_H(save_doc_words);
double_VAR_H(doc_dict_pending_threshold);
double_VAR_H(doc_dict_certainty_threshold);
INT_VAR_H(max_permuter_attempts);
};
} // namespace tesseract
#endif // THIRD_PARTY_TESSERACT_DICT_DICT_H_
|
2301_81045437/tesseract
|
src/dict/dict.h
|
C++
|
apache-2.0
| 25,686
|
/******************************************************************************
* File: hyphen.cpp (Formerly hyphen.c)
* Description: Functions for maintaining information about hyphenated words.
* Author: Mark Seaman, OCR Technology
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#include "dict.h"
namespace tesseract {
// Resets the stored hyphen state. The state is preserved only across a line
// break (the previous word ended its line and the current word starts a new
// one); in every other situation hyphen_word_ is deleted and
// hyphen_active_dawgs_ is cleared. Always records last_word_on_line.
void Dict::reset_hyphen_vars(bool last_word_on_line) {
  // !(prev_last && !curr_last) == (!prev_last || curr_last): discard the
  // hyphen state unless we are crossing a line break.
  if (!last_word_on_line_ || last_word_on_line) {
    if (hyphen_word_ != nullptr) {
      delete hyphen_word_;
      hyphen_word_ = nullptr;
      hyphen_active_dawgs_.clear();
    }
  }
  if (hyphen_debug_level) {
    tprintf("reset_hyphen_vars: last_word_on_line %d -> %d\n", last_word_on_line_,
            last_word_on_line);
  }
  last_word_on_line_ = last_word_on_line;
}
// Remembers the given hyphenated word (sans its trailing hyphen) together
// with the dawg positions active at the hyphen, keeping only the best-rated
// candidate seen so far on this line.
void Dict::set_hyphen_word(const WERD_CHOICE &word, const DawgPositionVector &active_dawgs) {
  if (hyphen_word_ == nullptr) {
    // First hyphenated candidate on this line: seed storage with a
    // worst-possible rating so any real candidate replaces it below.
    auto *initial = new WERD_CHOICE(word.unicharset());
    initial->make_bad();
    hyphen_word_ = initial;
  }
  const bool improves = word.rating() < hyphen_word_->rating();
  if (improves) {
    *hyphen_word_ = word;
    // Drop the trailing hyphen character; only the word prefix is kept
    // (this also discards any cached unichar_string/lengths).
    hyphen_word_->remove_last_unichar_id();
    hyphen_active_dawgs_ = active_dawgs;
  }
  if (hyphen_debug_level) {
    hyphen_word_->print("set_hyphen_word: ");
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/dict/hyphen.cpp
|
C++
|
apache-2.0
| 2,402
|
/******************************************************************************
** Filename: matchdefs.h
** Purpose: Generic interface definitions for feature matchers.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef MATCHDEFS_H
#define MATCHDEFS_H
#include <tesseract/unichar.h>
#include <climits>
#include <cstdint> // int16_t, INT16_MAX
namespace tesseract {
/* define the maximum number of classes defined for any matcher
   and the maximum class id for any matcher. This must be changed
   if more different classes need to be classified */
#define MAX_NUM_CLASSES INT16_MAX
/** a CLASS_ID is the ascii character to be associated with a class */
using CLASS_ID = UNICHAR_ID;
#define NO_CLASS (0)
/** a PROTO_ID is the index of a prototype within its class. Valid proto
    id's are 0 to N-1 where N is the number of prototypes that make up the
    class. */
using PROTO_ID = int16_t;
#define NO_PROTO (-1)
/** FEATURE_ID is the index of a feature within a character description
    The feature id ranges from 0 to N-1 where N is the number
    of features in a character description. */
using FEATURE_ID = uint8_t;
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/dict/matchdefs.h
|
C++
|
apache-2.0
| 1,842
|
/******************************************************************************
*
* File: permdawg.cpp (Formerly permdawg.c)
* Description: Scale word choices by a dictionary
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "dawg.h"
#include "params.h"
#include "stopper.h"
#include "tprintf.h"
#include <algorithm>
#include <cctype>
#include "dict.h"
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
namespace tesseract {
/**
* @name go_deeper_dawg_fxn
*
* If the choice being composed so far could be a dictionary word
* keep exploring choices.
*/
void Dict::go_deeper_dawg_fxn(const char *debug, const BLOB_CHOICE_LIST_VECTOR &char_choices,
                              int char_choice_index, const CHAR_FRAGMENT_INFO *prev_char_frag_info,
                              bool word_ending, WERD_CHOICE *word, float certainties[],
                              float *limit, WERD_CHOICE *best_choice, int *attempts_left,
                              void *void_more_args) {
  auto *more_args = static_cast<DawgArgs *>(void_more_args);
  // NOTE: word_ending is recomputed here from the choice index, so the
  // caller-supplied value of the parameter is ignored.
  word_ending = (static_cast<unsigned>(char_choice_index) == char_choices.size() - 1);
  int word_index = word->length() - 1;
  // Prune this branch: the best choice found so far already beats the limit.
  if (best_choice->rating() < *limit) {
    return;
  }
  // Look up char in DAWG
  // If the current unichar is an ngram first try calling
  // letter_is_okay() for each unigram it contains separately.
  UNICHAR_ID orig_uch_id = word->unichar_id(word_index);
  bool checked_unigrams = false;
  if (getUnicharset().get_isngram(orig_uch_id)) {
    if (dawg_debug_level) {
      tprintf("checking unigrams in an ngram %s\n", getUnicharset().debug_str(orig_uch_id).c_str());
    }
    int num_unigrams = 0;
    // Temporarily pop the ngram off the word; it is re-appended below after
    // its unigrams have been checked one at a time.
    word->remove_last_unichar_id();
    std::vector<UNICHAR_ID> encoding;
    const char *ngram_str = getUnicharset().id_to_unichar(orig_uch_id);
    // Since the string came out of the unicharset, failure is impossible.
    ASSERT_HOST(getUnicharset().encode_string(ngram_str, true, &encoding, nullptr, nullptr));
    bool unigrams_ok = true;
    // Construct DawgArgs that reflect the current state.
    DawgPositionVector unigram_active_dawgs = *(more_args->active_dawgs);
    DawgPositionVector unigram_updated_dawgs;
    DawgArgs unigram_dawg_args(&unigram_active_dawgs, &unigram_updated_dawgs, more_args->permuter);
    // Check unigrams in the ngram with letter_is_okay().
    for (size_t i = 0; unigrams_ok && i < encoding.size(); ++i) {
      UNICHAR_ID uch_id = encoding[i];
      ASSERT_HOST(uch_id != INVALID_UNICHAR_ID);
      ++num_unigrams;
      word->append_unichar_id(uch_id, 1, 0.0, 0.0);
      // word_end is true only for the last unigram of an ngram that itself
      // ends the word.
      unigrams_ok = (this->*letter_is_okay_)(&unigram_dawg_args, *word->unicharset(),
                                             word->unichar_id(word_index + num_unigrams - 1),
                                             word_ending && i == encoding.size() - 1);
      // Advance the local dawg state so the next unigram continues from here.
      (*unigram_dawg_args.active_dawgs) = *(unigram_dawg_args.updated_dawgs);
      if (dawg_debug_level) {
        tprintf("unigram %s is %s\n", getUnicharset().debug_str(uch_id).c_str(),
                unigrams_ok ? "OK" : "not OK");
      }
    }
    // Restore the word and copy the updated dawg state if needed.
    while (num_unigrams-- > 0) {
      word->remove_last_unichar_id();
    }
    word->append_unichar_id_space_allocated(orig_uch_id, 1, 0.0, 0.0);
    if (unigrams_ok) {
      checked_unigrams = true;
      more_args->permuter = unigram_dawg_args.permuter;
      *(more_args->updated_dawgs) = *(unigram_dawg_args.updated_dawgs);
    }
  }
  // Check which dawgs from the dawgs_ vector contain the word
  // up to and including the current unichar.
  if (checked_unigrams || (this->*letter_is_okay_)(more_args, *word->unicharset(),
                                                   word->unichar_id(word_index), word_ending)) {
    // Add a new word choice
    if (word_ending) {
      if (dawg_debug_level) {
        tprintf("found word = %s\n", word->debug_string().c_str());
      }
      // Optionally log every accepted word to output_ambig_words_file.
      if (strcmp(output_ambig_words_file.c_str(), "") != 0) {
        if (output_ambig_words_file_ == nullptr) {
          output_ambig_words_file_ = fopen(output_ambig_words_file.c_str(), "wb+");
          if (output_ambig_words_file_ == nullptr) {
            tprintf("Failed to open output_ambig_words_file %s\n", output_ambig_words_file.c_str());
            exit(1);
          }
          std::string word_str;
          word->string_and_lengths(&word_str, nullptr);
          word_str += " ";
          // NOTE(review): this write duplicates the unconditional one below,
          // so the very first word found is written twice when the file is
          // first opened — confirm whether that is intentional.
          fprintf(output_ambig_words_file_, "%s", word_str.c_str());
        }
        std::string word_str;
        word->string_and_lengths(&word_str, nullptr);
        word_str += " ";
        fprintf(output_ambig_words_file_, "%s", word_str.c_str());
      }
      // Record the permuter that validated the word and offer it as a new
      // best choice.
      WERD_CHOICE *adjusted_word = word;
      adjusted_word->set_permuter(more_args->permuter);
      update_best_choice(*adjusted_word, best_choice);
    } else { // search the next letter
      // Make updated_* point to the next entries in the DawgPositionVector
      // arrays (that were originally created in dawg_permute_and_select)
      ++(more_args->updated_dawgs);
      // Make active_dawgs and constraints point to the updated ones.
      ++(more_args->active_dawgs);
      permute_choices(debug, char_choices, char_choice_index + 1, prev_char_frag_info, word,
                      certainties, limit, best_choice, attempts_left, more_args);
      // Restore previous state to explore another letter in this position.
      --(more_args->updated_dawgs);
      --(more_args->active_dawgs);
    }
  } else {
    if (dawg_debug_level) {
      tprintf("last unichar not OK at index %d in %s\n", word_index, word->debug_string().c_str());
    }
  }
}
/**
 * dawg_permute_and_select
 *
 * Recursively explores every character combination in char_choices, using
 * go_deeper_dawg_fxn() to walk all dawgs in the dawgs_ vector in parallel
 * and discard invalid words.
 *
 * @param char_choices per-position candidate lists for the word
 * @param rating_limit only words rated better than this are kept
 * @return a newly allocated WERD_CHOICE holding the best valid word found
 *         (marked bad and rated at rating_limit if none was found);
 *         ownership passes to the caller.
 */
WERD_CHOICE *Dict::dawg_permute_and_select(const BLOB_CHOICE_LIST_VECTOR &char_choices,
                                           float rating_limit) {
  // Seed the result with a deliberately bad choice rated at the caller's
  // limit; any dictionary word found must beat this rating to replace it.
  auto *result = new WERD_CHOICE(&getUnicharset());
  result->make_bad();
  result->set_rating(rating_limit);
  const auto num_positions = char_choices.size();
  if (num_positions == 0 || num_positions > MAX_WERD_LENGTH) {
    return result; // nothing to permute, or word too long for the buffers
  }
  // One DawgPositionVector per letter position plus one for the start state.
  auto *position_states = new DawgPositionVector[num_positions + 1];
  init_active_dawgs(&(position_states[0]), true);
  DawgArgs dawg_args(&(position_states[0]), &(position_states[1]), NO_PERM);
  WERD_CHOICE word(&getUnicharset(), MAX_WERD_LENGTH);
  float certainties[MAX_WERD_LENGTH];
  this->go_deeper_fxn_ = &tesseract::Dict::go_deeper_dawg_fxn;
  int attempts_left = max_permuter_attempts;
  const char *debug_tag = (dawg_debug_level) ? "permute_dawg_debug" : nullptr;
  permute_choices(debug_tag, char_choices, 0, nullptr, &word, certainties, &rating_limit, result,
                  &attempts_left, &dawg_args);
  delete[] position_states;
  return result;
}
/**
* permute_choices
*
* Call append_choices() for each BLOB_CHOICE in BLOB_CHOICE_LIST
* with the given char_choice_index in char_choices.
*/
void Dict::permute_choices(const char *debug, const BLOB_CHOICE_LIST_VECTOR &char_choices,
int char_choice_index, const CHAR_FRAGMENT_INFO *prev_char_frag_info,
WERD_CHOICE *word, float certainties[], float *limit,
WERD_CHOICE *best_choice, int *attempts_left, void *more_args) {
if (debug) {
tprintf(
"%s permute_choices: char_choice_index=%d"
" limit=%g rating=%g, certainty=%g word=%s\n",
debug, char_choice_index, *limit, word->rating(), word->certainty(),
word->debug_string().c_str());
}
if (static_cast<unsigned>(char_choice_index) < char_choices.size()) {
BLOB_CHOICE_IT blob_choice_it;
blob_choice_it.set_to_list(char_choices.at(char_choice_index));
for (blob_choice_it.mark_cycle_pt(); !blob_choice_it.cycled_list(); blob_choice_it.forward()) {
(*attempts_left)--;
append_choices(debug, char_choices, *(blob_choice_it.data()), char_choice_index,
prev_char_frag_info, word, certainties, limit, best_choice, attempts_left,
more_args);
if (*attempts_left <= 0) {
if (debug) {
tprintf("permute_choices(): attempts_left is 0\n");
}
break;
}
}
}
}
/**
* append_choices
*
* Checks to see whether or not the next choice is worth appending to
* the word being generated. If so then keeps going deeper into the word.
*
* This function assumes that Dict::go_deeper_fxn_ is set.
*/
void Dict::append_choices(const char *debug, const BLOB_CHOICE_LIST_VECTOR &char_choices,
const BLOB_CHOICE &blob_choice, int char_choice_index,
const CHAR_FRAGMENT_INFO *prev_char_frag_info, WERD_CHOICE *word,
float certainties[], float *limit, WERD_CHOICE *best_choice,
int *attempts_left, void *more_args) {
auto word_ending = (static_cast<unsigned>(char_choice_index) == char_choices.size() - 1);
// Deal with fragments.
CHAR_FRAGMENT_INFO char_frag_info;
if (!fragment_state_okay(blob_choice.unichar_id(), blob_choice.rating(), blob_choice.certainty(),
prev_char_frag_info, debug, word_ending, &char_frag_info)) {
return; // blob_choice must be an invalid fragment
}
// Search the next letter if this character is a fragment.
if (char_frag_info.unichar_id == INVALID_UNICHAR_ID) {
permute_choices(debug, char_choices, char_choice_index + 1, &char_frag_info, word, certainties,
limit, best_choice, attempts_left, more_args);
return;
}
// Add the next unichar.
float old_rating = word->rating();
float old_certainty = word->certainty();
uint8_t old_permuter = word->permuter();
certainties[word->length()] = char_frag_info.certainty;
word->append_unichar_id_space_allocated(char_frag_info.unichar_id, char_frag_info.num_fragments,
char_frag_info.rating, char_frag_info.certainty);
// Explore the next unichar.
(this->*go_deeper_fxn_)(debug, char_choices, char_choice_index, &char_frag_info, word_ending,
word, certainties, limit, best_choice, attempts_left, more_args);
// Remove the unichar we added to explore other choices in it's place.
word->remove_last_unichar_id();
word->set_rating(old_rating);
word->set_certainty(old_certainty);
word->set_permuter(old_permuter);
}
/**
* @name fragment_state
*
* Given the current char choice and information about previously seen
* fragments, determines whether adjacent character fragments are
* present and whether they can be concatenated.
*
* The given prev_char_frag_info contains:
* - fragment: if not nullptr contains information about immediately
* preceding fragmented character choice
* - num_fragments: number of fragments that have been used so far
* to construct a character
* - certainty: certainty of the current choice or minimum
* certainty of all fragments concatenated so far
* - rating: rating of the current choice or sum of fragment
* ratings concatenated so far
*
* The output char_frag_info is filled in as follows:
* - character: is set to be nullptr if the choice is a non-matching
* or non-ending fragment piece; is set to unichar of the given choice
* if it represents a regular character or a matching ending fragment
* - fragment,num_fragments,certainty,rating are set as described above
*
* @returns false if a non-matching fragment is discovered, true otherwise.
*/
bool Dict::fragment_state_okay(UNICHAR_ID curr_unichar_id, float curr_rating, float curr_certainty,
                               const CHAR_FRAGMENT_INFO *prev_char_frag_info, const char *debug,
                               int word_ending, CHAR_FRAGMENT_INFO *char_frag_info) {
  const CHAR_FRAGMENT *this_fragment = getUnicharset().get_fragment(curr_unichar_id);
  const CHAR_FRAGMENT *prev_fragment =
      prev_char_frag_info != nullptr ? prev_char_frag_info->fragment : nullptr;
  // Print debug info for fragments.
  if (debug && (prev_fragment || this_fragment)) {
    tprintf("%s check fragments: choice=%s word_ending=%d\n", debug,
            getUnicharset().debug_str(curr_unichar_id).c_str(), word_ending);
    if (prev_fragment) {
      tprintf("prev_fragment %s\n", prev_fragment->to_string().c_str());
    }
    if (this_fragment) {
      tprintf("this_fragment %s\n", this_fragment->to_string().c_str());
    }
  }
  // Default the output to "regular character": the current choice taken
  // as-is. The fragment cases below override these fields as needed.
  char_frag_info->unichar_id = curr_unichar_id;
  char_frag_info->fragment = this_fragment;
  char_frag_info->rating = curr_rating;
  char_frag_info->certainty = curr_certainty;
  char_frag_info->num_fragments = 1;
  // A pending fragment must be continued by another fragment piece; a whole
  // character here would leave the previous fragment incomplete.
  if (prev_fragment && !this_fragment) {
    if (debug) {
      tprintf("Skip choice with incomplete fragment\n");
    }
    return false;
  }
  if (this_fragment) {
    // We are dealing with a fragment.
    // No complete character yet unless this piece finishes one below.
    char_frag_info->unichar_id = INVALID_UNICHAR_ID;
    if (prev_fragment) {
      // Continuing an in-progress fragment sequence.
      if (!this_fragment->is_continuation_of(prev_fragment)) {
        if (debug) {
          tprintf("Non-matching fragment piece\n");
        }
        return false;
      }
      if (this_fragment->is_ending()) {
        // Final piece: the fragments assemble into a whole character.
        char_frag_info->unichar_id = getUnicharset().unichar_to_id(this_fragment->get_unichar());
        char_frag_info->fragment = nullptr;
        if (debug) {
          tprintf("Built character %s from fragments\n",
                  getUnicharset().debug_str(char_frag_info->unichar_id).c_str());
        }
      } else {
        if (debug) {
          tprintf("Record fragment continuation\n");
        }
        char_frag_info->fragment = this_fragment;
      }
      // Update certainty and rating: ratings accumulate, certainty is the
      // worst (minimum) over all pieces.
      char_frag_info->rating = prev_char_frag_info->rating + curr_rating;
      char_frag_info->num_fragments = prev_char_frag_info->num_fragments + 1;
      char_frag_info->certainty = std::min(curr_certainty, prev_char_frag_info->certainty);
    } else {
      // No fragment in progress: this piece must start a new sequence.
      if (this_fragment->is_beginning()) {
        if (debug) {
          tprintf("Record fragment beginning\n");
        }
      } else {
        if (debug) {
          tprintf("Non-starting fragment piece with no prev_fragment\n");
        }
        return false;
      }
    }
  }
  // A word may not end mid-fragment.
  if (word_ending && char_frag_info->fragment) {
    if (debug) {
      tprintf("Word cannot end with a fragment\n");
    }
    return false;
  }
  return true;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/dict/permdawg.cpp
|
C++
|
apache-2.0
| 15,644
|
/******************************************************************************
** Filename: stopper.c
** Purpose: Stopping criteria for word classifier.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include <cctype>
#include <cmath>
#include <cstdio>
#include <cstring>
#include "stopper.h"
#ifndef DISABLED_LEGACY_ENGINE
# include "ambigs.h"
#endif
#include <tesseract/unichar.h>
#include "ccutil.h"
#include "dict.h"
#include "helpers.h"
#include "matchdefs.h"
#include "pageres.h"
#include "params.h"
#include "ratngs.h"
/*----------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------*/
namespace tesseract {
// Returns true if best_choice is good enough to stop the search early.
// Combines a certainty threshold (relaxed for long, well-cased dictionary
// words), dangerous-ambiguity and x-height consistency checks.
bool Dict::AcceptableChoice(const WERD_CHOICE &best_choice,
                            XHeightConsistencyEnum xheight_consistency) {
  float CertaintyThreshold = stopper_nondict_certainty_base;
  int WordSize;
  // Forced off: never accept (used to make the stopper keep searching).
  if (stopper_no_acceptable_choices) {
    return false;
  }
  if (best_choice.empty()) {
    return false;
  }
  bool no_dang_ambigs = !best_choice.dangerous_ambig_found();
  bool is_valid_word = valid_word_permuter(best_choice.permuter(), false);
  bool is_case_ok = case_ok(best_choice);
  if (stopper_debug_level >= 1) {
    // Translate the x-height enum into a label for the debug trace.
    const char *xht = "UNKNOWN";
    switch (xheight_consistency) {
      case XH_GOOD:
        xht = "NORMAL";
        break;
      case XH_SUBNORMAL:
        xht = "SUBNORMAL";
        break;
      case XH_INCONSISTENT:
        xht = "INCONSISTENT";
        break;
      default:
        xht = "UNKNOWN";
    }
    tprintf("\nStopper: %s (word=%c, case=%c, xht_ok=%s=[%g,%g])\n",
            best_choice.unichar_string().c_str(), (is_valid_word ? 'y' : 'n'),
            (is_case_ok ? 'y' : 'n'), xht, best_choice.min_x_height(), best_choice.max_x_height());
  }
  // Do not accept invalid words in PASS1.
  // (reject_offset_ <= 0 appears to distinguish the first pass — TODO confirm
  // against the callers that set reject_offset_.)
  if (reject_offset_ <= 0.0f && !is_valid_word) {
    return false;
  }
  // Well-cased dictionary words get a more lenient threshold, scaled by how
  // much their shortest alphabetic run exceeds the small-word size.
  if (is_valid_word && is_case_ok) {
    WordSize = LengthOfShortestAlphaRun(best_choice);
    WordSize -= stopper_smallword_size;
    if (WordSize < 0) {
      WordSize = 0;
    }
    CertaintyThreshold += WordSize * stopper_certainty_per_char;
  }
  if (stopper_debug_level >= 1) {
    tprintf("Stopper: Rating = %4.1f, Certainty = %4.1f, Threshold = %4.1f\n",
            best_choice.rating(), best_choice.certainty(), CertaintyThreshold);
  }
  // Accept only if every gate passes: no dangerous ambiguities, certainty
  // above threshold, x-height not inconsistent, and uniform certainties.
  if (no_dang_ambigs && best_choice.certainty() > CertaintyThreshold &&
      xheight_consistency < XH_INCONSISTENT && UniformCertainties(best_choice)) {
    return true;
  } else {
    if (stopper_debug_level >= 1) {
      tprintf(
          "AcceptableChoice() returned false"
          " (no_dang_ambig:%d cert:%.4g thresh:%g uniform:%d)\n",
          no_dang_ambigs, best_choice.certainty(), CertaintyThreshold,
          UniformCertainties(best_choice));
    }
    return false;
  }
}
// Returns true if the word's best choice is certain enough to be kept
// (i.e. should not be rejected). Mirrors AcceptableChoice but subtracts the
// accumulated reject_offset_ from the base threshold and also requires a
// unique best choice.
bool Dict::AcceptableResult(WERD_RES *word) const {
  // A word with no best choice can never be accepted.
  if (word->best_choice == nullptr) {
    return false;
  }
  const WERD_CHOICE &choice = *word->best_choice;
  // Base certainty threshold, tightened by the rejection offset.
  float threshold = stopper_nondict_certainty_base - reject_offset_;
  if (stopper_debug_level >= 1) {
    tprintf("\nRejecter: %s (word=%c, case=%c, unambig=%c, multiple=%c)\n",
            choice.debug_string().c_str(), (valid_word(choice) ? 'y' : 'n'),
            (case_ok(choice) ? 'y' : 'n'), choice.dangerous_ambig_found() ? 'n' : 'y',
            word->best_choices.singleton() ? 'n' : 'y');
  }
  // Reject empty choices and words with more than one surviving choice.
  if (choice.empty() || !word->best_choices.singleton()) {
    return false;
  }
  // Well-cased dictionary words get a more lenient threshold, scaled by how
  // much their shortest alphabetic run exceeds the small-word size.
  if (valid_word(choice) && case_ok(choice)) {
    int run_bonus = LengthOfShortestAlphaRun(choice);
    run_bonus -= stopper_smallword_size;
    if (run_bonus < 0) {
      run_bonus = 0;
    }
    threshold += run_bonus * stopper_certainty_per_char;
  }
  if (stopper_debug_level >= 1) {
    tprintf("Rejecter: Certainty = %4.1f, Threshold = %4.1f ", choice.certainty(),
            threshold);
  }
  const bool accepted = choice.certainty() > threshold && !stopper_no_acceptable_choices;
  if (stopper_debug_level >= 1) {
    tprintf(accepted ? "ACCEPTED\n" : "REJECTED\n");
  }
  return accepted;
}
#if !defined(DISABLED_LEGACY_ENGINE)
// Returns true if no dangerous ambiguity is found for best_choice, i.e. if
// permuting its characters through the known ambiguity tables cannot produce
// an alternative dictionary word. When fix_replaceable is true, "replace"
// ambiguities are applied in place to best_choice (and the ratings matrix is
// updated accordingly) before the dangerous-ambiguity search. Each ambiguity
// found is appended to *fixpt (if non-null).
bool Dict::NoDangerousAmbig(WERD_CHOICE *best_choice, DANGERR *fixpt, bool fix_replaceable,
                            MATRIX *ratings) {
  if (stopper_debug_level > 2) {
    tprintf("\nRunning NoDangerousAmbig() for %s\n", best_choice->debug_string().c_str());
  }
  // Construct BLOB_CHOICE_LIST_VECTOR with ambiguities
  // for each unichar id in BestChoice.
  BLOB_CHOICE_LIST_VECTOR ambig_blob_choices;
  bool ambigs_found = false;
  // For each position in best_choice:
  // -- choose AMBIG_SPEC_LIST that corresponds to unichar_id at best_choice[i]
  // -- initialize wrong_ngram with a single unichar_id at best_choice[i]
  // -- look for ambiguities corresponding to wrong_ngram in the list while
  //    adding the following unichar_ids from best_choice to wrong_ngram
  //
  // Repeat the above procedure twice: first time look through
  // ambigs to be replaced and replace all the ambiguities found;
  // second time look through dangerous ambiguities and construct
  // ambig_blob_choices with a fake blob choice for each ambiguity
  // and pass them to dawg_permute_and_select() to search for
  // ambiguous words in the dictionaries.
  //
  // Note that during the execution of the for loop (on the first pass)
  // if replacements are made the length of best_choice might change.
  for (int pass = 0; pass < (fix_replaceable ? 2 : 1); ++pass) {
    bool replace = (fix_replaceable && pass == 0);
    const UnicharAmbigsVector &table =
        replace ? getUnicharAmbigs().replace_ambigs() : getUnicharAmbigs().dang_ambigs();
    if (!replace) {
      // Initialize ambig_blob_choices with lists containing a single
      // unichar id for the corresponding position in best_choice.
      // best_choice consisting from only the original letters will
      // have a rating of 0.0.
      for (unsigned i = 0; i < best_choice->length(); ++i) {
        auto *lst = new BLOB_CHOICE_LIST();
        BLOB_CHOICE_IT lst_it(lst);
        // TODO(rays/antonova) Put real xheights and y shifts here.
        lst_it.add_to_end(
            new BLOB_CHOICE(best_choice->unichar_id(i), 0.0, 0.0, -1, 0, 1, 0, BCC_AMBIG));
        ambig_blob_choices.push_back(lst);
      }
    }
    // Candidate "wrong" ngram built from consecutive best_choice unichars.
    UNICHAR_ID wrong_ngram[MAX_AMBIG_SIZE + 1];
    int wrong_ngram_index;
    int blob_index = 0;
    for (unsigned i = 0; i < best_choice->length(); blob_index += best_choice->state(i), ++i) {
      auto curr_unichar_id = best_choice->unichar_id(i);
      if (stopper_debug_level > 2) {
        tprintf("Looking for %s ngrams starting with %s:\n", replace ? "replaceable" : "ambiguous",
                getUnicharset().debug_str(curr_unichar_id).c_str());
      }
      int num_wrong_blobs = best_choice->state(i);
      wrong_ngram_index = 0;
      wrong_ngram[wrong_ngram_index] = curr_unichar_id;
      if (curr_unichar_id == INVALID_UNICHAR_ID || static_cast<size_t>(curr_unichar_id) >= table.size() ||
          table[curr_unichar_id] == nullptr) {
        continue; // there is no ambig spec for this unichar id
      }
      AmbigSpec_IT spec_it(table[curr_unichar_id]);
      for (spec_it.mark_cycle_pt(); !spec_it.cycled_list();) {
        const AmbigSpec *ambig_spec = spec_it.data();
        wrong_ngram[wrong_ngram_index + 1] = INVALID_UNICHAR_ID;
        int compare = UnicharIdArrayUtils::compare(wrong_ngram, ambig_spec->wrong_ngram);
        if (stopper_debug_level > 2) {
          tprintf("candidate ngram: ");
          UnicharIdArrayUtils::print(wrong_ngram, getUnicharset());
          tprintf("current ngram from spec: ");
          UnicharIdArrayUtils::print(ambig_spec->wrong_ngram, getUnicharset());
          tprintf("comparison result: %d\n", compare);
        }
        if (compare == 0) {
          // Record the place where we found an ambiguity.
          if (fixpt != nullptr) {
            UNICHAR_ID leftmost_id = ambig_spec->correct_fragments[0];
            fixpt->push_back(DANGERR_INFO(blob_index, blob_index + num_wrong_blobs, replace,
                                          getUnicharset().get_isngram(ambig_spec->correct_ngram_id),
                                          leftmost_id));
            if (stopper_debug_level > 1) {
              tprintf("fixpt+=(%d %d %d %d %s)\n", blob_index, blob_index + num_wrong_blobs, false,
                      getUnicharset().get_isngram(ambig_spec->correct_ngram_id),
                      getUnicharset().id_to_unichar(leftmost_id));
            }
          }
          if (replace) {
            if (stopper_debug_level > 2) {
              tprintf("replace ambiguity with %s : ",
                      getUnicharset().id_to_unichar(ambig_spec->correct_ngram_id));
              UnicharIdArrayUtils::print(ambig_spec->correct_fragments, getUnicharset());
            }
            ReplaceAmbig(i, ambig_spec->wrong_ngram_size, ambig_spec->correct_ngram_id, best_choice,
                         ratings);
          } else if (i > 0 || ambig_spec->type != CASE_AMBIG) {
            // We found dang ambig - update ambig_blob_choices.
            if (stopper_debug_level > 2) {
              tprintf("found ambiguity: ");
              UnicharIdArrayUtils::print(ambig_spec->correct_fragments, getUnicharset());
            }
            ambigs_found = true;
            for (int tmp_index = 0; tmp_index <= wrong_ngram_index; ++tmp_index) {
              // Add a blob choice for the corresponding fragment of the
              // ambiguity. These fake blob choices are initialized with
              // negative ratings (which are not possible for real blob
              // choices), so that dawg_permute_and_select() considers any
              // word not consisting of only the original letters a better
              // choice and stops searching for alternatives once such a
              // choice is found.
              BLOB_CHOICE_IT bc_it(ambig_blob_choices[i + tmp_index]);
              bc_it.add_to_end(new BLOB_CHOICE(ambig_spec->correct_fragments[tmp_index], -1.0, 0.0,
                                               -1, 0, 1, 0, BCC_AMBIG));
            }
          }
          spec_it.forward();
        } else if (compare == -1) {
          // Candidate compares less than the spec's wrong ngram: grow the
          // candidate with the next unichar from best_choice (if any) and
          // retry against the same spec.
          unsigned next_index;
          if (wrong_ngram_index + 1 < ambig_spec->wrong_ngram_size &&
              ((next_index = wrong_ngram_index + 1 + i) < best_choice->length())) {
            // Add the next unichar id to wrong_ngram and keep looking for
            // more ambigs starting with curr_unichar_id in AMBIG_SPEC_LIST.
            wrong_ngram[++wrong_ngram_index] = best_choice->unichar_id(next_index);
            num_wrong_blobs += best_choice->state(next_index);
          } else {
            break; // no more matching ambigs in this AMBIG_SPEC_LIST
          }
        } else {
          // Candidate compares greater: advance to the next spec.
          spec_it.forward();
        }
      } // end searching AmbigSpec_LIST
    }   // end searching best_choice
  }     // end searching replace and dangerous ambigs
  // If any ambiguities were found permute the constructed ambig_blob_choices
  // to see if an alternative dictionary word can be found.
  if (ambigs_found) {
    if (stopper_debug_level > 2) {
      tprintf("\nResulting ambig_blob_choices:\n");
      for (unsigned i = 0; i < ambig_blob_choices.size(); ++i) {
        print_ratings_list("", ambig_blob_choices.at(i), getUnicharset());
        tprintf("\n");
      }
    }
    WERD_CHOICE *alt_word = dawg_permute_and_select(ambig_blob_choices, 0.0);
    // A negative rating means the permuter picked at least one of the fake
    // (ambiguous) blob choices, i.e. a genuinely ambiguous word exists.
    ambigs_found = (alt_word->rating() < 0.0);
    if (ambigs_found) {
      if (stopper_debug_level >= 1) {
        tprintf("Stopper: Possible ambiguous word = %s\n", alt_word->debug_string().c_str());
      }
      if (fixpt != nullptr) {
        // Note: Currently character choices combined from fragments can only
        // be generated by NoDangrousAmbigs(). This code should be updated if
        // the capability to produce classifications combined from character
        // fragments is added to other functions.
        int orig_i = 0;
        for (unsigned i = 0; i < alt_word->length(); ++i) {
          const UNICHARSET &uchset = getUnicharset();
          bool replacement_is_ngram = uchset.get_isngram(alt_word->unichar_id(i));
          UNICHAR_ID leftmost_id = alt_word->unichar_id(i);
          if (replacement_is_ngram) {
            // we have to extract the leftmost unichar from the ngram.
            const char *str = uchset.id_to_unichar(leftmost_id);
            int step = uchset.step(str);
            if (step) {
              leftmost_id = uchset.unichar_to_id(str, step);
            }
          }
          int end_i = orig_i + alt_word->state(i);
          if (alt_word->state(i) > 1 || (orig_i + 1 == end_i && replacement_is_ngram)) {
            // Compute proper blob indices.
            int blob_start = 0;
            for (int j = 0; j < orig_i; ++j) {
              blob_start += best_choice->state(j);
            }
            int blob_end = blob_start;
            for (int j = orig_i; j < end_i; ++j) {
              blob_end += best_choice->state(j);
            }
            fixpt->push_back(
                DANGERR_INFO(blob_start, blob_end, true, replacement_is_ngram, leftmost_id));
            if (stopper_debug_level > 1) {
              tprintf("fixpt->dangerous+=(%d %d %d %d %s)\n", orig_i, end_i, true,
                      replacement_is_ngram, uchset.id_to_unichar(leftmost_id));
            }
          }
          orig_i += alt_word->state(i);
        }
      }
    }
    delete alt_word;
  }
  if (output_ambig_words_file_ != nullptr) {
    fprintf(output_ambig_words_file_, "\n");
  }
  for (auto data : ambig_blob_choices) {
    delete data;
  }
  return !ambigs_found;
}
// Counterpart to the ambiguity-table setup; currently a no-op.
void Dict::EndDangerousAmbigs() {}
#endif // !defined(DISABLED_LEGACY_ENGINE)
// Prepares the stopper for pass 1: no certainty rejection offset is applied.
void Dict::SettupStopperPass1() {
  reject_offset_ = 0.0;
}
// Prepares the stopper for pass 2: applies the configured phase-2
// certainty rejection offset.
void Dict::SettupStopperPass2() {
  reject_offset_ = stopper_phase2_certainty_rejection_offset;
}
// Replaces the wrong_ngram_size unichars of werd_choice starting at
// wrong_ngram_begin_index with the single unichar correct_ngram_id.
// The replacement BLOB_CHOICE's rating is the sum, and its certainty the
// average, of the replaced choices; a corresponding entry covering all the
// replaced blobs is added to (or updated in) the ratings matrix.
void Dict::ReplaceAmbig(int wrong_ngram_begin_index, int wrong_ngram_size,
                        UNICHAR_ID correct_ngram_id, WERD_CHOICE *werd_choice, MATRIX *ratings) {
  int num_blobs_to_replace = 0;
  int begin_blob_index = 0;
  int i;
  // Rating and certainty for the new BLOB_CHOICE are derived from the
  // replaced choices.
  float new_rating = 0.0f;
  float new_certainty = 0.0f;
  BLOB_CHOICE *old_choice = nullptr;
  for (i = 0; i < wrong_ngram_begin_index + wrong_ngram_size; ++i) {
    if (i >= wrong_ngram_begin_index) {
      int num_blobs = werd_choice->state(i);
      // (col, row) is the blob span of this unichar in the ratings matrix.
      int col = begin_blob_index + num_blobs_to_replace;
      int row = col + num_blobs - 1;
      BLOB_CHOICE_LIST *choices = ratings->get(col, row);
      ASSERT_HOST(choices != nullptr);
      old_choice = FindMatchingChoice(werd_choice->unichar_id(i), choices);
      ASSERT_HOST(old_choice != nullptr);
      new_rating += old_choice->rating();
      new_certainty += old_choice->certainty();
      num_blobs_to_replace += num_blobs;
    } else {
      begin_blob_index += werd_choice->state(i);
    }
  }
  // Rating is summed above; certainty is averaged over the replaced unichars.
  new_certainty /= wrong_ngram_size;
  // If there is no entry in the ratings matrix, add it.
  MATRIX_COORD coord(begin_blob_index, begin_blob_index + num_blobs_to_replace - 1);
  if (!coord.Valid(*ratings)) {
    ratings->IncreaseBandSize(coord.row - coord.col + 1);
  }
  if (ratings->get(coord.col, coord.row) == nullptr) {
    ratings->put(coord.col, coord.row, new BLOB_CHOICE_LIST);
  }
  BLOB_CHOICE_LIST *new_choices = ratings->get(coord.col, coord.row);
  BLOB_CHOICE *choice = FindMatchingChoice(correct_ngram_id, new_choices);
  if (choice != nullptr) {
    // Already there. Upgrade if new rating better.
    if (new_rating < choice->rating()) {
      choice->set_rating(new_rating);
    }
    if (new_certainty < choice->certainty()) {
      choice->set_certainty(new_certainty);
    }
    // DO NOT SORT!! It will mess up the iterator in LanguageModel::UpdateState.
  } else {
    // Need a new choice with the correct_ngram_id.
    choice = new BLOB_CHOICE(*old_choice);
    choice->set_unichar_id(correct_ngram_id);
    choice->set_rating(new_rating);
    choice->set_certainty(new_certainty);
    choice->set_classifier(BCC_AMBIG);
    choice->set_matrix_cell(coord.col, coord.row);
    BLOB_CHOICE_IT it(new_choices);
    it.add_to_end(choice);
  }
  // Remove current unichar from werd_choice. On the last iteration
  // set the correct replacement unichar instead of removing a unichar.
  for (int replaced_count = 0; replaced_count < wrong_ngram_size; ++replaced_count) {
    if (replaced_count + 1 == wrong_ngram_size) {
      werd_choice->set_blob_choice(wrong_ngram_begin_index, num_blobs_to_replace, choice);
    } else {
      werd_choice->remove_unichar_id(wrong_ngram_begin_index + 1);
    }
  }
  if (stopper_debug_level >= 1) {
    werd_choice->print("ReplaceAmbig() ");
    tprintf("Modified blob_choices: ");
    print_ratings_list("\n", new_choices, getUnicharset());
  }
}
// Returns the length of the shortest contiguous run of alphabetic unichars
// in WordChoice, or 0 if the word contains no alphabetic characters at all.
int Dict::LengthOfShortestAlphaRun(const WERD_CHOICE &WordChoice) const {
  const unsigned num_chars = WordChoice.length();
  int min_run = INT32_MAX;
  int run_len = 0;
  // Scan one position past the end so the final run is flushed by the
  // same code path as interior runs.
  for (unsigned pos = 0; pos <= num_chars; ++pos) {
    const bool is_alpha =
        pos < num_chars && WordChoice.unicharset()->get_isalpha(WordChoice.unichar_id(pos));
    if (is_alpha) {
      ++run_len;
    } else if (run_len > 0) {
      if (run_len < min_run) {
        min_run = run_len;
      }
      run_len = 0;
    }
  }
  // No alpha run at all: report 0 rather than the INT32_MAX sentinel.
  return min_run == INT32_MAX ? 0 : min_run;
}
// Returns true if the blob certainties in word are uniform enough for the
// word to be accepted, i.e. the word certainty is not an outlier relative to
// the mean and standard deviation computed with the single worst blob
// certainty excluded. Words shorter than 3 blobs always pass.
int Dict::UniformCertainties(const WERD_CHOICE &word) {
  int word_length = word.length();
  if (word_length < 3) {
    return true;
  }
  double total = 0.0;
  double total_squared = 0.0;
  float worst = FLT_MAX;
  for (int i = 0; i < word_length; ++i) {
    float certainty = word.certainty(i);
    total += certainty;
    total_squared += static_cast<double>(certainty) * certainty;
    if (certainty < worst) {
      worst = certainty;
    }
  }
  // Exclude the single worst certainty from the statistics so one bad blob
  // does not inflate the spread estimate.
  word_length--;
  total -= worst;
  total_squared -= static_cast<double>(worst) * worst;
  float mean = total / word_length;
  double variance =
      ((word_length * total_squared - total * total) / (word_length * (word_length - 1)));
  if (variance < 0.0) {
    variance = 0.0;
  }
  float std_dev = sqrt(variance);
  // Accept anything within stopper_allowable_character_badness standard
  // deviations of the mean, capped at stopper_nondict_certainty_base.
  float threshold = mean - stopper_allowable_character_badness * std_dev;
  if (threshold > stopper_nondict_certainty_base) {
    threshold = stopper_nondict_certainty_base;
  }
  if (word.certainty() >= threshold) {
    return true;
  }
  if (stopper_debug_level >= 1) {
    tprintf(
        "Stopper: Non-uniform certainty = %4.1f"
        " (m=%4.1f, s=%4.1f, t=%4.1f)\n",
        word.certainty(), mean, std_dev, threshold);
  }
  return false;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/dict/stopper.cpp
|
C++
|
apache-2.0
| 20,118
|
/******************************************************************************
** Filename: stopper.h
** Purpose: Stopping criteria for word classifier.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef STOPPER_H
#define STOPPER_H
#include "params.h"
#include "ratngs.h"
#include <tesseract/unichar.h>
namespace tesseract {
class WERD_CHOICE;
using BLOB_WIDTH = uint8_t;
// Describes one ambiguity found in a word: the blob range it covers,
// whether it is a dangerous ambiguity (vs. a plain replacement), whether
// the correct interpretation is an ngram, and the leftmost unichar of the
// replacement.
struct DANGERR_INFO {
  DANGERR_INFO() = default;
  DANGERR_INFO(int b, int e, bool d, bool n, UNICHAR_ID l)
      : begin(b), end(e), dangerous(d), correct_is_ngram(n), leftmost(l) {}
  int begin = -1;
  int end = -1;
  bool dangerous = false;
  bool correct_is_ngram = false;
  UNICHAR_ID leftmost = INVALID_UNICHAR_ID; // in the replacement, what's the leftmost character?
};
using DANGERR = std::vector<DANGERR_INFO>;
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/dict/stopper.h
|
C++
|
apache-2.0
| 1,592
|
/******************************************************************************
*
* File: trie.cpp (Formerly trie.c)
* Description: Functions to build a trie data structure.
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "trie.h"
#include "dawg.h"
#include "dict.h"
#include "helpers.h"
#include "kdpair.h"
namespace tesseract {
// Printable names for the RTL reverse policies; RTLReversePolicyNames is
// indexed by the RTLReversePolicy enum value (see get_reverse_policy_name).
const char kDoNotReverse[] = "RRP_DO_NO_REVERSE";
const char kReverseIfHasRTL[] = "RRP_REVERSE_IF_HAS_RTL";
const char kForceReverse[] = "RRP_FORCE_REVERSE";
const char *const RTLReversePolicyNames[] = {kDoNotReverse, kReverseIfHasRTL, kForceReverse};
// Placeholder unichars (code points U+2000..U+2005) registered by
// initialize_patterns() to stand for character classes in user patterns.
const char Trie::kAlphaPatternUnicode[] = "\u2000";
const char Trie::kDigitPatternUnicode[] = "\u2001";
const char Trie::kAlphanumPatternUnicode[] = "\u2002";
const char Trie::kPuncPatternUnicode[] = "\u2003";
const char Trie::kLowerPatternUnicode[] = "\u2004";
const char Trie::kUpperPatternUnicode[] = "\u2005";
// Returns the printable name of the given RTL reverse policy.
const char *Trie::get_reverse_policy_name(RTLReversePolicy reverse_policy) {
  return RTLReversePolicyNames[reverse_policy];
}
// Discards all nodes and edges, returning the Trie to its initial state
// with a single freshly allocated root node (node 0).
void Trie::clear() {
  for (size_t idx = 0; idx < nodes_.size(); ++idx) {
    delete nodes_[idx];
  }
  nodes_.clear();
  root_back_freelist_.clear();
  num_edges_ = 0;
  // Node 0 must always exist; re-create it.
  new_dawg_node();
}
// Searches the edges of node_ref (forward or backward, per direction) for
// one matching next_node/word_end/unichar_id. On success sets *edge_ptr and
// *edge_index and returns true. The root node's forward edges are kept
// sorted (see add_edge_linkage), so they are binary-searched; all other
// edge vectors are scanned linearly. Returns false for NO_EDGE or no match.
bool Trie::edge_char_of(NODE_REF node_ref, NODE_REF next_node, int direction, bool word_end,
                        UNICHAR_ID unichar_id, EDGE_RECORD **edge_ptr,
                        EDGE_INDEX *edge_index) const {
  if (debug_level_ == 3) {
    tprintf("edge_char_of() given node_ref " REFFORMAT " next_node " REFFORMAT
            " direction %d word_end %d unichar_id %d, exploring node:\n",
            node_ref, next_node, direction, word_end, unichar_id);
    if (node_ref != NO_EDGE) {
      print_node(node_ref, nodes_[node_ref]->forward_edges.size());
    }
  }
  if (node_ref == NO_EDGE) {
    return false;
  }
  assert(static_cast<size_t>(node_ref) < nodes_.size());
  EDGE_VECTOR &vec = (direction == FORWARD_EDGE) ? nodes_[node_ref]->forward_edges
                                                 : nodes_[node_ref]->backward_edges;
  int vec_size = vec.size();
  if (node_ref == 0 && direction == FORWARD_EDGE) { // binary search
    EDGE_INDEX start = 0;
    EDGE_INDEX end = vec_size - 1;
    EDGE_INDEX k;
    int compare;
    while (start <= end) {
      k = (start + end) >> 1; // (start + end) / 2
      compare = given_greater_than_edge_rec(next_node, word_end, unichar_id, vec[k]);
      if (compare == 0) { // given == vec[k]
        *edge_ptr = &(vec[k]);
        *edge_index = k;
        return true;
      } else if (compare == 1) { // given > vec[k]
        start = k + 1;
      } else { // given < vec[k]
        end = k - 1;
      }
    }
  } else { // linear search
    for (int i = 0; i < vec_size; ++i) {
      EDGE_RECORD &edge_rec = vec[i];
      if (edge_rec_match(next_node, word_end, unichar_id, next_node_from_edge_rec(edge_rec),
                         end_of_word_from_edge_rec(edge_rec), unichar_id_from_edge_rec(edge_rec))) {
        *edge_ptr = &(edge_rec);
        *edge_index = i;
        return true;
      }
    }
  }
  return false; // not found
}
// Adds an edge record from node1 to node2 in the given direction with the
// given marker/word_end attributes. Forward edges of the root node are kept
// in sorted order (insertion point found by linear scan); backward edges of
// the root node reuse a zeroed slot from root_back_freelist_ when one is
// available (see remove_edge_linkage). Always returns true.
bool Trie::add_edge_linkage(NODE_REF node1, NODE_REF node2, bool marker_flag, int direction,
                            bool word_end, UNICHAR_ID unichar_id) {
  EDGE_VECTOR *vec = (direction == FORWARD_EDGE) ? &(nodes_[node1]->forward_edges)
                                                 : &(nodes_[node1]->backward_edges);
  unsigned search_index;
  if (node1 == 0 && direction == FORWARD_EDGE) {
    search_index = 0; // find the index to make the add sorted
    while (search_index < vec->size() &&
           given_greater_than_edge_rec(node2, word_end, unichar_id, (*vec)[search_index]) == 1) {
      search_index++;
    }
  } else {
    search_index = vec->size(); // add is unsorted, so index does not matter
  }
  EDGE_RECORD edge_rec;
  link_edge(&edge_rec, node2, marker_flag, direction, word_end, unichar_id);
  if (node1 == 0 && direction == BACKWARD_EDGE && !root_back_freelist_.empty()) {
    // Re-use a freed slot in the (huge) root backward-edge vector.
    EDGE_INDEX edge_index = root_back_freelist_.back();
    root_back_freelist_.pop_back();
    (*vec)[edge_index] = edge_rec;
  } else if (search_index < vec->size()) {
    vec->insert(vec->begin() + search_index, edge_rec);
  } else {
    vec->push_back(edge_rec);
  }
  if (debug_level_ > 1) {
    tprintf("new edge in nodes_[" REFFORMAT "]: ", node1);
    print_edge_rec(edge_rec);
    tprintf("\n");
  }
  num_edges_++;
  return true;
}
// Marks the forward edge *edge_ptr and its matching backward edge (found in
// the_next_node) as ending a word; also sets the marker flag on both edges
// when marker_flag is true.
void Trie::add_word_ending(EDGE_RECORD *edge_ptr, NODE_REF the_next_node, bool marker_flag,
                           UNICHAR_ID unichar_id) {
  EDGE_RECORD *back_edge_ptr;
  EDGE_INDEX back_edge_index;
  ASSERT_HOST(edge_char_of(the_next_node, NO_EDGE, BACKWARD_EDGE, false, unichar_id, &back_edge_ptr,
                           &back_edge_index));
  if (marker_flag) {
    *back_edge_ptr |= (MARKER_FLAG << flag_start_bit_);
    *edge_ptr |= (MARKER_FLAG << flag_start_bit_);
  }
  // Mark both directions as end of word.
  *back_edge_ptr |= (WERD_END_FLAG << flag_start_bit_);
  *edge_ptr |= (WERD_END_FLAG << flag_start_bit_);
}
// Adds word to the trie, following the longest existing prefix path and
// creating new nodes/edges for the remainder. repetitions, if non-null, is
// parallel to the word and marks unichars that may repeat (sets the marker
// flag on their edges). Returns false if the word is empty, contains
// out-of-range unichar ids, or if adding an edge/node fails -- in the last
// case the whole trie is cleared.
bool Trie::add_word_to_dawg(const WERD_CHOICE &word, const std::vector<bool> *repetitions) {
  if (word.length() <= 0) {
    return false; // can't add empty words
  }
  if (repetitions != nullptr) {
    ASSERT_HOST(repetitions->size() == word.length());
  }
  // Make sure the word does not contain invalid unchar ids.
  for (unsigned i = 0; i < word.length(); ++i) {
    if (word.unichar_id(i) < 0 || word.unichar_id(i) >= unicharset_size_) {
      return false;
    }
  }
  EDGE_RECORD *edge_ptr;
  NODE_REF last_node = 0;
  NODE_REF the_next_node;
  bool marker_flag = false;
  EDGE_INDEX edge_index;
  int32_t still_finding_chars = true;
  int32_t word_end = false;
  bool add_failed = false;
  bool found;
  if (debug_level_ > 1) {
    word.print("\nAdding word: ");
  }
  UNICHAR_ID unichar_id;
  unsigned i;
  // Process all characters except the last one.
  for (i = 0; i < word.length() - 1; ++i) {
    unichar_id = word.unichar_id(i);
    marker_flag = (repetitions != nullptr) ? (*repetitions)[i] : false;
    if (debug_level_ > 1) {
      tprintf("Adding letter %d\n", unichar_id);
    }
    if (still_finding_chars) {
      found = edge_char_of(last_node, NO_EDGE, FORWARD_EDGE, word_end, unichar_id, &edge_ptr,
                           &edge_index);
      if (found && debug_level_ > 1) {
        tprintf("exploring edge " REFFORMAT " in node " REFFORMAT "\n", edge_index, last_node);
      }
      if (!found) {
        still_finding_chars = false;
      } else if (next_node_from_edge_rec(*edge_ptr) == 0) {
        // We hit the end of an existing word, but the new word is longer.
        // In this case we have to disconnect the existing word from the
        // backwards root node, mark the current position as end-of-word
        // and add new nodes for the increased length. Disconnecting the
        // existing word from the backwards root node requires a linear
        // search, so it is much faster to add the longest words first,
        // to avoid having to come here.
        word_end = true;
        still_finding_chars = false;
        remove_edge(last_node, 0, word_end, unichar_id);
      } else {
        // We have to add a new branch here for the new word.
        if (marker_flag) {
          set_marker_flag_in_edge_rec(edge_ptr);
        }
        last_node = next_node_from_edge_rec(*edge_ptr);
      }
    }
    if (!still_finding_chars) {
      // Past the shared prefix: allocate a fresh node and link it in.
      the_next_node = new_dawg_node();
      if (debug_level_ > 1) {
        tprintf("adding node " REFFORMAT "\n", the_next_node);
      }
      if (the_next_node == 0) {
        add_failed = true;
        break;
      }
      if (!add_new_edge(last_node, the_next_node, marker_flag, word_end, unichar_id)) {
        add_failed = true;
        break;
      }
      word_end = false;
      last_node = the_next_node;
    }
  }
  // Process the last character of the word.
  the_next_node = 0;
  unichar_id = word.unichar_id(i);
  marker_flag = (repetitions != nullptr) ? (*repetitions)[i] : false;
  if (debug_level_ > 1) {
    tprintf("Adding letter %d\n", unichar_id);
  }
  if (still_finding_chars &&
      edge_char_of(last_node, NO_EDGE, FORWARD_EDGE, false, unichar_id, &edge_ptr, &edge_index)) {
    // An extension of this word already exists in the trie, so we
    // only have to add the ending flags in both directions.
    add_word_ending(edge_ptr, next_node_from_edge_rec(*edge_ptr), marker_flag, unichar_id);
  } else {
    // Add a link to node 0. All leaves connect to node 0 so the back links can
    // be used in reduction to a dawg. This root backward node has one edge
    // entry for every word, (except prefixes of longer words) so it is huge.
    if (!add_failed && !add_new_edge(last_node, the_next_node, marker_flag, true, unichar_id)) {
      add_failed = true;
    }
  }
  if (add_failed) {
    tprintf("Re-initializing document dictionary...\n");
    clear();
    return false;
  } else {
    return true;
  }
}
// Allocates a new, empty trie node and returns its index within nodes_.
NODE_REF Trie::new_dawg_node() {
  auto *node = new TRIE_NODE_RECORD();
  nodes_.push_back(node);
  return nodes_.size() - 1;
}
bool Trie::read_and_add_word_list(const char *filename, const UNICHARSET &unicharset,
Trie::RTLReversePolicy reverse_policy) {
std::vector<std::string> word_list;
if (!read_word_list(filename, &word_list)) {
return false;
}
std::sort(word_list.begin(), word_list.end(),
[](auto &s1, auto &s2) { return s1.size() > s2.size(); });
return add_word_list(word_list, unicharset, reverse_policy);
}
bool Trie::read_word_list(const char *filename, std::vector<std::string> *words) {
FILE *word_file;
char line_str[CHARS_PER_LINE];
int word_count = 0;
word_file = fopen(filename, "rb");
if (word_file == nullptr) {
return false;
}
while (fgets(line_str, sizeof(line_str), word_file) != nullptr) {
chomp_string(line_str); // remove newline
std::string word_str(line_str);
++word_count;
if (debug_level_ && word_count % 10000 == 0) {
tprintf("Read %d words so far\n", word_count);
}
words->push_back(word_str);
}
if (debug_level_) {
tprintf("Read %d words total.\n", word_count);
}
fclose(word_file);
return true;
}
bool Trie::add_word_list(const std::vector<std::string> &words, const UNICHARSET &unicharset,
Trie::RTLReversePolicy reverse_policy) {
for (const auto &i : words) {
WERD_CHOICE word(i.c_str(), unicharset);
if (word.empty() || word.contains_unichar_id(INVALID_UNICHAR_ID)) {
continue;
}
if ((reverse_policy == RRP_REVERSE_IF_HAS_RTL && word.has_rtl_unichar_id()) ||
reverse_policy == RRP_FORCE_REVERSE) {
word.reverse_and_mirror_unichar_ids();
}
if (!word_in_dawg(word)) {
add_word_to_dawg(word);
if (!word_in_dawg(word)) {
tprintf("Error: word '%s' not in DAWG after adding it\n", i.c_str());
return false;
}
}
}
return true;
}
// Registers the six character-class placeholder unichars in *unicharset and
// caches their ids in the corresponding pattern members. Must be called
// before read_pattern_list(). Also records the unicharset size.
void Trie::initialize_patterns(UNICHARSET *unicharset) {
  // Inserts one placeholder string and returns its assigned id.
  auto register_pattern = [unicharset](const char *pattern_str) {
    unicharset->unichar_insert(pattern_str);
    return unicharset->unichar_to_id(pattern_str);
  };
  alpha_pattern_ = register_pattern(kAlphaPatternUnicode);
  digit_pattern_ = register_pattern(kDigitPatternUnicode);
  alphanum_pattern_ = register_pattern(kAlphanumPatternUnicode);
  punc_pattern_ = register_pattern(kPuncPatternUnicode);
  lower_pattern_ = register_pattern(kLowerPatternUnicode);
  upper_pattern_ = register_pattern(kUpperPatternUnicode);
  initialized_patterns_ = true;
  unicharset_size_ = unicharset->size();
}
// Appends to *vec the ids of every pattern unichar (alpha, alphanum,
// lower/upper, digit, punctuation) that unichar_id matches according to
// unicharset.
void Trie::unichar_id_to_patterns(UNICHAR_ID unichar_id, const UNICHARSET &unicharset,
                                  std::vector<UNICHAR_ID> *vec) const {
  const bool alpha = unicharset.get_isalpha(unichar_id);
  const bool digit = unicharset.get_isdigit(unichar_id);
  if (alpha) {
    vec->push_back(alpha_pattern_);
    vec->push_back(alphanum_pattern_);
    if (unicharset.get_islower(unichar_id)) {
      vec->push_back(lower_pattern_);
    } else if (unicharset.get_isupper(unichar_id)) {
      vec->push_back(upper_pattern_);
    }
  }
  if (digit) {
    vec->push_back(digit_pattern_);
    // Avoid pushing the alphanum pattern twice for characters that are
    // both alpha and digit.
    if (!alpha) {
      vec->push_back(alphanum_pattern_);
    }
  }
  if (unicharset.get_ispunctuation(unichar_id)) {
    vec->push_back(punc_pattern_);
  }
}
// Maps a character-class code (as written after a backslash in user
// pattern files, e.g. "\c") to the corresponding pattern unichar id;
// returns INVALID_UNICHAR_ID for an unrecognized code.
UNICHAR_ID Trie::character_class_to_pattern(char ch) {
  switch (ch) {
    case 'c':
      return alpha_pattern_;
    case 'd':
      return digit_pattern_;
    case 'n':
      return alphanum_pattern_;
    case 'p':
      return punc_pattern_;
    case 'a':
      return lower_pattern_;
    case 'A':
      return upper_pattern_;
    default:
      return INVALID_UNICHAR_ID;
  }
}
// Reads user-defined patterns from filename and inserts each parsed pattern
// into the trie. Backslash escapes are expanded: "\\" for a literal
// backslash, "\*" to mark the previous unichar as repeatable, and a single
// escaped letter for a character class (see character_class_to_pattern).
// initialize_patterns() must have been called first. Returns false on
// setup/open failure; individually invalid patterns are skipped with a
// warning.
bool Trie::read_pattern_list(const char *filename, const UNICHARSET &unicharset) {
  if (!initialized_patterns_) {
    tprintf("please call initialize_patterns() before read_pattern_list()\n");
    return false;
  }
  FILE *pattern_file = fopen(filename, "rb");
  if (pattern_file == nullptr) {
    tprintf("Error opening pattern file %s\n", filename);
    return false;
  }
  int pattern_count = 0;
  char string[CHARS_PER_LINE];
  while (fgets(string, CHARS_PER_LINE, pattern_file) != nullptr) {
    chomp_string(string); // remove newline
    // Parse the pattern and construct a unichar id vector.
    // Record the number of repetitions of each unichar in the parallel vector.
    WERD_CHOICE word(&unicharset);
    std::vector<bool> repetitions_vec;
    const char *str_ptr = string;
    int step = unicharset.step(str_ptr);
    bool failed = false;
    while (step > 0) {
      UNICHAR_ID curr_unichar_id = INVALID_UNICHAR_ID;
      if (step == 1 && *str_ptr == '\\') {
        ++str_ptr;
        if (*str_ptr == '\\') { // regular '\' unichar that was escaped
          curr_unichar_id = unicharset.unichar_to_id(str_ptr, step);
        } else {
#if 0 // TODO: This code should be enabled if kSaneNumConcreteChars != 0.
          if (word.length() < kSaneNumConcreteChars) {
            tprintf(
                "Please provide at least %d concrete characters at the"
                " beginning of the pattern\n",
                kSaneNumConcreteChars);
            failed = true;
            break;
          }
#endif
          // Parse character class from expression.
          curr_unichar_id = character_class_to_pattern(*str_ptr);
        }
      } else {
        // Ordinary (unescaped) unichar.
        curr_unichar_id = unicharset.unichar_to_id(str_ptr, step);
      }
      if (curr_unichar_id == INVALID_UNICHAR_ID) {
        failed = true;
        break; // failed to parse this pattern
      }
      word.append_unichar_id(curr_unichar_id, 1, 0.0, 0.0);
      repetitions_vec.push_back(false);
      str_ptr += step;
      step = unicharset.step(str_ptr);
      // Check if there is a repetition pattern specified after this unichar.
      if (step == 1 && *str_ptr == '\\' && *(str_ptr + 1) == '*') {
        repetitions_vec[repetitions_vec.size() - 1] = true;
        str_ptr += 2;
        step = unicharset.step(str_ptr);
      }
    }
    if (failed) {
      tprintf("Invalid user pattern %s\n", string);
      continue;
    }
    // Insert the pattern into the trie.
    if (debug_level_ > 2) {
      tprintf("Inserting expanded user pattern %s\n", word.debug_string().c_str());
    }
    if (!this->word_in_dawg(word)) {
      this->add_word_to_dawg(word, &repetitions_vec);
      if (!this->word_in_dawg(word)) {
        tprintf("Error: failed to insert pattern '%s'\n", string);
      }
    }
    ++pattern_count;
  }
  if (debug_level_) {
    tprintf("Read %d valid patterns from %s\n", pattern_count, filename);
  }
  fclose(pattern_file);
  return true;
}
// Removes the edge from node1 matching (node2, word_end, unichar_id) in the
// given direction. Backward edges of the root node are zeroed (KillEdge)
// and their slot recorded in root_back_freelist_ instead of erased, to
// avoid O(n) erasure on the huge root backward-edge vector (see the
// optimization notes above trie_to_dawg()).
void Trie::remove_edge_linkage(NODE_REF node1, NODE_REF node2, int direction, bool word_end,
                               UNICHAR_ID unichar_id) {
  EDGE_RECORD *edge_ptr = nullptr;
  EDGE_INDEX edge_index = 0;
  ASSERT_HOST(edge_char_of(node1, node2, direction, word_end, unichar_id, &edge_ptr, &edge_index));
  if (debug_level_ > 1) {
    tprintf("removed edge in nodes_[" REFFORMAT "]: ", node1);
    print_edge_rec(*edge_ptr);
    tprintf("\n");
  }
  if (direction == FORWARD_EDGE) {
    nodes_[node1]->forward_edges.erase(nodes_[node1]->forward_edges.begin() + edge_index);
  } else if (node1 == 0) {
    KillEdge(&nodes_[node1]->backward_edges[edge_index]);
    root_back_freelist_.push_back(edge_index);
  } else {
    nodes_[node1]->backward_edges.erase(nodes_[node1]->backward_edges.begin() + edge_index);
  }
  --num_edges_;
}
// Some optimizations employed in add_word_to_dawg and trie_to_dawg:
// 1 Avoid insertion sorting or bubble sorting the tail root node
// (back links on node 0, a list of all the leaves.). The node is
// huge, and sorting it with n^2 time is terrible.
// 2 Avoid using vector::erase on the tail root node.
// (a) During add of words to the trie, zero-out the unichars and
// keep a freelist of spaces to re-use.
// (b) During reduction, just zero-out the unichars of deleted back
// links, skipping zero entries while searching.
// 3 Avoid linear search of the tail root node. This has to be done when
// a suffix is added to an existing word. Adding words by decreasing
// length avoids this problem entirely. Words can still be added in
// any order, but it is faster to add the longest first.
// Converts this trie into a newly allocated SquishedDawg: first reduces the
// node graph starting from the root (reduce_node_input), then flattens the
// forward edges of the surviving nodes into a single EDGE_RECORD array,
// remapping next-node references; empty nodes and backward edges are
// dropped. The last edge of each node gets the marker flag set.
SquishedDawg *Trie::trie_to_dawg() {
  root_back_freelist_.clear(); // Will be invalidated by trie_to_dawg.
  if (debug_level_ > 2) {
    print_all("Before reduction:", MAX_NODE_EDGES_DISPLAY);
  }
  std::vector<bool> reduced_nodes(nodes_.size());
  this->reduce_node_input(0, reduced_nodes);
  if (debug_level_ > 2) {
    print_all("After reduction:", MAX_NODE_EDGES_DISPLAY);
  }
  // Build a translation map from node indices in nodes_ vector to
  // their target indices in EDGE_ARRAY.
  std::vector<NODE_REF> node_ref_map(nodes_.size() + 1);
  unsigned i;
  for (i = 0; i < nodes_.size(); ++i) {
    node_ref_map[i + 1] = node_ref_map[i] + nodes_[i]->forward_edges.size();
  }
  int num_forward_edges = node_ref_map[i];
  // Convert nodes_ vector into EDGE_ARRAY translating the next node references
  // in edges using node_ref_map. Empty nodes and backward edges are dropped.
  auto edge_array = new EDGE_RECORD[num_forward_edges];
  EDGE_ARRAY edge_array_ptr = edge_array;
  for (i = 0; i < nodes_.size(); ++i) {
    TRIE_NODE_RECORD *node_ptr = nodes_[i];
    int end = node_ptr->forward_edges.size();
    for (int j = 0; j < end; ++j) {
      EDGE_RECORD &edge_rec = node_ptr->forward_edges[j];
      NODE_REF node_ref = next_node_from_edge_rec(edge_rec);
      ASSERT_HOST(static_cast<size_t>(node_ref) < nodes_.size());
      UNICHAR_ID unichar_id = unichar_id_from_edge_rec(edge_rec);
      link_edge(edge_array_ptr, node_ref_map[node_ref], false, FORWARD_EDGE,
                end_of_word_from_edge_rec(edge_rec), unichar_id);
      if (j == end - 1) {
        // Last edge of the node: set the marker flag.
        set_marker_flag_in_edge_rec(edge_array_ptr);
      }
      ++edge_array_ptr;
    }
  }
  return new SquishedDawg(edge_array, num_forward_edges, type_, lang_, perm_, unicharset_size_,
                          debug_level_);
}
// Merges the destination nodes of edge1 and edge2 (both outgoing from
// node): every link to/from edge2's target node is re-pointed at edge1's
// target, after which edge2's target node is emptied. The backward link in
// node to edge2's target is zeroed out by the caller. Always returns true.
bool Trie::eliminate_redundant_edges(NODE_REF node, const EDGE_RECORD &edge1,
                                     const EDGE_RECORD &edge2) {
  if (debug_level_ > 1) {
    tprintf("\nCollapsing node %" PRIi64 ":\n", node);
    print_node(node, MAX_NODE_EDGES_DISPLAY);
    tprintf("Candidate edges: ");
    print_edge_rec(edge1);
    tprintf(", ");
    print_edge_rec(edge2);
    tprintf("\n\n");
  }
  NODE_REF next_node1 = next_node_from_edge_rec(edge1);
  NODE_REF next_node2 = next_node_from_edge_rec(edge2);
  TRIE_NODE_RECORD *next_node2_ptr = nodes_[next_node2];
  // Translate all edges going to/from next_node2 to go to/from next_node1.
  EDGE_RECORD *edge_ptr = nullptr;
  EDGE_INDEX edge_index;
  // The backward link in node to next_node2 will be zeroed out by the caller.
  // Copy all the backward links in next_node2 to node next_node1
  for (unsigned i = 0; i < next_node2_ptr->backward_edges.size(); ++i) {
    const EDGE_RECORD &bkw_edge = next_node2_ptr->backward_edges[i];
    NODE_REF curr_next_node = next_node_from_edge_rec(bkw_edge);
    UNICHAR_ID curr_unichar_id = unichar_id_from_edge_rec(bkw_edge);
    int curr_word_end = end_of_word_from_edge_rec(bkw_edge);
    bool marker_flag = marker_flag_from_edge_rec(bkw_edge);
    add_edge_linkage(next_node1, curr_next_node, marker_flag, BACKWARD_EDGE, curr_word_end,
                     curr_unichar_id);
    // Relocate the corresponding forward edge in curr_next_node
    ASSERT_HOST(edge_char_of(curr_next_node, next_node2, FORWARD_EDGE, curr_word_end,
                             curr_unichar_id, &edge_ptr, &edge_index));
    set_next_node_in_edge_rec(edge_ptr, next_node1);
  }
  // Empty out next_node2 entirely; its edges are now accounted for
  // elsewhere or abandoned.
  int next_node2_num_edges =
      (next_node2_ptr->forward_edges.size() + next_node2_ptr->backward_edges.size());
  if (debug_level_ > 1) {
    tprintf("removed %d edges from node " REFFORMAT "\n", next_node2_num_edges, next_node2);
  }
  next_node2_ptr->forward_edges.clear();
  next_node2_ptr->backward_edges.clear();
  num_edges_ -= next_node2_num_edges;
  return true;
}
// Scans the group of backward edges of |node| that starts at |edge_index|
// and all carry |unichar_id| (the edge vector has been sorted by unichar id
// by the caller), looking for pairs that can be collapsed via
// eliminate_redundant_edges(). Collapsed targets are marked un-reduced in
// |reduced_nodes| and the losing edge is killed in place.
// Returns true if any collapse happened (the caller loops until false).
bool Trie::reduce_lettered_edges(EDGE_INDEX edge_index, UNICHAR_ID unichar_id, NODE_REF node,
                                 EDGE_VECTOR *backward_edges, std::vector<bool> &reduced_nodes) {
  if (debug_level_ > 1) {
    tprintf("reduce_lettered_edges(edge=" REFFORMAT ")\n", edge_index);
  }
  // Compare each of the edge pairs with the given unichar_id.
  bool did_something = false;
  // Note: the inner while advances i, so the outer for both finds the next
  // candidate and walks the group.
  for (unsigned i = edge_index; i < backward_edges->size() - 1; ++i) {
    // Find the first edge that can be eliminated.
    UNICHAR_ID curr_unichar_id = INVALID_UNICHAR_ID;
    while (i < backward_edges->size()) {
      if (!DeadEdge((*backward_edges)[i])) {
        curr_unichar_id = unichar_id_from_edge_rec((*backward_edges)[i]);
        if (curr_unichar_id != unichar_id) {
          // Past the end of this unichar-id group (vector is sorted).
          return did_something;
        }
        if (can_be_eliminated((*backward_edges)[i])) {
          break;
        }
      }
      ++i;
    }
    if (i == backward_edges->size()) {
      break;
    }
    const EDGE_RECORD &edge_rec = (*backward_edges)[i];
    // Compare it to the rest of the edges with the given unichar_id.
    for (auto j = i + 1; j < backward_edges->size(); ++j) {
      const EDGE_RECORD &next_edge_rec = (*backward_edges)[j];
      if (DeadEdge(next_edge_rec)) {
        continue;
      }
      UNICHAR_ID next_id = unichar_id_from_edge_rec(next_edge_rec);
      if (next_id != unichar_id) {
        break;
      }
      // A pair is collapsible when word-end flags match and the second
      // edge's target also has a single forward edge.
      if (end_of_word_from_edge_rec(next_edge_rec) == end_of_word_from_edge_rec(edge_rec) &&
          can_be_eliminated(next_edge_rec) &&
          eliminate_redundant_edges(node, edge_rec, next_edge_rec)) {
        // The surviving target must be re-reduced since it gained edges.
        reduced_nodes[next_node_from_edge_rec(edge_rec)] = false;
        did_something = true;
        KillEdge(&(*backward_edges)[j]);
      }
    }
  }
  return did_something;
}
// Orders the EDGE_RECORDs in *edges by increasing unichar id.
// Each record is decorated with its unichar-id key, the decorated vector is
// sorted, and the records are copied back in the new order.
void Trie::sort_edges(EDGE_VECTOR *edges) {
  const int count = edges->size();
  if (count <= 1) {
    return; // already trivially ordered
  }
  using KeyedEdge = KDPairInc<UNICHAR_ID, EDGE_RECORD>;
  std::vector<KeyedEdge> keyed;
  keyed.reserve(count);
  for (const EDGE_RECORD &edge : *edges) {
    keyed.emplace_back(unichar_id_from_edge_rec(edge), edge);
  }
  std::sort(keyed.begin(), keyed.end());
  for (int i = 0; i < count; ++i) {
    (*edges)[i] = keyed[i].data();
  }
}
// Recursively eliminates redundant edges from |node| and from every node
// reachable through its (live) backward edges. |reduced_nodes| records which
// nodes have already been processed.
void Trie::reduce_node_input(NODE_REF node, std::vector<bool> &reduced_nodes) {
  EDGE_VECTOR &backward_edges = nodes_[node]->backward_edges;
  // Sorting groups edges by unichar id so each group can be reduced in one
  // pass; dead edges carry unicharset_size_ as their id and sort to the end.
  sort_edges(&backward_edges);
  if (debug_level_ > 1) {
    tprintf("reduce_node_input(node=" REFFORMAT ")\n", node);
    print_node(node, MAX_NODE_EDGES_DISPLAY);
  }
  EDGE_INDEX edge_index = 0;
  while (static_cast<size_t>(edge_index) < backward_edges.size()) {
    // NOTE(review): this `continue` does not advance edge_index; it appears
    // to rely on the invariant that the edge at edge_index is never dead
    // (dead edges sort to the end and reduce_lettered_edges only kills
    // edges at later indices) — if that invariant broke, this would loop
    // forever. Confirm before refactoring.
    if (DeadEdge(backward_edges[edge_index])) {
      continue;
    }
    UNICHAR_ID unichar_id = unichar_id_from_edge_rec(backward_edges[edge_index]);
    // Repeatedly reduce this unichar-id group until no more collapses occur.
    while (reduce_lettered_edges(edge_index, unichar_id, node, &backward_edges, reduced_nodes)) {
      ;
    }
    // Skip forward past dead edges and the rest of the current group to the
    // first live edge carrying a different unichar id.
    while (static_cast<size_t>(++edge_index) < backward_edges.size()) {
      UNICHAR_ID id = unichar_id_from_edge_rec(backward_edges[edge_index]);
      if (!DeadEdge(backward_edges[edge_index]) && id != unichar_id) {
        break;
      }
    }
  }
  reduced_nodes[node] = true; // mark as reduced
  if (debug_level_ > 1) {
    tprintf("Node " REFFORMAT " after reduction:\n", node);
    print_node(node, MAX_NODE_EDGES_DISPLAY);
  }
  // Recurse into the targets of the remaining live backward edges.
  for (auto &backward_edge : backward_edges) {
    if (DeadEdge(backward_edge)) {
      continue;
    }
    NODE_REF next_node = next_node_from_edge_rec(backward_edge);
    if (next_node != 0 && !reduced_nodes[next_node]) {
      reduce_node_input(next_node, reduced_nodes);
    }
  }
}
// Prints the live forward edges (first line, with edge counts) and backward
// edges (second line, tab-indented) of |node|, truncating each list to
// max_num_edges entries with a trailing "...".
void Trie::print_node(NODE_REF node, int max_num_edges) const {
  if (node == NO_EDGE) {
    return; // nothing to print
  }
  TRIE_NODE_RECORD *rec = nodes_[node];
  const int fwd_count = rec->forward_edges.size();
  const int bkw_count = rec->backward_edges.size();
  for (int direction = 0; direction < 2; ++direction) {
    const bool forward = (direction == 0);
    EDGE_VECTOR &edges = forward ? rec->forward_edges : rec->backward_edges;
    const int limit = forward ? fwd_count : bkw_count;
    if (forward) {
      tprintf(REFFORMAT " (%d %d): ", node, fwd_count, bkw_count);
    } else {
      tprintf("\t");
    }
    int printed = 0;
    for (; printed < limit && printed < max_num_edges; ++printed) {
      if (DeadEdge(edges[printed])) {
        continue; // dead edges are silently skipped (but count to the limit)
      }
      print_edge_rec(edges[printed]);
      tprintf(" ");
    }
    if (printed < limit) {
      tprintf("...");
    }
    tprintf("\n");
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/dict/trie.cpp
|
C++
|
apache-2.0
| 26,865
|
/******************************************************************************
*
* File: trie.h
* Description: Functions to build a trie data structure.
* Author: Mark Seaman, SW Productivity
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#ifndef TRIE_H
#define TRIE_H
#include "dawg.h"
namespace tesseract {
class UNICHARSET;
// Note: if we consider either NODE_REF or EDGE_INDEX to ever exceed
// max int32, we will need to change vector to use int64 for size
// and address indices. This does not seem to be needed immediately,
// since currently the largest number of edges limit used by tesseract
// (kMaxNumEdges in wordlist2dawg.cpp) is far less than max int32.
// There are also int casts below to satisfy the WIN32 compiler that would
// need to be changed.
// It might be cleanest to change the types of most of the Trie/Dawg related
// typedefs to int and restrict the casts to extracting these values from
// the 64 bit EDGE_RECORD.
using EDGE_INDEX = int64_t; // index of an edge in a given node
using EDGE_VECTOR = std::vector<EDGE_RECORD>;
// One node of the Trie: the edges leaving the node (forward) and the edges
// arriving at it (backward), each stored as a vector of EDGE_RECORDs.
struct TRIE_NODE_RECORD {
  EDGE_VECTOR forward_edges;  // edges out of this node
  EDGE_VECTOR backward_edges; // edges into this node
};
using TRIE_NODES = std::vector<TRIE_NODE_RECORD *>;
/**
 * Concrete class for Trie data structure that allows to store a list of
 * words (extends Dawg base class) as well as dynamically add new words.
 * This class stores a vector of pointers to TRIE_NODE_RECORDs, each of
 * which has a vector of forward and backward edges.
 */
class TESS_API Trie : public Dawg {
public:
  // Policies for reversing the unichar-id sequence of right-to-left words
  // before insertion.
  enum RTLReversePolicy {
    RRP_DO_NO_REVERSE,
    RRP_REVERSE_IF_HAS_RTL,
    RRP_FORCE_REVERSE,
  };
  // Minimum number of concrete characters at the beginning of user patterns.
  static const int kSaneNumConcreteChars = 0;
  // Various unicode whitespace characters are used to denote unichar patterns,
  // (character classifier would never produce these whitespace characters as a
  // valid classification).
  static const char kAlphaPatternUnicode[];
  static const char kDigitPatternUnicode[];
  static const char kAlphanumPatternUnicode[];
  static const char kPuncPatternUnicode[];
  static const char kLowerPatternUnicode[];
  static const char kUpperPatternUnicode[];
  // Returns a printable name for the given reversing policy.
  static const char *get_reverse_policy_name(RTLReversePolicy reverse_policy);
  // Constructs an empty Trie for the given dawg type/language/permuter and
  // pre-allocates node 0 (the root).
  Trie(DawgType type, const std::string &lang, PermuterType perm, int unicharset_size, int debug_level)
      : Dawg(type, lang, perm, debug_level) {
    init(unicharset_size);
    deref_node_index_mask_ = ~letter_mask_;
    new_dawg_node(); // need to allocate node 0
  }
  ~Trie() override {
    for (auto node : nodes_) {
      delete node;
    }
  }
  // Reset the Trie to empty.
  void clear();
  /** Returns the edge that corresponds to the letter out of this node. */
  EDGE_REF edge_char_of(NODE_REF node_ref, UNICHAR_ID unichar_id, bool word_end) const override {
    EDGE_RECORD *edge_ptr;
    EDGE_INDEX edge_index;
    if (!edge_char_of(node_ref, NO_EDGE, FORWARD_EDGE, word_end, unichar_id, &edge_ptr,
                      &edge_index)) {
      return NO_EDGE;
    }
    return make_edge_ref(node_ref, edge_index);
  }
  /**
   * Fills the given NodeChildVector with all the unichar ids (and the
   * corresponding EDGE_REFs) for which there is an edge out of this node.
   */
  void unichar_ids_of(NODE_REF node, NodeChildVector *vec, bool word_end) const override {
    const EDGE_VECTOR &forward_edges = nodes_[static_cast<int>(node)]->forward_edges;
    for (auto &edge : forward_edges) {
      if (!word_end || end_of_word_from_edge_rec(edge)) {
        vec->push_back(
            NodeChild(unichar_id_from_edge_rec(edge), make_edge_ref(node, &edge - &forward_edges[0])));
      }
    }
  }
  /**
   * Returns the next node visited by following the edge
   * indicated by the given EDGE_REF.
   */
  NODE_REF next_node(EDGE_REF edge_ref) const override {
    if (edge_ref == NO_EDGE || num_edges_ == 0) {
      return NO_EDGE;
    }
    return next_node_from_edge_rec(*deref_edge_ref(edge_ref));
  }
  /**
   * Returns true if the edge indicated by the given EDGE_REF
   * marks the end of a word.
   */
  bool end_of_word(EDGE_REF edge_ref) const override {
    if (edge_ref == NO_EDGE || num_edges_ == 0) {
      return false;
    }
    return end_of_word_from_edge_rec(*deref_edge_ref(edge_ref));
  }
  /** Returns UNICHAR_ID stored in the edge indicated by the given EDGE_REF. */
  UNICHAR_ID edge_letter(EDGE_REF edge_ref) const override {
    if (edge_ref == NO_EDGE || num_edges_ == 0) {
      return INVALID_UNICHAR_ID;
    }
    return unichar_id_from_edge_rec(*deref_edge_ref(edge_ref));
  }
  // Sets the UNICHAR_ID in the given edge_rec to unicharset_size_, marking
  // the edge dead.
  void KillEdge(EDGE_RECORD *edge_rec) const {
    *edge_rec &= ~letter_mask_;
    *edge_rec |= (unicharset_size_ << LETTER_START_BIT);
  }
  // Returns true if the edge was killed (its unichar id is the out-of-range
  // sentinel unicharset_size_).
  bool DeadEdge(const EDGE_RECORD &edge_rec) const {
    return unichar_id_from_edge_rec(edge_rec) == unicharset_size_;
  }
  // Prints the contents of the node indicated by the given NODE_REF.
  // At most max_num_edges will be printed.
  void print_node(NODE_REF node, int max_num_edges) const override;
  // Writes edges from nodes_ to an EDGE_ARRAY and creates a SquishedDawg.
  // Eliminates redundant edges and returns the pointer to the SquishedDawg.
  // Note: the caller is responsible for deallocating memory associated
  // with the returned SquishedDawg pointer.
  SquishedDawg *trie_to_dawg();
  // Reads a list of words from the given file and adds into the Trie.
  // Calls WERD_CHOICE::reverse_unichar_ids_if_rtl() according to the reverse
  // policy and information in the unicharset.
  // Returns false on error.
  bool read_and_add_word_list(const char *filename, const UNICHARSET &unicharset,
                              Trie::RTLReversePolicy reverse);
  // Reads a list of words from the given file.
  // Returns false on error.
  bool read_word_list(const char *filename, std::vector<std::string> *words);
  // Adds a list of words previously read using read_word_list to the trie
  // using the given unicharset and reverse_policy to convert to unichar-ids.
  // Returns false on error.
  bool add_word_list(const std::vector<std::string> &words, const UNICHARSET &unicharset,
                     Trie::RTLReversePolicy reverse_policy);
  // Inserts the list of patterns from the given file into the Trie.
  // The pattern list file should contain one pattern per line in UTF-8 format.
  //
  // Each pattern can contain any non-whitespace characters, however only the
  // patterns that contain characters from the unicharset of the corresponding
  // language will be useful.
  // The only meta character is '\'. To be used in a pattern as an ordinary
  // string it should be escaped with '\' (e.g. string "C:\Documents" should
  // be written in the patterns file as "C:\\Documents").
  // This function supports a very limited regular expression syntax. One can
  // express a character, a certain character class and a number of times the
  // entity should be repeated in the pattern.
  //
  // To denote a character class use one of:
  // \c - unichar for which UNICHARSET::get_isalpha() is true (character)
  // \d - unichar for which UNICHARSET::get_isdigit() is true
  // \n - unichar for which UNICHARSET::get_isdigit() or
  //      UNICHARSET::isalpha() are true
  // \p - unichar for which UNICHARSET::get_ispunct() is true
  // \a - unichar for which UNICHARSET::get_islower() is true
  // \A - unichar for which UNICHARSET::get_isupper() is true
  //
  // \* could be specified after each character or pattern to indicate that
  // the character/pattern can be repeated any number of times before the next
  // character/pattern occurs.
  //
  // Examples:
  // 1-8\d\d-GOOG-411 will be expanded to strings:
  // 1-800-GOOG-411, 1-801-GOOG-411, ... 1-899-GOOG-411.
  //
  // http://www.\n\*.com will be expanded to strings like:
  // http://www.a.com http://www.a123.com ... http://www.ABCDefgHIJKLMNop.com
  //
  // Note: In choosing which patterns to include please be aware of the fact
  // providing very generic patterns will make tesseract run slower.
  // For example \n\* at the beginning of the pattern will make Tesseract
  // consider all the combinations of proposed character choices for each
  // of the segmentations, which will be unacceptably slow.
  // Because of potential problems with speed that could be difficult to
  // identify, each user pattern has to have at least kSaneNumConcreteChars
  // concrete characters from the unicharset at the beginning.
  bool read_pattern_list(const char *filename, const UNICHARSET &unicharset);
  // Initializes the values of *_pattern_ unichar ids.
  // This function should be called before calling read_pattern_list().
  void initialize_patterns(UNICHARSET *unicharset);
  // Fills in the given unichar id vector with the unichar ids that represent
  // the patterns of the character classes of the given unichar_id.
  void unichar_id_to_patterns(UNICHAR_ID unichar_id, const UNICHARSET &unicharset,
                              std::vector<UNICHAR_ID> *vec) const override;
  // Returns the given EDGE_REF if the EDGE_RECORD that it points to has
  // a self loop and the given unichar_id matches the unichar_id stored in the
  // EDGE_RECORD, returns NO_EDGE otherwise.
  EDGE_REF pattern_loop_edge(EDGE_REF edge_ref, UNICHAR_ID unichar_id,
                             bool word_end) const override {
    if (edge_ref == NO_EDGE) {
      return NO_EDGE;
    }
    EDGE_RECORD *edge_rec = deref_edge_ref(edge_ref);
    return (marker_flag_from_edge_rec(*edge_rec) &&
            unichar_id == unichar_id_from_edge_rec(*edge_rec) &&
            word_end == end_of_word_from_edge_rec(*edge_rec))
               ? edge_ref
               : NO_EDGE;
  }
  // Adds a word to the Trie (creates the necessary nodes and edges).
  //
  // If repetitions vector is not nullptr, each entry in the vector indicates
  // whether the unichar id with the corresponding index in the word is allowed
  // to repeat an unlimited number of times. For each entry that is true, MARKER
  // flag of the corresponding edge created for this unichar id is set to true).
  //
  // Return true if add succeeded, false otherwise (e.g. when a word contained
  // an invalid unichar id or the trie was getting too large and was cleared).
  bool add_word_to_dawg(const WERD_CHOICE &word, const std::vector<bool> *repetitions);
  bool add_word_to_dawg(const WERD_CHOICE &word) {
    return add_word_to_dawg(word, nullptr);
  }

protected:
  // The structure of an EDGE_REF for Trie edges is as follows:
  // [LETTER_START_BIT, flag_start_bit_):
  // edge index in *_edges in a TRIE_NODE_RECORD
  // [flag_start_bit, 30th bit]: node index in nodes (TRIE_NODES vector)
  //
  // With this arrangement there are enough bits to represent edge indices
  // (each node can have at most unicharset_size_ forward edges and
  // the position of flag_start_bit is set to be log2(unicharset_size_)).
  // It is also possible to accommodate a maximum number of nodes that is at
  // least as large as that of the SquishedDawg representation (in SquishedDawg
  // each EDGE_RECORD has 32-(flag_start_bit+NUM_FLAG_BITS) bits to represent
  // the next node index).
  //
  // Returns the pointer to EDGE_RECORD after decoding the location
  // of the edge from the information in the given EDGE_REF.
  // This function assumes that EDGE_REF holds valid node/edge indices.
  inline EDGE_RECORD *deref_edge_ref(EDGE_REF edge_ref) const {
    int edge_index = static_cast<int>((edge_ref & letter_mask_) >> LETTER_START_BIT);
    int node_index = static_cast<int>((edge_ref & deref_node_index_mask_) >> flag_start_bit_);
    TRIE_NODE_RECORD *node_rec = nodes_[node_index];
    return &(node_rec->forward_edges[edge_index]);
  }
  /** Constructs EDGE_REF from the given node_index and edge_index. */
  inline EDGE_REF make_edge_ref(NODE_REF node_index, EDGE_INDEX edge_index) const {
    return ((node_index << flag_start_bit_) | (edge_index << LETTER_START_BIT));
  }
  /** Sets up this edge record to the requested values. */
  inline void link_edge(EDGE_RECORD *edge, NODE_REF nxt, bool repeats, int direction, bool word_end,
                        UNICHAR_ID unichar_id) {
    EDGE_RECORD flags = 0;
    if (repeats) {
      flags |= MARKER_FLAG;
    }
    if (word_end) {
      flags |= WERD_END_FLAG;
    }
    if (direction == BACKWARD_EDGE) {
      flags |= DIRECTION_FLAG;
    }
    *edge = ((nxt << next_node_start_bit_) | (static_cast<EDGE_RECORD>(flags) << flag_start_bit_) |
             (static_cast<EDGE_RECORD>(unichar_id) << LETTER_START_BIT));
  }
  /** Prints the given EDGE_RECORD. */
  inline void print_edge_rec(const EDGE_RECORD &edge_rec) const {
    tprintf("|" REFFORMAT "|%s%s%s|%d|", next_node_from_edge_rec(edge_rec),
            marker_flag_from_edge_rec(edge_rec) ? "R," : "",
            (direction_from_edge_rec(edge_rec) == FORWARD_EDGE) ? "F" : "B",
            end_of_word_from_edge_rec(edge_rec) ? ",E" : "", unichar_id_from_edge_rec(edge_rec));
  }
  // Returns true if the next node recorded in the given EDGE_RECORD
  // has exactly one forward edge.
  inline bool can_be_eliminated(const EDGE_RECORD &edge_rec) {
    NODE_REF node_ref = next_node_from_edge_rec(edge_rec);
    return (node_ref != NO_EDGE && nodes_[static_cast<int>(node_ref)]->forward_edges.size() == 1);
  }
  // Prints the contents of the Trie.
  // At most max_num_edges will be printed for each node.
  void print_all(const char *msg, int max_num_edges) {
    tprintf("\n__________________________\n%s\n", msg);
    for (size_t i = 0; i < nodes_.size(); ++i) {
      print_node(i, max_num_edges);
    }
    tprintf("__________________________\n");
  }
  // Finds the edge with the given direction, word_end and unichar_id
  // in the node indicated by node_ref. Fills in the pointer to the
  // EDGE_RECORD and the index of the edge with the values
  // corresponding to the edge found. Returns true if an edge was found.
  bool edge_char_of(NODE_REF node_ref, NODE_REF next_node, int direction, bool word_end,
                    UNICHAR_ID unichar_id, EDGE_RECORD **edge_ptr, EDGE_INDEX *edge_index) const;
  // Adds an single edge linkage between node1 and node2 in the direction
  // indicated by direction argument.
  bool add_edge_linkage(NODE_REF node1, NODE_REF node2, bool repeats, int direction, bool word_end,
                        UNICHAR_ID unichar_id);
  // Adds forward edge linkage from node1 to node2 and the corresponding
  // backward edge linkage in the other direction.
  bool add_new_edge(NODE_REF node1, NODE_REF node2, bool repeats, bool word_end,
                    UNICHAR_ID unichar_id) {
    return (add_edge_linkage(node1, node2, repeats, FORWARD_EDGE, word_end, unichar_id) &&
            add_edge_linkage(node2, node1, repeats, BACKWARD_EDGE, word_end, unichar_id));
  }
  // Sets the word ending flags in an already existing edge pair.
  void add_word_ending(EDGE_RECORD *edge, NODE_REF the_next_node, bool repeats,
                       UNICHAR_ID unichar_id);
  // Allocates space for a new node in the Trie.
  NODE_REF new_dawg_node();
  // Removes a single edge linkage to between node1 and node2 in the
  // direction indicated by direction argument.
  void remove_edge_linkage(NODE_REF node1, NODE_REF node2, int direction, bool word_end,
                           UNICHAR_ID unichar_id);
  // Removes forward edge linkage from node1 to node2 and the corresponding
  // backward edge linkage in the other direction.
  void remove_edge(NODE_REF node1, NODE_REF node2, bool word_end, UNICHAR_ID unichar_id) {
    remove_edge_linkage(node1, node2, FORWARD_EDGE, word_end, unichar_id);
    remove_edge_linkage(node2, node1, BACKWARD_EDGE, word_end, unichar_id);
  }
  // Compares edge1 and edge2 in the given node to see if they point to two
  // next nodes that could be collapsed. If they do, performs the reduction
  // and returns true.
  bool eliminate_redundant_edges(NODE_REF node, const EDGE_RECORD &edge1, const EDGE_RECORD &edge2);
  // Assuming that edge_index indicates the first edge in a group of edges
  // in this node with a particular letter value, looks through these edges
  // to see if any of them can be collapsed. If so does it. Returns to the
  // caller when all edges with this letter have been reduced.
  // Returns true if further reduction is possible with this same letter.
  bool reduce_lettered_edges(EDGE_INDEX edge_index, UNICHAR_ID unichar_id, NODE_REF node,
                             EDGE_VECTOR *backward_edges, std::vector<bool> &reduced_nodes);
  /**
   * Order num_edges of consecutive EDGE_RECORDS in the given EDGE_VECTOR in
   * increasing order of unichar ids. This function is normally called
   * for all edges in a single node. The implementation decorates the
   * records with their unichar-id keys and uses std::sort.
   */
  void sort_edges(EDGE_VECTOR *edges);
  /** Eliminates any redundant edges from this node in the Trie. */
  void reduce_node_input(NODE_REF node, std::vector<bool> &reduced_nodes);
  // Returns the pattern unichar id for the given character class code.
  UNICHAR_ID character_class_to_pattern(char ch);

  // Member variables
  TRIE_NODES nodes_; // vector of nodes in the Trie
  // Freelist of edges in the root backwards node that were previously zeroed.
  std::vector<EDGE_INDEX> root_back_freelist_;
  uint64_t num_edges_ = 0;             // sum of all edges (forward and backward)
  uint64_t deref_direction_mask_ = 0;  // mask for EDGE_REF to extract direction
  uint64_t deref_node_index_mask_ = 0; // mask for EDGE_REF to extract node index
  // Variables for translating character class codes denoted in user patterns
  // file to the unichar ids used to represent them in a Trie.
  UNICHAR_ID alpha_pattern_ = 0;
  UNICHAR_ID digit_pattern_ = 0;
  UNICHAR_ID alphanum_pattern_ = 0;
  UNICHAR_ID punc_pattern_ = 0;
  UNICHAR_ID lower_pattern_ = 0;
  UNICHAR_ID upper_pattern_ = 0;
  // True once initialize_patterns() has set the *_pattern_ ids.
  bool initialized_patterns_ = false;
};
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/dict/trie.h
|
C++
|
apache-2.0
| 19,093
|
///////////////////////////////////////////////////////////////////////
// File: convolve.cpp
// Description: Convolutional layer that stacks the inputs over its rectangle
// and pulls in random data to fill out-of-input inputs.
// Output is therefore same size as its input, but deeper.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "convolve.h"
#include "networkscratch.h"
#include "serialis.h"
namespace tesseract {
// Constructs the layer. The window is (2*half_x+1) x (2*half_y+1), so the
// output depth passed to Network is the input depth times the window area.
Convolve::Convolve(const std::string &name, int ni, int half_x, int half_y)
    : Network(NT_CONVOLVE, name, ni, ni * (2 * half_x + 1) * (2 * half_y + 1)) {
  half_x_ = half_x;
  half_y_ = half_y;
}
// Writes to the given file. Returns false in case of error.
// The base Network data is written first, then the two window half-sizes.
bool Convolve::Serialize(TFile *fp) const {
  if (!Network::Serialize(fp)) {
    return false;
  }
  if (!fp->Serialize(&half_x_)) {
    return false;
  }
  return fp->Serialize(&half_y_);
}
// Reads from the given file. Returns false in case of error.
// Reads half_x_ then half_y_ (same order as Serialize) and recomputes the
// output depth from the deserialized window size.
bool Convolve::DeSerialize(TFile *fp) {
  if (!fp->DeSerialize(&half_x_) || !fp->DeSerialize(&half_y_)) {
    return false;
  }
  no_ = ni_ * (2 * half_x_ + 1) * (2 * half_y_ + 1);
  return true;
}
// Runs forward propagation of activations on the input line.
// For every output timestep, stacks the input vectors of the surrounding
// (2*half_x_+1) x (2*half_y_+1) window into one deeper output vector.
// Window positions that fall outside the image are filled with random data.
// See NetworkCpp for a detailed discussion of the arguments.
void Convolve::Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
                       NetworkScratch *scratch, NetworkIO *output) {
  output->Resize(input, no_);
  int y_scale = 2 * half_y_ + 1;
  StrideMap::Index dest_index(output->stride_map());
  do {
    // Stack x_scale groups of y_scale * ni_ inputs together.
    int t = dest_index.t();
    int out_ix = 0; // offset of the current x-column within the output vector
    for (int x = -half_x_; x <= half_x_; ++x, out_ix += y_scale * ni_) {
      StrideMap::Index x_index(dest_index);
      if (!x_index.AddOffset(x, FD_WIDTH)) {
        // This x is outside the image.
        output->Randomize(t, out_ix, y_scale * ni_, randomizer_);
      } else {
        int out_iy = out_ix; // offset of the current (x, y) cell
        for (int y = -half_y_; y <= half_y_; ++y, out_iy += ni_) {
          StrideMap::Index y_index(x_index);
          if (!y_index.AddOffset(y, FD_HEIGHT)) {
            // This y is outside the image.
            output->Randomize(t, out_iy, ni_, randomizer_);
          } else {
            // In-bounds: copy the ni_ input values from the offset timestep.
            output->CopyTimeStepGeneral(t, out_iy, ni_, input, y_index.t(), 0);
          }
        }
      }
    }
  } while (dest_index.Increment());
#ifndef GRAPHICS_DISABLED
  if (debug) {
    DisplayForward(*output);
  }
#endif
}
// Runs backward propagation of errors on the deltas line.
// Inverse of Forward: each input position accumulates the delta slices from
// every output window that covered it. Out-of-image positions (which were
// randomized in Forward) contribute nothing. Always returns true.
// See NetworkCpp for a detailed discussion of the arguments.
bool Convolve::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                        NetworkIO *back_deltas) {
  back_deltas->Resize(fwd_deltas, ni_);
  NetworkScratch::IO delta_sum;
  delta_sum.ResizeFloat(fwd_deltas, ni_, scratch);
  delta_sum->Zero();
  int y_scale = 2 * half_y_ + 1;
  StrideMap::Index src_index(fwd_deltas.stride_map());
  do {
    // Stack x_scale groups of y_scale * ni_ inputs together.
    int t = src_index.t();
    int out_ix = 0; // offset of the current x-column within the delta vector
    for (int x = -half_x_; x <= half_x_; ++x, out_ix += y_scale * ni_) {
      StrideMap::Index x_index(src_index);
      if (x_index.AddOffset(x, FD_WIDTH)) {
        int out_iy = out_ix;
        for (int y = -half_y_; y <= half_y_; ++y, out_iy += ni_) {
          StrideMap::Index y_index(x_index);
          if (y_index.AddOffset(y, FD_HEIGHT)) {
            // Scatter-add this slice of the forward deltas into the input
            // position it was copied from.
            fwd_deltas.AddTimeStepPart(t, out_iy, ni_, delta_sum->f(y_index.t()));
          }
        }
      }
    }
  } while (src_index.Increment());
  back_deltas->CopyAll(*delta_sum);
  return true;
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/convolve.cpp
|
C++
|
apache-2.0
| 4,394
|
///////////////////////////////////////////////////////////////////////
// File: convolve.h
// Description: Convolutional layer that stacks the inputs over its rectangle
// and pulls in random data to fill out-of-input inputs.
// Output is therefore same size as its input, but deeper.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_CONVOLVE_H_
#define TESSERACT_LSTM_CONVOLVE_H_
#include "matrix.h"
#include "network.h"
namespace tesseract {
// Makes each time-step deeper by stacking inputs over its rectangle. Does not
// affect the size of its input. Achieves this by bringing in random values in
// out-of-input areas.
class Convolve : public Network {
public:
  // The area of convolution is 2*half_x + 1 by 2*half_y + 1, forcing it to
  // always be odd, so the center is the current pixel.
  TESS_API
  Convolve(const std::string &name, int ni, int half_x, int half_y);
  ~Convolve() override = default;
  // Returns the network spec string, e.g. "C3,3" for a 3x3 window
  // (height listed first).
  std::string spec() const override {
    return "C" + std::to_string(half_y_ * 2 + 1) + "," + std::to_string(half_x_ * 2 + 1);
  }
  // Writes to the given file. Returns false in case of error.
  bool Serialize(TFile *fp) const override;
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(TFile *fp) override;
  // Runs forward propagation of activations on the input line.
  // See Network for a detailed discussion of the arguments.
  void Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
               NetworkScratch *scratch, NetworkIO *output) override;
  // Runs backward propagation of errors on the deltas line.
  // See Network for a detailed discussion of the arguments.
  bool Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                NetworkIO *back_deltas) override;

private:
  // Convolve has no weights of its own, so there is nothing to dump.
  void DebugWeights() override {
    tprintf("Must override Network::DebugWeights for type %d\n", type_);
  }

protected:
  // Serialized data: half the window extent in each direction.
  int32_t half_x_;
  int32_t half_y_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_CONVOLVE_H_
|
2301_81045437/tesseract
|
src/lstm/convolve.h
|
C++
|
apache-2.0
| 2,753
|
///////////////////////////////////////////////////////////////////////
// File: fullyconnected.cpp
// Description: Simple feed-forward layer with various non-linearities.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "fullyconnected.h"
#ifdef _OPENMP
# include <omp.h>
#endif
#include <cstdio>
#include <cstdlib>
#include "functions.h"
#include "networkscratch.h"
// Number of threads to use for parallel calculation of Forward and Backward.
#ifdef _OPENMP
const int kNumThreads = 4;
#else
const int kNumThreads = 1;
#endif
namespace tesseract {
// Constructs a fully-connected layer of the given type with ni inputs and
// no outputs, starting in float mode with no external transposed input.
FullyConnected::FullyConnected(const std::string &name, int ni, int no, NetworkType type)
    : Network(type, name, ni, no), external_source_(nullptr), int_mode_(false) {}
// Returns the shape output from the network given an input shape (which may
// be partially unknown ie zero).
// The loss type is determined solely by the layer type; everything else in
// the shape passes through with the depth replaced by no_.
StaticShape FullyConnected::OutputShape(const StaticShape &input_shape) const {
  LossType loss_type;
  switch (type_) {
    case NT_SOFTMAX:
      loss_type = LT_CTC;
      break;
    case NT_SOFTMAX_NO_CTC:
      loss_type = LT_SOFTMAX;
      break;
    case NT_LOGISTIC:
      loss_type = LT_LOGISTIC;
      break;
    default:
      loss_type = LT_NONE;
      break;
  }
  StaticShape output_shape(input_shape);
  output_shape.set_depth(no_);
  output_shape.set_loss_type(loss_type);
  return output_shape;
}
// Suspends/Enables training by setting the training_ flag.
// Transitions are deliberately restricted:
//   TS_RE_ENABLE   -> TS_ENABLED only from TS_TEMP_DISABLE;
//   TS_TEMP_DISABLE-> applied only from TS_ENABLED;
//   anything else  -> set unconditionally, initializing backward (gradient)
//                     buffers when switching into TS_ENABLED.
void FullyConnected::SetEnableTraining(TrainingState state) {
  if (state == TS_RE_ENABLE) {
    // Enable only from temp disabled.
    if (training_ == TS_TEMP_DISABLE) {
      training_ = TS_ENABLED;
    }
  } else if (state == TS_TEMP_DISABLE) {
    // Temp disable only from enabled.
    if (training_ == TS_ENABLED) {
      training_ = state;
    }
  } else {
    if (state == TS_ENABLED && training_ != TS_ENABLED) {
      // Freshly (re-)entering training: allocate gradient storage.
      weights_.InitBackward();
    }
    training_ = state;
  }
}
// Sets up the network for training. Initializes weights using weights of
// scale `range` picked according to the random number generator `randomizer`.
int FullyConnected::InitWeights(float range, TRand *randomizer) {
  Network::SetRandomizer(randomizer);
  // ni_ + 1: the extra input column holds the bias weight for each output.
  num_weights_ = weights_.InitWeightsFloat(no_, ni_ + 1, TestFlag(NF_ADAM), range, randomizer);
  return num_weights_;
}
// Recursively searches the network for softmaxes with old_no outputs,
// and remaps their outputs according to code_map. See network.h for details.
int FullyConnected::RemapOutputs(int old_no, const std::vector<int> &code_map) {
  // Only softmax output layers with a matching output count are remapped;
  // anything else is left untouched and the current weight count returned.
  if (type_ == NT_SOFTMAX && no_ == old_no) {
    num_weights_ = weights_.RemapOutputs(code_map);
    no_ = code_map.size();
  }
  return num_weights_;
}
// Converts a float network to an int network.
void FullyConnected::ConvertToInt() {
  // Delegates to the weight matrix, which holds all trainable state.
  weights_.ConvertToInt();
}
// Provides debug output on the weights.
void FullyConnected::DebugWeights() {
  // Dumps a 2-D visualization/summary of the weights, tagged by layer name.
  weights_.Debug2D(name_.c_str());
}
// Writes to the given file. Returns false in case of error.
bool FullyConnected::Serialize(TFile *fp) const {
  // Write the base-class data first, then the weight matrix (which also
  // writes its training buffers when training is active). Short-circuits
  // on the first failure.
  return Network::Serialize(fp) && weights_.Serialize(IsTraining(), fp);
}
// Reads from the given file. Returns false in case of error.
bool FullyConnected::DeSerialize(TFile *fp) {
  // Base-class data has already been read by the caller; only the weights
  // (and, when training, their gradient buffers) remain.
  return weights_.DeSerialize(IsTraining(), fp);
}
// Runs forward propagation of activations on the input line.
// See NetworkCpp for a detailed discussion of the arguments.
void FullyConnected::Forward(bool debug, const NetworkIO &input,
                             const TransposedArray *input_transpose, NetworkScratch *scratch,
                             NetworkIO *output) {
  int width = input.Width();
  // Softmax always produces float output, even from integer input.
  if (type_ == NT_SOFTMAX) {
    output->ResizeFloat(input, no_);
  } else {
    output->Resize(input, no_);
  }
  SetupForward(input, input_transpose);
  // Per-thread scratch buffers so parallel timesteps don't share storage.
  std::vector<NetworkScratch::FloatVec> temp_lines(kNumThreads);
  std::vector<NetworkScratch::FloatVec> curr_input(kNumThreads);
  int ro = no_;
  // The SIMD matrix code may need the output buffer rounded up to a
  // multiple of its register width.
  if (IntSimdMatrix::intSimdMatrix) {
    ro = IntSimdMatrix::intSimdMatrix->RoundOutputs(ro);
  }
  for (int i = 0; i < kNumThreads; ++i) {
    temp_lines[i].Init(ro, scratch);
    curr_input[i].Init(ni_, scratch);
  }
#ifdef _OPENMP
# pragma omp parallel for num_threads(kNumThreads)
  for (int t = 0; t < width; ++t) {
    // Thread-local pointer to temporary storage.
    int thread_id = omp_get_thread_num();
#else
  for (int t = 0; t < width; ++t) {
    // Thread-local pointer to temporary storage.
    int thread_id = 0;
#endif
    TFloat *temp_line = temp_lines[thread_id];
    if (input.int_mode()) {
      ForwardTimeStep(input.i(t), t, temp_line);
    } else {
      input.ReadTimeStep(t, curr_input[thread_id]);
      ForwardTimeStep(curr_input[thread_id], t, temp_line);
    }
    output->WriteTimeStep(t, temp_line);
    // Save the activations for Backward; softmax gets its errors directly
    // from the loss computation, so it doesn't need them.
    if (IsTraining() && type_ != NT_SOFTMAX) {
      acts_.CopyTimeStepFrom(t, *output, t);
    }
  }
  // Zero all the elements that are in the padding around images that allows
  // multiple different-sized images to exist in a single array.
  // acts_ is only used if this is not a softmax op.
  if (IsTraining() && type_ != NT_SOFTMAX) {
    acts_.ZeroInvalidElements();
  }
  output->ZeroInvalidElements();
#if DEBUG_DETAIL > 0
  tprintf("F Output:%s\n", name_.c_str());
  output->Print(10);
#endif
#ifndef GRAPHICS_DISABLED
  if (debug) {
    DisplayForward(*output);
  }
#endif
}
// Components of Forward so FullyConnected can be reused inside LSTM.
void FullyConnected::SetupForward(const NetworkIO &input, const TransposedArray *input_transpose) {
  // Softmax output is always float, so save the input type.
  int_mode_ = input.int_mode();
  if (IsTraining()) {
    acts_.Resize(input, no_);
    // Source_ is a transposed copy of input, needed by FinishBackward for
    // the weight-gradient outer product. It isn't needed if provided.
    external_source_ = input_transpose;
    if (external_source_ == nullptr) {
      source_t_.ResizeNoInit(ni_, input.Width());
    }
  }
}
// Applies this layer's non-linearity in place to the raw weighted sums in
// output_line. (t is unused; it is part of the overload family's signature.)
void FullyConnected::ForwardTimeStep(int t, TFloat *output_line) {
  switch (type_) {
    case NT_TANH:
      FuncInplace<GFunc>(no_, output_line);
      break;
    case NT_LOGISTIC:
      FuncInplace<FFunc>(no_, output_line);
      break;
    case NT_POSCLIP:
      FuncInplace<ClipFFunc>(no_, output_line);
      break;
    case NT_SYMCLIP:
      FuncInplace<ClipGFunc>(no_, output_line);
      break;
    case NT_RELU:
      FuncInplace<Relu>(no_, output_line);
      break;
    case NT_SOFTMAX:
    case NT_SOFTMAX_NO_CTC:
      SoftmaxInPlace(no_, output_line);
      break;
    case NT_LINEAR:
      // Identity: leave the raw sums untouched.
      break;
    default:
      ASSERT_HOST("Invalid fully-connected type!" == nullptr);
  }
}
void FullyConnected::ForwardTimeStep(const TFloat *d_input, int t, TFloat *output_line) {
  // input is copied to source_ line-by-line for cache coherency.
  if (IsTraining() && external_source_ == nullptr) {
    source_t_.WriteStrided(t, d_input);
  }
  // Weighted sum (including bias), then the non-linearity.
  weights_.MatrixDotVector(d_input, output_line);
  ForwardTimeStep(t, output_line);
}
void FullyConnected::ForwardTimeStep(const int8_t *i_input, int t, TFloat *output_line) {
  // Quantized (int) input: no transposed copy is saved here, since int mode
  // is not used for training.
  weights_.MatrixDotVector(i_input, output_line);
  ForwardTimeStep(t, output_line);
}
// Runs backward propagation of errors on the deltas line.
// See NetworkCpp for a detailed discussion of the arguments.
bool FullyConnected::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                              NetworkIO *back_deltas) {
#ifndef GRAPHICS_DISABLED
  if (debug) {
    DisplayBackward(fwd_deltas);
  }
#endif
  back_deltas->Resize(fwd_deltas, ni_);
  // Per-thread buffers for the output errors of each timestep.
  std::vector<NetworkScratch::FloatVec> errors(kNumThreads);
  for (int i = 0; i < kNumThreads; ++i) {
    errors[i].Init(no_, scratch);
  }
  // Buffers for errors propagated to the layer below; only allocated if
  // that layer will consume them.
  std::vector<NetworkScratch::FloatVec> temp_backprops;
  if (needs_to_backprop_) {
    temp_backprops.resize(kNumThreads);
    for (int i = 0; i < kNumThreads; ++i) {
      temp_backprops[i].Init(ni_, scratch);
    }
  }
  int width = fwd_deltas.Width();
  // Transposed store of per-timestep errors, consumed by FinishBackward
  // to accumulate the weight gradient.
  NetworkScratch::GradientStore errors_t;
  errors_t.Init(no_, width, scratch);
#ifdef _OPENMP
# pragma omp parallel for num_threads(kNumThreads)
  for (int t = 0; t < width; ++t) {
    int thread_id = omp_get_thread_num();
#else
  for (int t = 0; t < width; ++t) {
    int thread_id = 0;
#endif
    TFloat *backprop = nullptr;
    if (needs_to_backprop_) {
      backprop = temp_backprops[thread_id];
    }
    TFloat *curr_errors = errors[thread_id];
    BackwardTimeStep(fwd_deltas, t, curr_errors, errors_t.get(), backprop);
    if (backprop != nullptr) {
      back_deltas->WriteTimeStep(t, backprop);
    }
  }
  FinishBackward(*errors_t.get());
  if (needs_to_backprop_) {
    back_deltas->ZeroInvalidElements();
#if DEBUG_DETAIL > 0
    tprintf("F Backprop:%s\n", name_.c_str());
    back_deltas->Print(10);
#endif
    return true;
  }
  return false; // No point going further back.
}
void FullyConnected::BackwardTimeStep(const NetworkIO &fwd_deltas, int t, TFloat *curr_errors,
                                      TransposedArray *errors_t, TFloat *backprop) {
  // Multiply the incoming deltas by the derivative of the non-linearity,
  // evaluated at the activations stored by Forward for timestep t.
  if (type_ == NT_TANH) {
    acts_.FuncMultiply<GPrime>(fwd_deltas, t, curr_errors);
  } else if (type_ == NT_LOGISTIC) {
    acts_.FuncMultiply<FPrime>(fwd_deltas, t, curr_errors);
  } else if (type_ == NT_POSCLIP) {
    acts_.FuncMultiply<ClipFPrime>(fwd_deltas, t, curr_errors);
  } else if (type_ == NT_SYMCLIP) {
    acts_.FuncMultiply<ClipGPrime>(fwd_deltas, t, curr_errors);
  } else if (type_ == NT_RELU) {
    acts_.FuncMultiply<ReluPrime>(fwd_deltas, t, curr_errors);
  } else if (type_ == NT_SOFTMAX || type_ == NT_SOFTMAX_NO_CTC || type_ == NT_LINEAR) {
    fwd_deltas.ReadTimeStep(t, curr_errors); // fwd_deltas are the errors.
  } else {
    ASSERT_HOST("Invalid fully-connected type!" == nullptr);
  }
  // Generate backprop only if needed by the lower layer.
  if (backprop != nullptr) {
    weights_.VectorDotMatrix(curr_errors, backprop);
  }
  // Record the errors transposed, for the weight-gradient computation.
  errors_t->WriteStrided(t, curr_errors);
}
void FullyConnected::FinishBackward(const TransposedArray &errors_t) {
if (external_source_ == nullptr) {
weights_.SumOuterTransposed(errors_t, source_t_, true);
} else {
weights_.SumOuterTransposed(errors_t, *external_source_, true);
}
}
// Updates the weights using the given learning rate, momentum and adam_beta.
// num_samples is used in the adam computation iff use_adam_ is true.
void FullyConnected::Update(float learning_rate, float momentum, float adam_beta, int num_samples) {
  // Delegates to the weight matrix, which holds the accumulated gradients.
  weights_.Update(learning_rate, momentum, adam_beta, num_samples);
}
// Sums the products of weight updates in *this and other, splitting into
// positive (same direction) in *same and negative (different direction) in
// *changed.
void FullyConnected::CountAlternators(const Network &other, TFloat *same, TFloat *changed) const {
  ASSERT_HOST(other.type() == type_);
  // The type check above justifies the unchecked downcast.
  const auto *fc = static_cast<const FullyConnected *>(&other);
  weights_.CountAlternators(fc->weights_, same, changed);
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/fullyconnected.cpp
|
C++
|
apache-2.0
| 11,587
|
///////////////////////////////////////////////////////////////////////
// File: fullyconnected.h
// Description: Simple feed-forward layer with various non-linearities.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_FULLYCONNECTED_H_
#define TESSERACT_LSTM_FULLYCONNECTED_H_
#include "network.h"
#include "networkscratch.h"
#include "tesstypes.h"
namespace tesseract {
// C++ Implementation of the Softmax (output) class from lstm.py.
class FullyConnected : public Network {
public:
  TESS_API
  FullyConnected(const std::string &name, int ni, int no, NetworkType type);
  ~FullyConnected() override = default;
  // Returns the shape output from the network given an input shape (which may
  // be partially unknown ie zero).
  StaticShape OutputShape(const StaticShape &input_shape) const override;
  // Returns the spec string "F<kind><no>", where <kind> encodes the
  // non-linearity: t=tanh, s=logistic, r=relu, l=linear, p=posclip,
  // n=symclip, c=softmax, m=anything else (softmax without CTC).
  std::string spec() const override {
    std::string spec;
    if (type_ == NT_TANH) {
      spec += "Ft" + std::to_string(no_);
    } else if (type_ == NT_LOGISTIC) {
      spec += "Fs" + std::to_string(no_);
    } else if (type_ == NT_RELU) {
      spec += "Fr" + std::to_string(no_);
    } else if (type_ == NT_LINEAR) {
      spec += "Fl" + std::to_string(no_);
    } else if (type_ == NT_POSCLIP) {
      spec += "Fp" + std::to_string(no_);
    } else if (type_ == NT_SYMCLIP) {
      spec += "Fn" + std::to_string(no_);
    } else if (type_ == NT_SOFTMAX) {
      spec += "Fc" + std::to_string(no_);
    } else {
      spec += "Fm" + std::to_string(no_);
    }
    return spec;
  }
  // Changes the type to the given type. Used to commute a softmax to a
  // non-output type for adding on other networks.
  void ChangeType(NetworkType type) {
    type_ = type;
  }
  // Suspends/Enables training by setting the training_ flag. Serialize and
  // DeSerialize only operate on the run-time data if state is false.
  void SetEnableTraining(TrainingState state) override;
  // Sets up the network for training. Initializes weights using weights of
  // scale `range` picked according to the random number generator `randomizer`.
  int InitWeights(float range, TRand *randomizer) override;
  // Recursively searches the network for softmaxes with old_no outputs,
  // and remaps their outputs according to code_map. See network.h for details.
  int RemapOutputs(int old_no, const std::vector<int> &code_map) override;
  // Converts a float network to an int network.
  void ConvertToInt() override;
  // Provides debug output on the weights.
  void DebugWeights() override;
  // Writes to the given file. Returns false in case of error.
  bool Serialize(TFile *fp) const override;
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(TFile *fp) override;
  // Runs forward propagation of activations on the input line.
  // See Network for a detailed discussion of the arguments.
  void Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
               NetworkScratch *scratch, NetworkIO *output) override;
  // Components of Forward so FullyConnected can be reused inside LSTM.
  void SetupForward(const NetworkIO &input, const TransposedArray *input_transpose);
  // Applies only the non-linearity, in place, to output_line.
  void ForwardTimeStep(int t, TFloat *output_line);
  // Float and quantized-int variants of a full single-timestep forward pass.
  void ForwardTimeStep(const TFloat *d_input, int t, TFloat *output_line);
  void ForwardTimeStep(const int8_t *i_input, int t, TFloat *output_line);
  // Runs backward propagation of errors on the deltas line.
  // See Network for a detailed discussion of the arguments.
  bool Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                NetworkIO *back_deltas) override;
  // Components of Backward so FullyConnected can be reused inside LSTM.
  void BackwardTimeStep(const NetworkIO &fwd_deltas, int t, TFloat *curr_errors,
                        TransposedArray *errors_t, TFloat *backprop);
  void FinishBackward(const TransposedArray &errors_t);
  // Updates the weights using the given learning rate, momentum and adam_beta.
  // num_samples is used in the adam computation iff use_adam_ is true.
  void Update(float learning_rate, float momentum, float adam_beta, int num_samples) override;
  // Sums the products of weight updates in *this and other, splitting into
  // positive (same direction) in *same and negative (different direction) in
  // *changed.
  void CountAlternators(const Network &other, TFloat *same, TFloat *changed) const override;
protected:
  // Weight arrays of size [no, ni + 1].
  WeightMatrix weights_;
  // Transposed copy of input used during training of size [ni, width].
  TransposedArray source_t_;
  // Pointer to transposed input stored elsewhere. If not null, this is used
  // in preference to calculating the transpose and storing it in source_t_.
  const TransposedArray *external_source_;
  // Activations from forward pass of size [width, no].
  NetworkIO acts_;
  // Memory of the integer mode input to forward as softmax always outputs
  // float, so the information is otherwise lost.
  bool int_mode_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_FULLYCONNECTED_H_
|
2301_81045437/tesseract
|
src/lstm/fullyconnected.h
|
C++
|
apache-2.0
| 5,721
|
///////////////////////////////////////////////////////////////////////
// File: functions.h
// Description: Collection of function-objects used by the network layers.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_FUNCTIONS_H_
#define TESSERACT_LSTM_FUNCTIONS_H_
#include "helpers.h"
#include "tesstypes.h"
// Setting this to 1 or more causes massive dumps of debug data: weights,
// updates, internal calculations etc, and reduces the number of test iterations
// to a small number, so outputs can be diffed.
#define DEBUG_DETAIL 0
#if DEBUG_DETAIL > 0
# undef _OPENMP // Disable open mp to get the outputs in sync.
#endif
namespace tesseract {
// Size of static tables.
constexpr int kTableSize = 4096;
// Scale factor for float arg to int index.
constexpr TFloat kScaleFactor = 256.0;
// Generated lookup tables.
extern const TFloat TanhTable[];
extern const TFloat LogisticTable[];
// Non-linearity (sigmoid) functions with cache tables and clipping.
inline TFloat Tanh(TFloat x) {
  // tanh is odd, so compute on the positive half-line only.
  if (x < 0) {
    return -Tanh(-x);
  }
  // Map x to a fractional table index.
  x *= kScaleFactor;
  auto index = static_cast<unsigned>(x);
  // Beyond the end of the table, tanh has saturated (to table precision).
  if (index >= (kTableSize - 1)) {
    return 1;
  }
  TFloat tanh_i0 = TanhTable[index];
  TFloat tanh_i1 = TanhTable[index + 1];
  // Linear interpolation.
  return tanh_i0 + (tanh_i1 - tanh_i0) * (x - index);
}
inline TFloat Logistic(TFloat x) {
  // Use the symmetry sigma(-x) = 1 - sigma(x) to compute on x >= 0 only.
  if (x < 0) {
    return 1 - Logistic(-x);
  }
  // Map x to a fractional table index.
  x *= kScaleFactor;
  auto index = static_cast<unsigned>(x);
  // Beyond the end of the table, the sigmoid has saturated to 1.
  if (index >= (kTableSize - 1)) {
    return 1;
  }
  TFloat l0 = LogisticTable[index];
  TFloat l1 = LogisticTable[index + 1];
  // Linear interpolation.
  return l0 + (l1 - l0) * (x - index);
}
// Non-linearity (sigmoid) functions and their derivatives.
// F: logistic sigmoid non-linearity.
struct FFunc {
  inline TFloat operator()(TFloat x) const {
    return Logistic(x);
  }
};
// Derivative of the logistic, expressed in terms of its output y.
struct FPrime {
  inline TFloat operator()(TFloat y) const {
    return y * (1 - y);
  }
};
// Hard version of the logistic: clips x to the range [0, 1].
// (Conditions match the branching form exactly, including for NaN input.)
struct ClipFFunc {
  inline TFloat operator()(TFloat x) const {
    return x <= 0 ? 0 : (x >= 1 ? 1 : x);
  }
};
// Derivative of ClipFFunc: 1 inside the open interval (0, 1), else 0.
struct ClipFPrime {
  inline TFloat operator()(TFloat y) const {
    return 0 < y && y < 1 ? 1 : 0;
  }
};
// Rectified linear unit: 0 for x <= 0, identity otherwise.
struct Relu {
  inline TFloat operator()(TFloat x) const {
    return x <= 0 ? 0 : x;
  }
};
// Derivative of Relu: 1 for positive y, else 0.
struct ReluPrime {
  inline TFloat operator()(TFloat y) const {
    return 0 < y ? 1 : 0;
  }
};
// G: tanh non-linearity.
struct GFunc {
  inline TFloat operator()(TFloat x) const {
    return Tanh(x);
  }
};
// Derivative of tanh, expressed in terms of its output y.
struct GPrime {
  inline TFloat operator()(TFloat y) const {
    return 1 - y * y;
  }
};
// Hard version of tanh: clips x to the range [-1, 1].
// (Conditions match the branching form exactly, including for NaN input.)
struct ClipGFunc {
  inline TFloat operator()(TFloat x) const {
    return x <= -1 ? -1 : (x >= 1 ? 1 : x);
  }
};
// Derivative of ClipGFunc: 1 inside the open interval (-1, 1), else 0.
struct ClipGPrime {
  inline TFloat operator()(TFloat y) const {
    return -1 < y && y < 1 ? 1 : 0;
  }
};
// H: tanh non-linearity (same function as GFunc; separate name for the
// distinct role it plays in callers).
struct HFunc {
  inline TFloat operator()(TFloat x) const {
    return Tanh(x);
  }
};
// Derivative of tanh evaluated at the raw *input* y: tanh'(y) = 1 - tanh(y)^2.
// Note this differs from GPrime, which takes the already-tanh'd output.
struct HPrime {
  inline TFloat operator()(TFloat y) const {
    TFloat u = Tanh(y);
    return 1 - u * u;
  }
};
// Constant 1 regardless of input (trivial "derivative" for linear outputs).
struct UnityFunc {
  inline TFloat operator()(TFloat /*x*/) const {
    return 1.0;
  }
};
// Identity: passes the input through unchanged.
struct IdentityFunc {
  inline TFloat operator()(TFloat x) const {
    return x;
  }
};
// Applies Func in-place to inout, of size n.
template <class Func>
inline void FuncInplace(int n, TFloat *inout) {
Func f;
for (int i = 0; i < n; ++i) {
inout[i] = f(inout[i]);
}
}
// Applies Func to u and multiplies the result by v component-wise,
// putting the product in out, all of size n.
template <class Func>
inline void FuncMultiply(const TFloat *u, const TFloat *v, int n, TFloat *out) {
  // out[i] = Func(u[i]) * v[i] for each of the n elements.
  Func f;
  for (int i = 0; i < n; ++i) {
    out[i] = f(u[i]) * v[i];
  }
}
// Applies the Softmax function in-place to inout, of size n.
template <typename T>
inline void SoftmaxInPlace(int n, T *inout) {
  if (n <= 0) {
    return;
  }
  // A limit on the negative range input to exp to guarantee non-zero output.
  const T kMaxSoftmaxActivation = 86;
  // Subtract the max before exponentiating, for numerical stability; exp
  // then never sees a positive argument.
  T max_output = inout[0];
  for (int i = 1; i < n; i++) {
    T output = inout[i];
    if (output > max_output) {
      max_output = output;
    }
  }
  T prob_total = 0;
  for (int i = 0; i < n; i++) {
    T prob = inout[i] - max_output;
    prob = std::exp(ClipToRange(prob, -kMaxSoftmaxActivation, static_cast<T>(0)));
    prob_total += prob;
    inout[i] = prob;
  }
  // Normalize to a probability distribution. The max element contributes
  // exp(0) == 1, so prob_total is positive.
  if (prob_total > 0) {
    for (int i = 0; i < n; i++) {
      inout[i] /= prob_total;
    }
  }
}
// Copies n values of the given src vector to dest.
inline void CopyVector(unsigned n, const TFloat *src, TFloat *dest) {
  // memcpy is valid here: TFloat is an arithmetic (trivially copyable) type.
  memcpy(dest, src, n * sizeof(dest[0]));
}
// Adds n values of the given src vector to dest.
// dest[i] += src[i] for each of the n elements (no-op for n <= 0).
inline void AccumulateVector(int n, const TFloat *src, TFloat *dest) {
  while (n-- > 0) {
    *dest++ += *src++;
  }
}
// Multiplies n values of inout in-place element-wise by the given src vector.
inline void MultiplyVectorsInPlace(int n, const TFloat *src, TFloat *inout) {
  // inout[i] *= src[i] for each of the n elements.
  for (int i = 0; i < n; ++i) {
    inout[i] *= src[i];
  }
}
// Multiplies n values of u by v, element-wise, accumulating to out.
inline void MultiplyAccumulate(int n, const TFloat *u, const TFloat *v, TFloat *out) {
  // out[i] += u[i] * v[i] for each of the n elements.
  for (int i = 0; i < n; i++) {
    out[i] += u[i] * v[i];
  }
}
// Sums the given 5 n-vectors putting the result into sum.
inline void SumVectors(int n, const TFloat *v1, const TFloat *v2, const TFloat *v3,
                       const TFloat *v4, const TFloat *v5, TFloat *sum) {
  // Element-wise sum of exactly five input vectors.
  for (int i = 0; i < n; ++i) {
    sum[i] = v1[i] + v2[i] + v3[i] + v4[i] + v5[i];
  }
}
// Sets the given n-vector vec to 0.
template <typename T>
inline void ZeroVector(unsigned n, T *vec) {
  // memset is valid for the arithmetic element types this is used with.
  memset(vec, 0, n * sizeof(*vec));
}
// Clips the given vector vec, of size n to [lower, upper].
template <typename T>
inline void ClipVector(int n, T lower, T upper, T *vec) {
  // Clamp each element into [lower, upper].
  for (int i = 0; i < n; ++i) {
    vec[i] = ClipToRange(vec[i], lower, upper);
  }
}
// Converts the given n-vector to a binary encoding of the maximum value,
// encoded as vector of nf binary values.
inline void CodeInBinary(int n, int nf, TFloat *vec) {
  // Need at least nf inputs to overwrite with nf bits.
  if (nf <= 0 || n < nf) {
    return;
  }
  // Argmax over the n inputs (first maximum wins on ties, as before).
  int best_index = 0;
  for (int i = 1; i < n; ++i) {
    if (vec[i] > vec[best_index]) {
      best_index = i;
    }
  }
  // Write the bits of best_index into vec, least significant bit first.
  for (int i = 0; i < nf; ++i) {
    vec[i] = ((best_index >> i) & 1) ? 1.0 : 0.0;
  }
}
} // namespace tesseract.
#endif // TESSERACT_LSTM_FUNCTIONS_H_
|
2301_81045437/tesseract
|
src/lstm/functions.h
|
C++
|
apache-2.0
| 7,144
|
///////////////////////////////////////////////////////////////////////
// File: input.cpp
// Description: Input layer class for neural network implementations.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#include "input.h"
#include <allheaders.h>
#include "imagedata.h"
#include "pageres.h"
#include "scrollview.h"
namespace tesseract {
// Max height for variable height inputs before scaling anyway.
const int kMaxInputHeight = 48;
// Builds an input layer with ni inputs (image height) and no outputs.
Input::Input(const std::string &name, int ni, int no)
    : Network(NT_INPUT, name, ni, no), cached_x_scale_(1) {}
Input::Input(const std::string &name, const StaticShape &shape)
    : Network(NT_INPUT, name, shape.height(), shape.depth()), shape_(shape), cached_x_scale_(1) {
  // A height of 1 means a 1-d input, so the number of inputs is the depth
  // rather than the height.
  if (shape.height() == 1) {
    ni_ = shape.depth();
  }
}
// Writes to the given file. Returns false in case of error.
bool Input::Serialize(TFile *fp) const {
  // Base-class data followed by the input shape.
  return Network::Serialize(fp) && shape_.Serialize(fp);
}
// Reads from the given file. Returns false in case of error.
bool Input::DeSerialize(TFile *fp) {
  // Base-class data has already been read by the caller.
  return shape_.DeSerialize(fp);
}
// Returns an integer reduction factor that the network applies to the
// time sequence. Assumes that any 2-d is already eliminated. Used for
// scaling bounding boxes of truth data.
int Input::XScaleFactor() const {
  // The input layer itself performs no time-sequence reduction.
  return 1;
}
// Provides the (minimum) x scale factor to the network (of interest only to
// input units) so they can determine how to scale bounding boxes.
void Input::CacheXScaleFactor(int factor) {
  // Remember the network-wide x scale for later bounding-box scaling.
  cached_x_scale_ = factor;
}
// Runs forward propagation of activations on the input line.
// See Network for a detailed discussion of the arguments.
void Input::Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
                    NetworkScratch *scratch, NetworkIO *output) {
  // The input layer is a pure pass-through; the other arguments are unused.
  *output = input;
}
// Runs backward propagation of errors on the deltas line.
// See NetworkCpp for a detailed discussion of the arguments.
bool Input::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                     NetworkIO *back_deltas) {
  // There is nothing below the input layer to propagate errors to.
  tprintf("Input::Backward should not be called!!\n");
  return false;
}
// Creates and returns a Pix of appropriate size for the network from the
// image_data. If non-null, *image_scale returns the image scale factor used.
// Returns nullptr on error.
/* static */
Image Input::PrepareLSTMInputs(const ImageData &image_data, const Network *network, int min_width,
                               TRand *randomizer, float *image_scale) {
  // Note that NumInputs() is defined as input image height.
  int target_height = network->NumInputs();
  int width, height;
  // randomizer is currently unused here; scaling is deterministic.
  Image pix =
      image_data.PreScale(target_height, kMaxInputHeight, image_scale, &width, &height, nullptr);
  if (pix == nullptr) {
    tprintf("Bad pix from ImageData!\n");
    return nullptr;
  }
  // Reject images too small in either dimension to produce useful input.
  if (width < min_width || height < min_width) {
    tprintf("Image too small to scale!! (%dx%d vs min width of %d)\n", width, height, min_width);
    pix.destroy();
    return nullptr;
  }
  return pix;
}
// Converts the given pix to a NetworkIO of height and depth appropriate to the
// given StaticShape:
// If depth == 3, convert to 24 bit color, otherwise normalized grey.
// Scale to target height, if the shape's height is > 1, or its depth if the
// height == 1. If height == 0 then no scaling.
// NOTE: It isn't safe for multiple threads to call this on the same pix.
/* static */
void Input::PreparePixInput(const StaticShape &shape, const Image pix, TRand *randomizer,
                            NetworkIO *input) {
  // A depth of 3 means the network wants color; anything else gets grey.
  bool color = shape.depth() == 3;
  Image var_pix = pix;
  int depth = pixGetDepth(var_pix);
  Image normed_pix = nullptr;
  // On input to BaseAPI, an image is forced to be 1, 8 or 24 bit, without
  // colormap, so we just have to deal with depth conversion here.
  if (color) {
    // Force RGB.
    if (depth == 32) {
      normed_pix = var_pix.clone();
    } else {
      normed_pix = pixConvertTo32(var_pix);
    }
  } else {
    // Convert non-8-bit images to 8 bit.
    if (depth == 8) {
      normed_pix = var_pix.clone();
    } else {
      normed_pix = pixConvertTo8(var_pix, false);
    }
  }
  int height = pixGetHeight(normed_pix);
  int target_height = shape.height();
  // height == 1 encodes a 1-d input whose size is carried in the depth.
  if (target_height == 1) {
    target_height = shape.depth();
  }
  if (target_height != 0 && target_height != height) {
    // Get the scaled image.
    float im_factor = static_cast<float>(target_height) / height;
    Image scaled_pix = pixScale(normed_pix, im_factor, im_factor);
    normed_pix.destroy();
    normed_pix = scaled_pix;
  }
  input->FromPix(shape, normed_pix, randomizer);
  normed_pix.destroy();
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/input.cpp
|
C++
|
apache-2.0
| 5,353
|
///////////////////////////////////////////////////////////////////////
// File: input.h
// Description: Input layer class for neural network implementations.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_INPUT_H_
#define TESSERACT_LSTM_INPUT_H_
#include "network.h"
namespace tesseract {
class ScrollView;
// Input layer: feeds (image) data into the network unchanged, and provides
// static helpers to scale and normalize a Pix into a NetworkIO.
class Input : public Network {
public:
  TESS_API
  Input(const std::string &name, int ni, int no);
  TESS_API
  Input(const std::string &name, const StaticShape &shape);
  ~Input() override = default;
  // Returns the spec string "batch,height,width,depth" of the input shape.
  std::string spec() const override {
    return std::to_string(shape_.batch()) + "," +
           std::to_string(shape_.height()) + "," +
           std::to_string(shape_.width()) + "," +
           std::to_string(shape_.depth());
  }
  // Returns the required shape input to the network.
  StaticShape InputShape() const override {
    return shape_;
  }
  // Returns the shape output from the network given an input shape (which may
  // be partially unknown ie zero).
  StaticShape OutputShape(
      [[maybe_unused]] const StaticShape &input_shape) const override {
    return shape_;
  }
  // Writes to the given file. Returns false in case of error.
  // Should be overridden by subclasses, but called by their Serialize.
  bool Serialize(TFile *fp) const override;
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(TFile *fp) override;
  // Returns an integer reduction factor that the network applies to the
  // time sequence. Assumes that any 2-d is already eliminated. Used for
  // scaling bounding boxes of truth data.
  // WARNING: if GlobalMinimax is used to vary the scale, this will return
  // the last used scale factor. Call it before any forward, and it will return
  // the minimum scale factor of the paths through the GlobalMinimax.
  int XScaleFactor() const override;
  // Provides the (minimum) x scale factor to the network (of interest only to
  // input units) so they can determine how to scale bounding boxes.
  void CacheXScaleFactor(int factor) override;
  // Runs forward propagation of activations on the input line.
  // See Network for a detailed discussion of the arguments.
  void Forward(bool debug, const NetworkIO &input,
               const TransposedArray *input_transpose, NetworkScratch *scratch,
               NetworkIO *output) override;
  // Runs backward propagation of errors on the deltas line.
  // See Network for a detailed discussion of the arguments.
  bool Backward(bool debug, const NetworkIO &fwd_deltas,
                NetworkScratch *scratch, NetworkIO *back_deltas) override;
  // Creates and returns a Pix of appropriate size for the network from the
  // image_data. If non-null, *image_scale returns the image scale factor used.
  // Returns nullptr on error.
  /* static */
  static Image PrepareLSTMInputs(const ImageData &image_data,
                                 const Network *network, int min_width,
                                 TRand *randomizer, float *image_scale);
  // Converts the given pix to a NetworkIO of height and depth appropriate to
  // the given StaticShape:
  // If depth == 3, convert to 24 bit color, otherwise normalized grey.
  // Scale to target height, if the shape's height is > 1, or its depth if the
  // height == 1. If height == 0 then no scaling.
  // NOTE: It isn't safe for multiple threads to call this on the same pix.
  static void PreparePixInput(const StaticShape &shape, const Image pix,
                              TRand *randomizer, NetworkIO *input);
private:
  void DebugWeights() override {
    tprintf("Must override Network::DebugWeights for type %d\n", type_);
  }
  // Input shape determines how images are dealt with.
  StaticShape shape_;
  // Cached total network x scale factor for scaling bounding boxes.
  int cached_x_scale_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_INPUT_H_
|
2301_81045437/tesseract
|
src/lstm/input.h
|
C++
|
apache-2.0
| 4,550
|
///////////////////////////////////////////////////////////////////////
// File: lstm.cpp
// Description: Long-term-short-term-memory Recurrent neural network.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "lstm.h"
#ifdef _OPENMP
# include <omp.h>
#endif
#include <climits> // for UINT_MAX
#include <cstdio>
#include <cstdlib>
#include <sstream> // for std::ostringstream
#if defined(_MSC_VER) && !defined(__clang__)
# include <intrin.h> // _BitScanReverse
#endif
#include "fullyconnected.h"
#include "functions.h"
#include "networkscratch.h"
#include "tprintf.h"
// Macros for openmp code if it is available, otherwise empty macros.
#ifdef _OPENMP
# define PARALLEL_IF_OPENMP(__num_threads) \
PRAGMA(omp parallel if (__num_threads > 1) num_threads(__num_threads)) { \
PRAGMA(omp sections nowait) { \
PRAGMA(omp section) {
# define SECTION_IF_OPENMP \
} \
PRAGMA(omp section) {
# define END_PARALLEL_IF_OPENMP \
} \
} /* end of sections */ \
} /* end of parallel section */
// Define the portable PRAGMA macro.
# ifdef _MSC_VER // Different _Pragma
# define PRAGMA(x) __pragma(x)
# else
# define PRAGMA(x) _Pragma(# x)
# endif // _MSC_VER
#else // _OPENMP
# define PARALLEL_IF_OPENMP(__num_threads)
# define SECTION_IF_OPENMP
# define END_PARALLEL_IF_OPENMP
#endif // _OPENMP
namespace tesseract {
// Max absolute value of state_. It is reasonably high to enable the state
// to count things.
const TFloat kStateClip = 100.0;
// Max absolute value of gate_errors (the gradients).
const TFloat kErrClip = 1.0f;
// Calculate ceil(log2(n)).
// Returns UINT_MAX for n == 0 (log2 is undefined there) on ALL code paths.
// Previously only the portable fallback handled n == 0: __builtin_clz(0) is
// undefined behavior on gcc/clang, and _BitScanReverse with no bit set leaves
// its output index undefined on MSVC.
static inline uint32_t ceil_log2(uint32_t n) {
  if (n == 0) {
    return UINT_MAX;
  }
  // l2 = (unsigned)log2(n), i.e. the index of the highest set bit.
#if defined(__GNUC__)
  // Use fast inline assembler code for gcc or clang.
  uint32_t l2 = 31 - __builtin_clz(n);
#elif defined(_MSC_VER)
  // Use fast intrinsic function for MS compiler.
  unsigned long l2 = 0;
  _BitScanReverse(&l2, n);
#else
  // Portable fallback: shift down until only the top bit remains.
  uint32_t val = n;
  uint32_t l2 = 0;
  while (val > 1) {
    val >>= 1;
    l2++;
  }
#endif
  // Round up if n is not a power of 2.
  return (n == (1u << l2)) ? l2 : l2 + 1;
}
// Constructs an LSTM layer. ni/ns/no are the number of inputs, internal
// states and outputs respectively. The padded input size na_ starts as
// ni + ns (image input plus recurrent output feedback) and is extended below
// by another ns for 2-D operation and by nf_ for the softmax feedback.
LSTM::LSTM(const std::string &name, int ni, int ns, int no, bool two_dimensional, NetworkType type)
    : Network(type, name, ni, no)
    , na_(ni + ns)
    , ns_(ns)
    , nf_(0)
    , is_2d_(two_dimensional)
    , softmax_(nullptr)
    , input_width_(0) {
  if (two_dimensional) {
    // 2-D operation also feeds back the output from the other dimension.
    na_ += ns_;
  }
  if (type_ == NT_LSTM || type_ == NT_LSTM_SUMMARY) {
    nf_ = 0;
    // networkbuilder ensures this is always true.
    ASSERT_HOST(no == ns);
  } else if (type_ == NT_LSTM_SOFTMAX || type_ == NT_LSTM_SOFTMAX_ENCODED) {
    // Softmax variants feed their (categorical or binary-encoded) output
    // back into the input at the next timestep, hence nf_ extra inputs.
    nf_ = type_ == NT_LSTM_SOFTMAX ? no_ : ceil_log2(no_);
    softmax_ = new FullyConnected("LSTM Softmax", ns_, no_, NT_SOFTMAX);
  } else {
    tprintf("%d is invalid type of LSTM!\n", type);
    ASSERT_HOST(false);
  }
  na_ += nf_;
}
// Destructor frees only the optional softmax layer; all other state is held
// in members with their own destructors.
LSTM::~LSTM() {
  delete softmax_;
}
// Returns the shape output from the network given an input shape (which may
// be partially unknown ie zero).
StaticShape LSTM::OutputShape(const StaticShape &input_shape) const {
  StaticShape shape(input_shape);
  shape.set_depth(no_);
  if (type_ == NT_LSTM_SUMMARY) {
    // A summarizing LSTM collapses each row to a single output column.
    shape.set_width(1);
  }
  // With an embedded softmax, the final shape is whatever it produces.
  return softmax_ != nullptr ? softmax_->OutputShape(shape) : shape;
}
// Suspends/Enables training by setting the training_ flag. Serialize and
// DeSerialize only operate on the run-time data if state is false.
// TS_RE_ENABLE and TS_TEMP_DISABLE only transition between the temporary
// states; any other value is applied directly.
void LSTM::SetEnableTraining(TrainingState state) {
  if (state == TS_RE_ENABLE) {
    // Enable only from temp disabled.
    if (training_ == TS_TEMP_DISABLE) {
      training_ = TS_ENABLED;
    }
  } else if (state == TS_TEMP_DISABLE) {
    // Temp disable only from enabled.
    if (training_ == TS_ENABLED) {
      training_ = state;
    }
  } else {
    if (state == TS_ENABLED && training_ != TS_ENABLED) {
      // Going from non-trainable to trainable: the backward/update buffers
      // in each gate weight matrix must be (re)allocated.
      for (int w = 0; w < WT_COUNT; ++w) {
        if (w == GFS && !Is2D()) {
          continue; // The 2-D forget gate exists only for 2-D LSTMs.
        }
        gate_weights_[w].InitBackward();
      }
    }
    training_ = state;
  }
  if (softmax_ != nullptr) {
    // Keep the embedded softmax layer's training state in sync.
    softmax_->SetEnableTraining(state);
  }
}
// Sets up the network for training. Initializes weights using weights of
// scale `range` picked according to the random number generator `randomizer`.
// Returns the total number of weights created.
int LSTM::InitWeights(float range, TRand *randomizer) {
  Network::SetRandomizer(randomizer);
  num_weights_ = 0;
  for (int gate = 0; gate < WT_COUNT; ++gate) {
    // The 2-D forget gate only exists for 2-D LSTMs.
    if (gate != GFS || Is2D()) {
      // Each gate maps the na_ padded inputs (+1 bias) to the ns_ states.
      num_weights_ +=
          gate_weights_[gate].InitWeightsFloat(ns_, na_ + 1, TestFlag(NF_ADAM), range, randomizer);
    }
  }
  if (softmax_ != nullptr) {
    num_weights_ += softmax_->InitWeights(range, randomizer);
  }
  return num_weights_;
}
// Recursively searches the network for softmaxes with old_no outputs,
// and remaps their outputs according to code_map. See network.h for details.
int LSTM::RemapOutputs(int old_no, const std::vector<int> &code_map) {
if (softmax_ != nullptr) {
num_weights_ -= softmax_->num_weights();
num_weights_ += softmax_->RemapOutputs(old_no, code_map);
}
return num_weights_;
}
// Converts a float network to an int network (quantizes every gate weight
// matrix and the optional softmax).
void LSTM::ConvertToInt() {
  for (int gate = 0; gate < WT_COUNT; ++gate) {
    // Skip the 2-D forget gate on 1-D LSTMs.
    if (gate != GFS || Is2D()) {
      gate_weights_[gate].ConvertToInt();
    }
  }
  if (softmax_ != nullptr) {
    softmax_->ConvertToInt();
  }
}
// Provides debug output on the weights: one 2-D dump per gate weight matrix,
// plus the embedded softmax if present. (The previous comment here was a
// copy/paste from InitWeights; this method only prints.)
void LSTM::DebugWeights() {
  for (int w = 0; w < WT_COUNT; ++w) {
    if (w == GFS && !Is2D()) {
      continue; // The 2-D forget gate exists only for 2-D LSTMs.
    }
    std::ostringstream msg;
    msg << name_ << " Gate weights " << w;
    gate_weights_[w].Debug2D(msg.str().c_str());
  }
  if (softmax_ != nullptr) {
    softmax_->DebugWeights();
  }
}
// Writes to the given file. Returns false in case of error.
// Of the size fields only na_ is written; ns_, nf_ and is_2d_ are
// reconstructed by DeSerialize from the type and the gate weight shapes, so
// the write order here must match the read order there exactly.
bool LSTM::Serialize(TFile *fp) const {
  if (!Network::Serialize(fp)) {
    return false;
  }
  if (!fp->Serialize(&na_)) {
    return false;
  }
  for (int w = 0; w < WT_COUNT; ++w) {
    if (w == GFS && !Is2D()) {
      continue; // The 2-D forget gate exists only for 2-D LSTMs.
    }
    // Gate weights carry training-only data iff currently training.
    if (!gate_weights_[w].Serialize(IsTraining(), fp)) {
      return false;
    }
  }
  if (softmax_ != nullptr && !softmax_->Serialize(fp)) {
    return false;
  }
  return true;
}
// Reads from the given file. Returns false in case of error.
// Only na_ is read directly; nf_ is recomputed from the network type, ns_ is
// recovered from the shape of the CI gate weights, and is_2d_ is deduced
// from the size equation na_ - nf_ == ni_ + 2 * ns_.
bool LSTM::DeSerialize(TFile *fp) {
  if (!fp->DeSerialize(&na_)) {
    return false;
  }
  if (type_ == NT_LSTM_SOFTMAX) {
    nf_ = no_;
  } else if (type_ == NT_LSTM_SOFTMAX_ENCODED) {
    nf_ = ceil_log2(no_);
  } else {
    nf_ = 0;
  }
  is_2d_ = false;
  for (int w = 0; w < WT_COUNT; ++w) {
    if (w == GFS && !Is2D()) {
      continue;
    }
    if (!gate_weights_[w].DeSerialize(IsTraining(), fp)) {
      return false;
    }
    if (w == CI) {
      // CI is read first: its output count gives ns_, and is_2d_ must be
      // derived before the loop decides whether GFS was serialized at all.
      ns_ = gate_weights_[CI].NumOutputs();
      is_2d_ = na_ - nf_ == ni_ + 2 * ns_;
    }
  }
  delete softmax_;
  if (type_ == NT_LSTM_SOFTMAX || type_ == NT_LSTM_SOFTMAX_ENCODED) {
    softmax_ = static_cast<FullyConnected *>(Network::CreateFromFile(fp));
    if (softmax_ == nullptr) {
      return false;
    }
  } else {
    softmax_ = nullptr;
  }
  return true;
}
// Runs forward propagation of activations on the input line.
// See Network for a detailed discussion of the arguments.
// The padded per-timestep input written into source_ is laid out as:
//   [ni_ image inputs][nf_ softmax feedback][ns_ recurrent output]
//   [ns_ other-dimension recurrent output (2-D only)].
// input_transpose is unused by LSTM (it exists for other Network subclasses).
void LSTM::Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
                   NetworkScratch *scratch, NetworkIO *output) {
  input_map_ = input.stride_map();
  input_width_ = input.Width();
  if (softmax_ != nullptr) {
    output->ResizeFloat(input, no_);
  } else if (type_ == NT_LSTM_SUMMARY) {
    output->ResizeXTo1(input, no_);
  } else {
    output->Resize(input, no_);
  }
  ResizeForward(input);
  // Temporary storage of forward computation for each gate.
  NetworkScratch::FloatVec temp_lines[WT_COUNT];
  // Round the gate output size up to what the int SIMD kernels may write.
  int ro = ns_;
  if (source_.int_mode() && IntSimdMatrix::intSimdMatrix) {
    ro = IntSimdMatrix::intSimdMatrix->RoundOutputs(ro);
  }
  for (auto &temp_line : temp_lines) {
    temp_line.Init(ns_, ro, scratch);
  }
  // Single timestep buffers for the current/recurrent output and state.
  NetworkScratch::FloatVec curr_state, curr_output;
  curr_state.Init(ns_, scratch);
  ZeroVector<TFloat>(ns_, curr_state);
  curr_output.Init(ns_, scratch);
  ZeroVector<TFloat>(ns_, curr_output);
  // Rotating buffers of width buf_width allow storage of the state and output
  // for the other dimension, used only when working in true 2D mode. The width
  // is enough to hold an entire strip of the major direction.
  int buf_width = Is2D() ? input_map_.Size(FD_WIDTH) : 1;
  std::vector<NetworkScratch::FloatVec> states, outputs;
  if (Is2D()) {
    states.resize(buf_width);
    outputs.resize(buf_width);
    for (int i = 0; i < buf_width; ++i) {
      states[i].Init(ns_, scratch);
      ZeroVector<TFloat>(ns_, states[i]);
      outputs[i].Init(ns_, scratch);
      ZeroVector<TFloat>(ns_, outputs[i]);
    }
  }
  // Used only if a softmax LSTM.
  NetworkScratch::FloatVec softmax_output;
  NetworkScratch::IO int_output;
  if (softmax_ != nullptr) {
    softmax_output.Init(no_, scratch);
    ZeroVector<TFloat>(no_, softmax_output);
    int rounded_softmax_inputs = gate_weights_[CI].RoundInputs(ns_);
    if (input.int_mode()) {
      int_output.Resize2d(true, 1, rounded_softmax_inputs, scratch);
    }
    softmax_->SetupForward(input, nullptr);
  }
  NetworkScratch::FloatVec curr_input;
  curr_input.Init(na_, scratch);
  StrideMap::Index src_index(input_map_);
  // Used only by NT_LSTM_SUMMARY.
  StrideMap::Index dest_index(output->stride_map());
  do {
    int t = src_index.t();
    // True if there is a valid old state for the 2nd dimension.
    bool valid_2d = Is2D();
    if (valid_2d) {
      StrideMap::Index dim_index(src_index);
      if (!dim_index.AddOffset(-1, FD_HEIGHT)) {
        valid_2d = false;
      }
    }
    // Index of the 2-D revolving buffers (outputs, states).
    int mod_t = Modulo(t, buf_width); // Current timestep.
    // Setup the padded input in source.
    source_.CopyTimeStepGeneral(t, 0, ni_, input, t, 0);
    if (softmax_ != nullptr) {
      source_.WriteTimeStepPart(t, ni_, nf_, softmax_output);
    }
    source_.WriteTimeStepPart(t, ni_ + nf_, ns_, curr_output);
    if (Is2D()) {
      source_.WriteTimeStepPart(t, ni_ + nf_ + ns_, ns_, outputs[mod_t]);
    }
    if (!source_.int_mode()) {
      source_.ReadTimeStep(t, curr_input);
    }
    // Matrix multiply the inputs with the source.
    PARALLEL_IF_OPENMP(GFS)
    // It looks inefficient to create the threads on each t iteration, but the
    // alternative of putting the parallel outside the t loop, a single around
    // the t-loop and then tasks in place of the sections is a *lot* slower.
    // Cell inputs.
    if (source_.int_mode()) {
      gate_weights_[CI].MatrixDotVector(source_.i(t), temp_lines[CI]);
    } else {
      gate_weights_[CI].MatrixDotVector(curr_input, temp_lines[CI]);
    }
    FuncInplace<GFunc>(ns_, temp_lines[CI]);
    SECTION_IF_OPENMP
    // Input Gates.
    if (source_.int_mode()) {
      gate_weights_[GI].MatrixDotVector(source_.i(t), temp_lines[GI]);
    } else {
      gate_weights_[GI].MatrixDotVector(curr_input, temp_lines[GI]);
    }
    FuncInplace<FFunc>(ns_, temp_lines[GI]);
    SECTION_IF_OPENMP
    // 1-D forget gates.
    if (source_.int_mode()) {
      gate_weights_[GF1].MatrixDotVector(source_.i(t), temp_lines[GF1]);
    } else {
      gate_weights_[GF1].MatrixDotVector(curr_input, temp_lines[GF1]);
    }
    FuncInplace<FFunc>(ns_, temp_lines[GF1]);
    // 2-D forget gates.
    if (Is2D()) {
      if (source_.int_mode()) {
        gate_weights_[GFS].MatrixDotVector(source_.i(t), temp_lines[GFS]);
      } else {
        gate_weights_[GFS].MatrixDotVector(curr_input, temp_lines[GFS]);
      }
      FuncInplace<FFunc>(ns_, temp_lines[GFS]);
    }
    SECTION_IF_OPENMP
    // Output gates.
    if (source_.int_mode()) {
      gate_weights_[GO].MatrixDotVector(source_.i(t), temp_lines[GO]);
    } else {
      gate_weights_[GO].MatrixDotVector(curr_input, temp_lines[GO]);
    }
    FuncInplace<FFunc>(ns_, temp_lines[GO]);
    END_PARALLEL_IF_OPENMP
    // Apply forget gate to state.
    MultiplyVectorsInPlace(ns_, temp_lines[GF1], curr_state);
    if (Is2D()) {
      // Max-pool the forget gates (in 2-d) instead of blindly adding.
      // which_fg_ records the winner (1 = GF1, 2 = GFS) for Backward.
      int8_t *which_fg_col = which_fg_[t];
      memset(which_fg_col, 1, ns_ * sizeof(which_fg_col[0]));
      if (valid_2d) {
        const TFloat *stepped_state = states[mod_t];
        for (int i = 0; i < ns_; ++i) {
          if (temp_lines[GF1][i] < temp_lines[GFS][i]) {
            curr_state[i] = temp_lines[GFS][i] * stepped_state[i];
            which_fg_col[i] = 2;
          }
        }
      }
    }
    MultiplyAccumulate(ns_, temp_lines[CI], temp_lines[GI], curr_state);
    // Clip curr_state to a sane range.
    ClipVector<TFloat>(ns_, -kStateClip, kStateClip, curr_state);
    if (IsTraining()) {
      // Save the gate node values.
      node_values_[CI].WriteTimeStep(t, temp_lines[CI]);
      node_values_[GI].WriteTimeStep(t, temp_lines[GI]);
      node_values_[GF1].WriteTimeStep(t, temp_lines[GF1]);
      node_values_[GO].WriteTimeStep(t, temp_lines[GO]);
      if (Is2D()) {
        node_values_[GFS].WriteTimeStep(t, temp_lines[GFS]);
      }
    }
    FuncMultiply<HFunc>(curr_state, temp_lines[GO], ns_, curr_output);
    if (IsTraining()) {
      state_.WriteTimeStep(t, curr_state);
    }
    if (softmax_ != nullptr) {
      if (input.int_mode()) {
        int_output->WriteTimeStepPart(0, 0, ns_, curr_output);
        softmax_->ForwardTimeStep(int_output->i(0), t, softmax_output);
      } else {
        softmax_->ForwardTimeStep(curr_output, t, softmax_output);
      }
      output->WriteTimeStep(t, softmax_output);
      if (type_ == NT_LSTM_SOFTMAX_ENCODED) {
        // Re-encode the feedback in binary before it goes into source_.
        CodeInBinary(no_, nf_, softmax_output);
      }
    } else if (type_ == NT_LSTM_SUMMARY) {
      // Output only at the end of a row.
      if (src_index.IsLast(FD_WIDTH)) {
        output->WriteTimeStep(dest_index.t(), curr_output);
        dest_index.Increment();
      }
    } else {
      output->WriteTimeStep(t, curr_output);
    }
    // Save states for use by the 2nd dimension only if needed.
    if (Is2D()) {
      CopyVector(ns_, curr_state, states[mod_t]);
      CopyVector(ns_, curr_output, outputs[mod_t]);
    }
    // Always zero the states at the end of every row, but only for the major
    // direction. The 2-D state remains intact.
    if (src_index.IsLast(FD_WIDTH)) {
      ZeroVector<TFloat>(ns_, curr_state);
      ZeroVector<TFloat>(ns_, curr_output);
    }
  } while (src_index.Increment());
#if DEBUG_DETAIL > 0
  tprintf("Source:%s\n", name_.c_str());
  source_.Print(10);
  tprintf("State:%s\n", name_.c_str());
  state_.Print(10);
  tprintf("Output:%s\n", name_.c_str());
  output->Print(10);
#endif
#ifndef GRAPHICS_DISABLED
  if (debug) {
    DisplayForward(*output);
  }
#endif
}
// Runs backward propagation of errors on the deltas line.
// See Network for a detailed discussion of the arguments.
// Walks the timesteps in reverse (mirroring Forward), accumulating per-gate
// errors, then computes the weight gradients with one transposed outer
// product per gate at the end.
bool LSTM::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                    NetworkIO *back_deltas) {
#ifndef GRAPHICS_DISABLED
  if (debug) {
    DisplayBackward(fwd_deltas);
  }
#endif
  back_deltas->ResizeToMap(fwd_deltas.int_mode(), input_map_, ni_);
  // ======Scratch space.======
  // Output errors from deltas with recurrence from sourceerr.
  NetworkScratch::FloatVec outputerr;
  outputerr.Init(ns_, scratch);
  // Recurrent error in the state/source.
  NetworkScratch::FloatVec curr_stateerr, curr_sourceerr;
  curr_stateerr.Init(ns_, scratch);
  curr_sourceerr.Init(na_, scratch);
  ZeroVector<TFloat>(ns_, curr_stateerr);
  ZeroVector<TFloat>(na_, curr_sourceerr);
  // Errors in the gates.
  NetworkScratch::FloatVec gate_errors[WT_COUNT];
  for (auto &gate_error : gate_errors) {
    gate_error.Init(ns_, scratch);
  }
  // Rotating buffers of width buf_width allow storage of the recurrent time-
  // steps used only for true 2-D. Stores one full strip of the major direction.
  int buf_width = Is2D() ? input_map_.Size(FD_WIDTH) : 1;
  std::vector<NetworkScratch::FloatVec> stateerr, sourceerr;
  if (Is2D()) {
    stateerr.resize(buf_width);
    sourceerr.resize(buf_width);
    for (int t = 0; t < buf_width; ++t) {
      stateerr[t].Init(ns_, scratch);
      sourceerr[t].Init(na_, scratch);
      ZeroVector<TFloat>(ns_, stateerr[t]);
      ZeroVector<TFloat>(na_, sourceerr[t]);
    }
  }
  // Parallel-generated sourceerr from each of the gates.
  NetworkScratch::FloatVec sourceerr_temps[WT_COUNT];
  for (auto &sourceerr_temp : sourceerr_temps) {
    sourceerr_temp.Init(na_, scratch);
  }
  int width = input_width_;
  // Transposed gate errors stored over all timesteps for sum outer.
  NetworkScratch::GradientStore gate_errors_t[WT_COUNT];
  for (auto &w : gate_errors_t) {
    w.Init(ns_, width, scratch);
  }
  // Used only if softmax_ != nullptr.
  NetworkScratch::FloatVec softmax_errors;
  NetworkScratch::GradientStore softmax_errors_t;
  if (softmax_ != nullptr) {
    softmax_errors.Init(no_, scratch);
    softmax_errors_t.Init(no_, width, scratch);
  }
  // 2-D LSTMs get a wider clip because errors arrive from two directions.
  TFloat state_clip = Is2D() ? 9.0 : 4.0;
#if DEBUG_DETAIL > 1
  tprintf("fwd_deltas:%s\n", name_.c_str());
  fwd_deltas.Print(10);
#endif
  StrideMap::Index dest_index(input_map_);
  dest_index.InitToLast();
  // Used only by NT_LSTM_SUMMARY.
  StrideMap::Index src_index(fwd_deltas.stride_map());
  src_index.InitToLast();
  do {
    int t = dest_index.t();
    bool at_last_x = dest_index.IsLast(FD_WIDTH);
    // up_pos is the 2-D back step, down_pos is the 2-D fwd step, and are only
    // valid if >= 0, which is true if 2d and not on the top/bottom.
    int up_pos = -1;
    int down_pos = -1;
    if (Is2D()) {
      if (dest_index.index(FD_HEIGHT) > 0) {
        StrideMap::Index up_index(dest_index);
        if (up_index.AddOffset(-1, FD_HEIGHT)) {
          up_pos = up_index.t();
        }
      }
      if (!dest_index.IsLast(FD_HEIGHT)) {
        StrideMap::Index down_index(dest_index);
        if (down_index.AddOffset(1, FD_HEIGHT)) {
          down_pos = down_index.t();
        }
      }
    }
    // Index of the 2-D revolving buffers (sourceerr, stateerr).
    int mod_t = Modulo(t, buf_width); // Current timestep.
    // Zero the state in the major direction only at the end of every row.
    if (at_last_x) {
      ZeroVector<TFloat>(na_, curr_sourceerr);
      ZeroVector<TFloat>(ns_, curr_stateerr);
    }
    // Setup the outputerr.
    if (type_ == NT_LSTM_SUMMARY) {
      if (dest_index.IsLast(FD_WIDTH)) {
        // Summary LSTMs only produced output at the end of each row.
        fwd_deltas.ReadTimeStep(src_index.t(), outputerr);
        src_index.Decrement();
      } else {
        ZeroVector<TFloat>(ns_, outputerr);
      }
    } else if (softmax_ == nullptr) {
      fwd_deltas.ReadTimeStep(t, outputerr);
    } else {
      softmax_->BackwardTimeStep(fwd_deltas, t, softmax_errors, softmax_errors_t.get(), outputerr);
    }
    // Add recurrent errors from the next timestep(s)'s source errors.
    if (!at_last_x) {
      AccumulateVector(ns_, curr_sourceerr + ni_ + nf_, outputerr);
    }
    if (down_pos >= 0) {
      AccumulateVector(ns_, sourceerr[mod_t] + ni_ + nf_ + ns_, outputerr);
    }
    // Apply the 1-d forget gates.
    if (!at_last_x) {
      const float *next_node_gf1 = node_values_[GF1].f(t + 1);
      for (int i = 0; i < ns_; ++i) {
        curr_stateerr[i] *= next_node_gf1[i];
      }
    }
    if (Is2D() && t + 1 < width) {
      // Undo the max-pool: state error flows back only through the forget
      // gate that won at forward time (recorded in which_fg_).
      for (int i = 0; i < ns_; ++i) {
        if (which_fg_[t + 1][i] != 1) {
          curr_stateerr[i] = 0.0;
        }
      }
      if (down_pos >= 0) {
        const float *right_node_gfs = node_values_[GFS].f(down_pos);
        const TFloat *right_stateerr = stateerr[mod_t];
        for (int i = 0; i < ns_; ++i) {
          if (which_fg_[down_pos][i] == 2) {
            curr_stateerr[i] += right_stateerr[i] * right_node_gfs[i];
          }
        }
      }
    }
    state_.FuncMultiply3Add<HPrime>(node_values_[GO], t, outputerr, curr_stateerr);
    // Clip curr_stateerr to a sane range.
    ClipVector<TFloat>(ns_, -state_clip, state_clip, curr_stateerr);
#if DEBUG_DETAIL > 1
    if (t + 10 > width) {
      tprintf("t=%d, stateerr=", t);
      for (int i = 0; i < ns_; ++i)
        tprintf(" %g,%g,%g", curr_stateerr[i], outputerr[i], curr_sourceerr[ni_ + nf_ + i]);
      tprintf("\n");
    }
#endif
    // Matrix multiply to get the source errors.
    PARALLEL_IF_OPENMP(GFS)
    // Cell inputs.
    node_values_[CI].FuncMultiply3<GPrime>(t, node_values_[GI], t, curr_stateerr, gate_errors[CI]);
    ClipVector(ns_, -kErrClip, kErrClip, gate_errors[CI].get());
    gate_weights_[CI].VectorDotMatrix(gate_errors[CI], sourceerr_temps[CI]);
    gate_errors_t[CI].get()->WriteStrided(t, gate_errors[CI]);
    SECTION_IF_OPENMP
    // Input Gates.
    node_values_[GI].FuncMultiply3<FPrime>(t, node_values_[CI], t, curr_stateerr, gate_errors[GI]);
    ClipVector(ns_, -kErrClip, kErrClip, gate_errors[GI].get());
    gate_weights_[GI].VectorDotMatrix(gate_errors[GI], sourceerr_temps[GI]);
    gate_errors_t[GI].get()->WriteStrided(t, gate_errors[GI]);
    SECTION_IF_OPENMP
    // 1-D forget Gates.
    if (t > 0) {
      node_values_[GF1].FuncMultiply3<FPrime>(t, state_, t - 1, curr_stateerr, gate_errors[GF1]);
      ClipVector(ns_, -kErrClip, kErrClip, gate_errors[GF1].get());
      gate_weights_[GF1].VectorDotMatrix(gate_errors[GF1], sourceerr_temps[GF1]);
    } else {
      // No previous state at t == 0, so no error through GF1.
      memset(gate_errors[GF1], 0, ns_ * sizeof(gate_errors[GF1][0]));
      memset(sourceerr_temps[GF1], 0, na_ * sizeof(*sourceerr_temps[GF1]));
    }
    gate_errors_t[GF1].get()->WriteStrided(t, gate_errors[GF1]);
    // 2-D forget Gates.
    if (up_pos >= 0) {
      node_values_[GFS].FuncMultiply3<FPrime>(t, state_, up_pos, curr_stateerr, gate_errors[GFS]);
      ClipVector(ns_, -kErrClip, kErrClip, gate_errors[GFS].get());
      gate_weights_[GFS].VectorDotMatrix(gate_errors[GFS], sourceerr_temps[GFS]);
    } else {
      // Zeroed so the unconditional SumVectors below can include GFS safely.
      memset(gate_errors[GFS], 0, ns_ * sizeof(gate_errors[GFS][0]));
      memset(sourceerr_temps[GFS], 0, na_ * sizeof(*sourceerr_temps[GFS]));
    }
    if (Is2D()) {
      gate_errors_t[GFS].get()->WriteStrided(t, gate_errors[GFS]);
    }
    SECTION_IF_OPENMP
    // Output gates.
    state_.Func2Multiply3<HFunc, FPrime>(node_values_[GO], t, outputerr, gate_errors[GO]);
    ClipVector(ns_, -kErrClip, kErrClip, gate_errors[GO].get());
    gate_weights_[GO].VectorDotMatrix(gate_errors[GO], sourceerr_temps[GO]);
    gate_errors_t[GO].get()->WriteStrided(t, gate_errors[GO]);
    END_PARALLEL_IF_OPENMP
    SumVectors(na_, sourceerr_temps[CI], sourceerr_temps[GI], sourceerr_temps[GF1],
               sourceerr_temps[GO], sourceerr_temps[GFS], curr_sourceerr);
    back_deltas->WriteTimeStep(t, curr_sourceerr);
    // Save states for use by the 2nd dimension only if needed.
    if (Is2D()) {
      CopyVector(ns_, curr_stateerr, stateerr[mod_t]);
      CopyVector(na_, curr_sourceerr, sourceerr[mod_t]);
    }
  } while (dest_index.Decrement());
#if DEBUG_DETAIL > 2
  for (int w = 0; w < WT_COUNT; ++w) {
    tprintf("%s gate errors[%d]\n", name_.c_str(), w);
    gate_errors_t[w].get()->PrintUnTransposed(10);
  }
#endif
  // Transposed source_ used to speed-up SumOuter.
  NetworkScratch::GradientStore source_t, state_t;
  source_t.Init(na_, width, scratch);
  source_.Transpose(source_t.get());
  state_t.Init(ns_, width, scratch);
  state_.Transpose(state_t.get());
#ifdef _OPENMP
# pragma omp parallel for num_threads(GFS) if (!Is2D())
#endif
  for (int w = 0; w < WT_COUNT; ++w) {
    if (w == GFS && !Is2D()) {
      continue;
    }
    gate_weights_[w].SumOuterTransposed(*gate_errors_t[w], *source_t, false);
  }
  if (softmax_ != nullptr) {
    softmax_->FinishBackward(*softmax_errors_t);
  }
  return needs_to_backprop_;
}
// Updates the weights using the given learning rate, momentum and adam_beta.
// num_samples is used in the adam computation iff use_adam_ is true.
void LSTM::Update(float learning_rate, float momentum, float adam_beta, int num_samples) {
#if DEBUG_DETAIL > 3
  PrintW();
#endif
  for (int gate = 0; gate < WT_COUNT; ++gate) {
    // Skip the 2-D forget gate on 1-D LSTMs.
    if (gate != GFS || Is2D()) {
      gate_weights_[gate].Update(learning_rate, momentum, adam_beta, num_samples);
    }
  }
  if (softmax_ != nullptr) {
    softmax_->Update(learning_rate, momentum, adam_beta, num_samples);
  }
#if DEBUG_DETAIL > 3
  PrintDW();
#endif
}
// Sums the products of weight updates in *this and other, splitting into
// positive (same direction) in *same and negative (different direction) in
// *changed.
void LSTM::CountAlternators(const Network &other, TFloat *same, TFloat *changed) const {
  ASSERT_HOST(other.type() == type_);
  const LSTM &lstm = static_cast<const LSTM &>(other);
  for (int gate = 0; gate < WT_COUNT; ++gate) {
    // Skip the 2-D forget gate on 1-D LSTMs.
    if (gate != GFS || Is2D()) {
      gate_weights_[gate].CountAlternators(lstm.gate_weights_[gate], same, changed);
    }
  }
  if (softmax_ != nullptr) {
    softmax_->CountAlternators(*lstm.softmax_, same, changed);
  }
}
#if DEBUG_DETAIL > 3
// Prints the weights for debug purposes.
// For each gate, prints the ni_ input columns, the ns_ recurrent-output
// columns and the bias column of each of the ns_ weight rows.
// NOTE(review): the "outputs" section reads columns ni_..ni_+ns_, which
// assumes nf_ == 0 (plain NT_LSTM); for softmax variants those columns are
// the feedback inputs — TODO confirm intended for debug-only use.
void LSTM::PrintW() {
  tprintf("Weight state:%s\n", name_.c_str());
  for (int w = 0; w < WT_COUNT; ++w) {
    if (w == GFS && !Is2D()) {
      continue;
    }
    tprintf("Gate %d, inputs\n", w);
    for (int i = 0; i < ni_; ++i) {
      tprintf("Row %d:", i);
      for (int s = 0; s < ns_; ++s) {
        tprintf(" %g", gate_weights_[w].GetWeights(s)[i]);
      }
      tprintf("\n");
    }
    tprintf("Gate %d, outputs\n", w);
    for (int i = ni_; i < ni_ + ns_; ++i) {
      tprintf("Row %d:", i - ni_);
      for (int s = 0; s < ns_; ++s) {
        tprintf(" %g", gate_weights_[w].GetWeights(s)[i]);
      }
      tprintf("\n");
    }
    tprintf("Gate %d, bias\n", w);
    for (int s = 0; s < ns_; ++s) {
      // Column na_ is the phantom bias input.
      tprintf(" %g", gate_weights_[w].GetWeights(s)[na_]);
    }
    tprintf("\n");
  }
}
// Prints the weight deltas for debug purposes.
// Mirrors PrintW but dumps the accumulated weight updates (dw) instead of
// the weights themselves; the same nf_ == 0 layout assumption applies.
void LSTM::PrintDW() {
  tprintf("Delta state:%s\n", name_.c_str());
  for (int w = 0; w < WT_COUNT; ++w) {
    if (w == GFS && !Is2D()) {
      continue;
    }
    tprintf("Gate %d, inputs\n", w);
    for (int i = 0; i < ni_; ++i) {
      tprintf("Row %d:", i);
      for (int s = 0; s < ns_; ++s) {
        tprintf(" %g", gate_weights_[w].GetDW(s, i));
      }
      tprintf("\n");
    }
    tprintf("Gate %d, outputs\n", w);
    for (int i = ni_; i < ni_ + ns_; ++i) {
      tprintf("Row %d:", i - ni_);
      for (int s = 0; s < ns_; ++s) {
        tprintf(" %g", gate_weights_[w].GetDW(s, i));
      }
      tprintf("\n");
    }
    tprintf("Gate %d, bias\n", w);
    for (int s = 0; s < ns_; ++s) {
      // Column na_ is the phantom bias input.
      tprintf(" %g", gate_weights_[w].GetDW(s, na_));
    }
    tprintf("\n");
  }
}
#endif
// Resizes forward data to cope with an input image of the given width.
void LSTM::ResizeForward(const NetworkIO &input) {
  // Round the padded input size up so int SIMD kernels can read whole blocks.
  int rounded_inputs = gate_weights_[CI].RoundInputs(na_);
  source_.Resize(input, rounded_inputs);
  // which_fg_ records which forget gate won the 2-D max-pool at each (t, i);
  // written by Forward and read by Backward.
  which_fg_.ResizeNoInit(input.Width(), ns_);
  if (IsTraining()) {
    // The internal state and per-gate node values are kept only for backprop.
    state_.ResizeFloat(input, ns_);
    for (int w = 0; w < WT_COUNT; ++w) {
      if (w == GFS && !Is2D()) {
        continue;
      }
      node_values_[w].ResizeFloat(input, ns_);
    }
  }
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/lstm.cpp
|
C++
|
apache-2.0
| 28,145
|
///////////////////////////////////////////////////////////////////////
// File: lstm.h
// Description: Long-term-short-term-memory Recurrent neural network.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_LSTM_H_
#define TESSERACT_LSTM_LSTM_H_
#include "fullyconnected.h"
#include "network.h"
namespace tesseract {
// C++ Implementation of the LSTM class from lstm.py.
class LSTM : public Network {
public:
  // Enum for the different weights in LSTM, to reduce some of the I/O and
  // setup code to loops. The elements of the enum correspond to elements of an
  // array of WeightMatrix or a corresponding array of NetworkIO.
  enum WeightType {
    CI,      // Cell Inputs.
    GI,      // Gate at the input.
    GF1,     // Forget gate at the memory (1-d or looking back 1 timestep).
    GO,      // Gate at the output.
    GFS,     // Forget gate at the memory, looking back in the other dimension.
    WT_COUNT // Number of WeightTypes.
  };
  // Constructor for NT_LSTM (regular 1 or 2-d LSTM), NT_LSTM_SOFTMAX (LSTM with
  // additional softmax layer included and fed back into the input at the next
  // timestep), or NT_LSTM_SOFTMAX_ENCODED (as LSTM_SOFTMAX, but the feedback
  // is binary encoded instead of categorical) only.
  // 2-d and bidi softmax LSTMs are not rejected, but are impossible to build
  // in the conventional way, because feeding the output back both forwards and
  // backwards in time is impossible.
  TESS_API
  LSTM(const std::string &name, int num_inputs, int num_states, int num_outputs,
       bool two_dimensional, NetworkType type);
  ~LSTM() override;
  // Returns the shape output from the network given an input shape (which may
  // be partially unknown ie zero).
  StaticShape OutputShape(const StaticShape &input_shape) const override;
  // Returns the network-spec string for this layer, eg "Lfx96", including the
  // spec of the embedded softmax for the softmax variants.
  std::string spec() const override {
    std::string spec;
    if (type_ == NT_LSTM) {
      spec += "Lfx" + std::to_string(ns_);
    } else if (type_ == NT_LSTM_SUMMARY) {
      spec += "Lfxs" + std::to_string(ns_);
    } else if (type_ == NT_LSTM_SOFTMAX) {
      spec += "LS" + std::to_string(ns_);
    } else if (type_ == NT_LSTM_SOFTMAX_ENCODED) {
      spec += "LE" + std::to_string(ns_);
    }
    if (softmax_ != nullptr) {
      spec += softmax_->spec();
    }
    return spec;
  }
  // Suspends/Enables training by setting the training_ flag. Serialize and
  // DeSerialize only operate on the run-time data if state is false.
  void SetEnableTraining(TrainingState state) override;
  // Sets up the network for training. Initializes weights using weights of
  // scale `range` picked according to the random number generator `randomizer`.
  int InitWeights(float range, TRand *randomizer) override;
  // Recursively searches the network for softmaxes with old_no outputs,
  // and remaps their outputs according to code_map. See network.h for details.
  int RemapOutputs(int old_no, const std::vector<int> &code_map) override;
  // Converts a float network to an int network.
  void ConvertToInt() override;
  // Provides debug output on the weights.
  void DebugWeights() override;
  // Writes to the given file. Returns false in case of error.
  bool Serialize(TFile *fp) const override;
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(TFile *fp) override;
  // Runs forward propagation of activations on the input line.
  // See Network for a detailed discussion of the arguments.
  void Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
               NetworkScratch *scratch, NetworkIO *output) override;
  // Runs backward propagation of errors on the deltas line.
  // See Network for a detailed discussion of the arguments.
  bool Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                NetworkIO *back_deltas) override;
  // Updates the weights using the given learning rate, momentum and adam_beta.
  // num_samples is used in the adam computation iff use_adam_ is true.
  void Update(float learning_rate, float momentum, float adam_beta, int num_samples) override;
  // Sums the products of weight updates in *this and other, splitting into
  // positive (same direction) in *same and negative (different direction) in
  // *changed.
  void CountAlternators(const Network &other, TFloat *same, TFloat *changed) const override;
  // Prints the weights for debug purposes.
  void PrintW();
  // Prints the weight deltas for debug purposes.
  void PrintDW();
  // Returns true if this is a 2-d lstm.
  bool Is2D() const {
    return is_2d_;
  }
private:
  // Resizes forward data to cope with an input image of the given width.
  void ResizeForward(const NetworkIO &input);
private:
  // Size of padded input to weight matrices = ni_ + no_ for 1-D operation
  // and ni_ + 2 * no_ for 2-D operation. Note that there is a phantom 1 input
  // for the bias that makes the weight matrices of size [na + 1][no].
  int32_t na_;
  // Number of internal states. Equal to no_ except for a softmax LSTM.
  // ns_ is NOT serialized, but is calculated from gate_weights_.
  int32_t ns_;
  // Number of additional feedback states. The softmax types feed back
  // additional output information on top of the ns_ internal states.
  // In the case of a binary-coded (EMBEDDED) softmax, nf_ < no_.
  int32_t nf_;
  // Flag indicating 2-D operation.
  bool is_2d_;
  // Gate weight arrays of size [na + 1, no].
  WeightMatrix gate_weights_[WT_COUNT];
  // Used only if this is a softmax LSTM.
  FullyConnected *softmax_;
  // Input padded with previous output of size [width, na].
  NetworkIO source_;
  // Internal state used during forward operation, of size [width, ns].
  NetworkIO state_;
  // State of the 2-d maxpool, generated during forward, used during backward.
  GENERIC_2D_ARRAY<int8_t> which_fg_;
  // Internal state saved from forward, but used only during backward.
  NetworkIO node_values_[WT_COUNT];
  // Preserved input stride_map, saved by Forward and used by Backward.
  // NOTE(review): an earlier comment said "when NT_LSTM_SQUASHED", but no
  // such type exists here; Backward uses input_map_ unconditionally.
  StrideMap input_map_;
  // Width (timesteps) of the last forward input, saved for Backward.
  int input_width_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_LSTM_H_
|
2301_81045437/tesseract
|
src/lstm/lstm.h
|
C++
|
apache-2.0
| 6,758
|
///////////////////////////////////////////////////////////////////////
// File: lstmrecognizer.cpp
// Description: Top-level line recognizer class for LSTM-based networks.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "lstmrecognizer.h"
#include <allheaders.h>
#include "dict.h"
#include "genericheap.h"
#include "helpers.h"
#include "imagedata.h"
#include "input.h"
#include "lstm.h"
#include "normalis.h"
#include "pageres.h"
#include "ratngs.h"
#include "recodebeam.h"
#include "scrollview.h"
#include "statistc.h"
#include "tprintf.h"
#include <unordered_set>
#include <vector>
namespace tesseract {
// Default ratio between dict and non-dict words.
const double kDictRatio = 2.25;
// Default certainty offset to give the dictionary a chance.
const double kCertOffset = -0.085;
// Constructs a recognizer that will look for language data files under the
// given path prefix; everything else is set up by the delegated default
// constructor.
LSTMRecognizer::LSTMRecognizer(const std::string &language_data_path_prefix)
    : LSTMRecognizer::LSTMRecognizer() {
  ccutil_.language_data_path_prefix = language_data_path_prefix;
}
// Default constructor: no network loaded yet, training counters zeroed,
// hyperparameters zeroed, and the null (no-character) code initialized to
// the broken-unichar placeholder.
LSTMRecognizer::LSTMRecognizer()
    : network_(nullptr)
    , training_flags_(0)
    , training_iteration_(0)
    , sample_iteration_(0)
    , null_char_(UNICHAR_BROKEN)
    , learning_rate_(0.0f)
    , momentum_(0.0f)
    , adam_beta_(0.0f)
    , dict_(nullptr)
    , search_(nullptr)
    , debug_win_(nullptr) {}
// Destructor frees the owned network, dictionary and beam search; the debug
// window is not deleted here.
LSTMRecognizer::~LSTMRecognizer() {
  delete network_;
  delete dict_;
  delete search_;
}
// Loads a model from mgr, including the dictionary only if lang is not null.
// Returns false only if the LSTM component itself cannot be read; a missing
// dictionary is tolerated.
bool LSTMRecognizer::Load(const ParamsVectors *params, const std::string &lang,
                          TessdataManager *mgr) {
  TFile fp;
  const bool model_ok = mgr->GetComponent(TESSDATA_LSTM, &fp) && DeSerialize(mgr, &fp);
  if (!model_ok) {
    return false;
  }
  if (!lang.empty()) {
    // Allow it to run without a dictionary: ignore the result.
    LoadDictionary(params, lang, mgr);
  }
  return true;
}
// Writes to the given file. Returns false in case of error.
// The unicharset and recoder are written inline only when mgr cannot provide
// them as separate traineddata components.
bool LSTMRecognizer::Serialize(const TessdataManager *mgr, TFile *fp) const {
  const bool embed_charsets = mgr == nullptr ||
                              !mgr->IsComponentAvailable(TESSDATA_LSTM_RECODER) ||
                              !mgr->IsComponentAvailable(TESSDATA_LSTM_UNICHARSET);
  // Short-circuit && preserves the original write order and stops at the
  // first failure, exactly like the if/return ladder it replaces.
  return network_->Serialize(fp) &&
         (!embed_charsets || GetUnicharset().save_to_file(fp)) &&
         fp->Serialize(network_str_) &&
         fp->Serialize(&training_flags_) &&
         fp->Serialize(&training_iteration_) &&
         fp->Serialize(&sample_iteration_) &&
         fp->Serialize(&null_char_) &&
         fp->Serialize(&adam_beta_) &&
         fp->Serialize(&learning_rate_) &&
         fp->Serialize(&momentum_) &&
         (!embed_charsets || !IsRecoding() || recoder_.Serialize(fp));
}
// Reads from the given file. Returns false in case of error.
// Replaces any previously-loaded network. Charsets are read inline only when
// mgr cannot supply them as separate traineddata components.
bool LSTMRecognizer::DeSerialize(const TessdataManager *mgr, TFile *fp) {
  delete network_;
  network_ = Network::CreateFromFile(fp);
  if (network_ == nullptr) {
    return false;
  }
  const bool embed_charsets = mgr == nullptr ||
                              !mgr->IsComponentAvailable(TESSDATA_LSTM_RECODER) ||
                              !mgr->IsComponentAvailable(TESSDATA_LSTM_UNICHARSET);
  if (embed_charsets && !ccutil_.unicharset.load_from_file(fp, false)) {
    return false;
  }
  // Fixed-order scalar fields; && preserves the read order of the original
  // if/return ladder and stops at the first failure.
  const bool fields_ok = fp->DeSerialize(network_str_) &&
                         fp->DeSerialize(&training_flags_) &&
                         fp->DeSerialize(&training_iteration_) &&
                         fp->DeSerialize(&sample_iteration_) &&
                         fp->DeSerialize(&null_char_) &&
                         fp->DeSerialize(&adam_beta_) &&
                         fp->DeSerialize(&learning_rate_) &&
                         fp->DeSerialize(&momentum_);
  if (!fields_ok) {
    return false;
  }
  if (embed_charsets) {
    if (!LoadRecoder(fp)) {
      return false;
    }
  } else if (!LoadCharsets(mgr)) {
    return false;
  }
  network_->SetRandomizer(&randomizer_);
  network_->CacheXScaleFactor(network_->XScaleFactor());
  return true;
}
// Loads the charsets from mgr: first the unicharset component, then the
// recoder component. Returns false if either is missing or unreadable.
bool LSTMRecognizer::LoadCharsets(const TessdataManager *mgr) {
  TFile fp;
  return mgr->GetComponent(TESSDATA_LSTM_UNICHARSET, &fp) &&
         ccutil_.unicharset.load_from_file(&fp, false) &&
         mgr->GetComponent(TESSDATA_LSTM_RECODER, &fp) &&
         LoadRecoder(&fp);
}
// Loads the Recoder. In non-recoding mode, installs an identity (pass-through)
// mapping and switches the model onto the compressed-unicharset code path.
bool LSTMRecognizer::LoadRecoder(TFile *fp) {
  if (!IsRecoding()) {
    recoder_.SetupPassThrough(GetUnicharset());
    training_flags_ |= TF_COMPRESS_UNICHARSET;
    return true;
  }
  if (!recoder_.DeSerialize(fp)) {
    return false;
  }
  // Sanity check: the space character must survive an encode round trip.
  RecodedCharID code;
  recoder_.EncodeUnichar(UNICHAR_SPACE, &code);
  if (code(0) != UNICHAR_SPACE) {
    tprintf("Space was garbled in recoding!!\n");
    return false;
  }
  return true;
}
// Loads the dictionary if possible from the traineddata file.
// Prints a warning message, and returns false but otherwise fails silently
// and continues to work without it if loading fails.
// Note that dictionary load is independent from DeSerialize, but dependent
// on the unicharset matching. This enables training to deserialize a model
// from checkpoint or restore without having to go back and reload the
// dictionary.
// Some parameters have to be passed in (from langdata/config/api via Tesseract)
bool LSTMRecognizer::LoadDictionary(const ParamsVectors *params, const std::string &lang,
                                    TessdataManager *mgr) {
  delete dict_;
  dict_ = new Dict(&ccutil_);
  // Pull user-supplied word/pattern file settings from the incoming params.
  dict_->user_words_file.ResetFrom(params);
  dict_->user_words_suffix.ResetFrom(params);
  dict_->user_patterns_file.ResetFrom(params);
  dict_->user_patterns_suffix.ResetFrom(params);
  dict_->SetupForLoad(Dict::GlobalDawgCache());
  dict_->LoadLSTM(lang, mgr);
  if (!dict_->FinishLoad()) {
    if (log_level <= 0) {
      tprintf("Failed to load any lstm-specific dictionaries for lang %s!!\n", lang.c_str());
    }
    delete dict_;
    dict_ = nullptr;
    return false;
  }
  return true; // Success.
}
// Recognizes the line image, contained within image_data, returning the
// ratings matrix and matching box_word for each WERD_RES in the output.
// If lstm_choice_mode != 0, additionally extracts per-symbol alternative
// choices and per-character timesteps into the output words.
void LSTMRecognizer::RecognizeLine(const ImageData &image_data,
                                   float invert_threshold, bool debug,
                                   double worst_dict_cert, const TBOX &line_box,
                                   PointerVector<WERD_RES> *words, int lstm_choice_mode,
                                   int lstm_choice_amount) {
  NetworkIO outputs;
  float scale_factor;
  NetworkIO inputs;
  // Run the network forward; bail out silently if the line is unusable.
  if (!RecognizeLine(image_data, invert_threshold, debug, false, false, &scale_factor, &inputs, &outputs)) {
    return;
  }
  // The beam search object is created lazily and reused across calls.
  if (search_ == nullptr) {
    search_ = new RecodeBeamSearch(recoder_, null_char_, SimpleTextOutput(), dict_);
  }
  search_->excludedUnichars.clear();
  search_->Decode(outputs, kDictRatio, kCertOffset, worst_dict_cert, &GetUnicharset(),
                  lstm_choice_mode);
  search_->ExtractBestPathAsWords(line_box, scale_factor, debug, &GetUnicharset(), words,
                                  lstm_choice_mode);
  if (lstm_choice_mode) {
    search_->extractSymbolChoices(&GetUnicharset());
    // Run up to lstm_choice_amount secondary decodes to accumulate
    // alternative symbol choices beyond the best path.
    for (int i = 0; i < lstm_choice_amount; ++i) {
      search_->DecodeSecondaryBeams(outputs, kDictRatio, kCertOffset, worst_dict_cert,
                                    &GetUnicharset(), lstm_choice_mode);
      search_->extractSymbolChoices(&GetUnicharset());
    }
    search_->segmentTimestepsByCharacters();
    // Walk the characters of the line in order (char_it) and hand each
    // character's choices/timesteps to the word that owns it.
    unsigned char_it = 0;
    for (size_t i = 0; i < words->size(); ++i) {
      for (int j = 0; j < words->at(i)->end; ++j) {
        if (char_it < search_->ctc_choices.size()) {
          words->at(i)->CTC_symbol_choices.push_back(search_->ctc_choices[char_it]);
        }
        if (char_it < search_->segmentedTimesteps.size()) {
          words->at(i)->segmented_timesteps.push_back(search_->segmentedTimesteps[char_it]);
        }
        ++char_it;
      }
      words->at(i)->timesteps =
          search_->combineSegmentedTimesteps(&words->at(i)->segmented_timesteps);
    }
    // Clear scratch state so the next line starts clean.
    search_->segmentedTimesteps.clear();
    search_->ctc_choices.clear();
    search_->excludedUnichars.clear();
  }
}
// Helper computes min, mean and standard deviation (in [0,1] units) of the
// best non-null output activation across all timesteps.
void LSTMRecognizer::OutputStats(const NetworkIO &outputs, float *min_output, float *mean_output,
                                 float *sd) {
  const int kOutputScale = INT8_MAX;
  STATS stats(0, kOutputScale);
  for (int t = 0; t < outputs.Width(); ++t) {
    const int top_label = outputs.BestLabel(t, nullptr);
    if (top_label == null_char_) {
      continue; // Null outputs don't count towards the statistics.
    }
    stats.add(static_cast<int>(kOutputScale * outputs.f(t)[top_label]), 1);
  }
  if (stats.get_total() == 0) {
    // If the output is all nulls it could be that the photometric
    // interpretation is wrong, so make it look bad, so the other way can win,
    // even if not great.
    *min_output = 0.0f;
    *mean_output = 0.0f;
    *sd = 1.0f;
    return;
  }
  *min_output = static_cast<float>(stats.min_bucket()) / kOutputScale;
  *mean_output = stats.mean() / kOutputScale;
  *sd = stats.sd() / kOutputScale;
}
// Recognizes the image_data, returning the labels,
// scores, and corresponding pairs of start, end x-coords in coords.
// If invert_threshold > 0 and the mean best output falls below it, the image
// is also tried inverted and the better interpretation wins.
bool LSTMRecognizer::RecognizeLine(const ImageData &image_data,
                                   float invert_threshold, bool debug,
                                   bool re_invert, bool upside_down, float *scale_factor,
                                   NetworkIO *inputs, NetworkIO *outputs) {
  // This ensures consistent recognition results.
  SetRandomSeed();
  int min_width = network_->XScaleFactor();
  Image pix = Input::PrepareLSTMInputs(image_data, network_, min_width, &randomizer_, scale_factor);
  if (pix == nullptr) {
    tprintf("Line cannot be recognized!!\n");
    return false;
  }
  // Maximum width of image to train on.
  const int kMaxImageWidth = 128 * pixGetHeight(pix);
  if (network_->IsTraining() && pixGetWidth(pix) > kMaxImageWidth) {
    tprintf("Image too large to learn!! Size = %dx%d\n", pixGetWidth(pix), pixGetHeight(pix));
    pix.destroy();
    return false;
  }
  if (upside_down) {
    // In-place 180-degree rotation (leptonica allows src == dest here).
    pixRotate180(pix, pix);
  }
  // Reduction factor from image to coords.
  *scale_factor = min_width / *scale_factor;
  inputs->set_int_mode(IsIntMode());
  // Re-seed so the input preparation sees the same random state each call.
  SetRandomSeed();
  Input::PreparePixInput(network_->InputShape(), pix, &randomizer_, inputs);
  network_->Forward(debug, *inputs, nullptr, &scratch_space_, outputs);
  // Check for auto inversion.
  if (invert_threshold > 0.0f) {
    float pos_min, pos_mean, pos_sd;
    OutputStats(*outputs, &pos_min, &pos_mean, &pos_sd);
    if (pos_mean < invert_threshold) {
      // Run again inverted and see if it is any better.
      NetworkIO inv_inputs, inv_outputs;
      inv_inputs.set_int_mode(IsIntMode());
      SetRandomSeed();
      pixInvert(pix, pix);
      Input::PreparePixInput(network_->InputShape(), pix, &randomizer_, &inv_inputs);
      network_->Forward(debug, inv_inputs, nullptr, &scratch_space_, &inv_outputs);
      float inv_min, inv_mean, inv_sd;
      OutputStats(inv_outputs, &inv_min, &inv_mean, &inv_sd);
      if (inv_mean > pos_mean) {
        // Inverted did better. Use inverted data.
        if (debug) {
          tprintf("Inverting image: old min=%g, mean=%g, sd=%g, inv %g,%g,%g\n", pos_min, pos_mean,
                  pos_sd, inv_min, inv_mean, inv_sd);
        }
        *outputs = std::move(inv_outputs);
        *inputs = std::move(inv_inputs);
      } else if (re_invert) {
        // Inverting was not an improvement, so undo and run again, so the
        // outputs match the best forward result.
        SetRandomSeed();
        network_->Forward(debug, *inputs, nullptr, &scratch_space_, outputs);
      }
    }
  }
  pix.destroy();
  if (debug) {
    std::vector<int> labels, coords;
    LabelsFromOutputs(*outputs, &labels, &coords);
#ifndef GRAPHICS_DISABLED
    DisplayForward(*inputs, labels, coords, "LSTMForward", &debug_win_);
#endif
    DebugActivationPath(*outputs, labels, coords);
  }
  return true;
}
// Converts an array of labels to utf-8, whether or not the labels are
// augmented with character boundaries. Null labels are skipped.
std::string LSTMRecognizer::DecodeLabels(const std::vector<int> &labels) {
  std::string text;
  unsigned pos = 0;
  unsigned next = 1;
  while (pos < labels.size()) {
    if (labels[pos] == null_char_) {
      next = pos + 1;
    } else {
      // DecodeLabel advances next past any multi-code sequence.
      text += DecodeLabel(labels, pos, &next, nullptr);
    }
    pos = next;
  }
  return text;
}
#ifndef GRAPHICS_DISABLED
// Displays the forward results in a window with the characters and
// boundaries as determined by the labels and label_coords.
// *window is (re)created by Network::ClearWindow and sized to the input image.
void LSTMRecognizer::DisplayForward(const NetworkIO &inputs, const std::vector<int> &labels,
                                    const std::vector<int> &label_coords, const char *window_name,
                                    ScrollView **window) {
  // Render the network inputs as an image for display.
  Image input_pix = inputs.ToPix();
  Network::ClearWindow(false, window_name, pixGetWidth(input_pix), pixGetHeight(input_pix), window);
  int line_height = Network::DisplayImage(input_pix, *window);
  DisplayLSTMOutput(labels, label_coords, line_height, *window);
}
// Displays the labels and cuts at the corresponding xcoords.
// Size of labels should match xcoords.
void LSTMRecognizer::DisplayLSTMOutput(const std::vector<int> &labels,
                                       const std::vector<int> &xcoords, int height,
                                       ScrollView *window) {
  // xcoords are in network-output timesteps; scale back to image pixels.
  int x_scale = network_->XScaleFactor();
  window->TextAttributes("Arial", height / 4, false, false, false);
  unsigned end = 1;
  for (unsigned start = 0; start < labels.size(); start = end) {
    int xpos = xcoords[start] * x_scale;
    if (labels[start] == null_char_) {
      end = start + 1;
      // Nulls get a red cut line but no text.
      window->Pen(ScrollView::RED);
    } else {
      window->Pen(ScrollView::GREEN);
      // DecodeLabel advances end past any multi-code sequence.
      const char *str = DecodeLabel(labels, start, &end, nullptr);
      if (*str == '\\') {
        // Escape a leading backslash for the display.
        str = "\\\\";
      }
      // Center the text over the span of codes it decoded from.
      xpos = xcoords[(start + end) / 2] * x_scale;
      window->Text(xpos, height, str);
    }
    window->Line(xpos, 0, xpos, height * 3 / 2);
  }
  window->Update();
}
#endif // !GRAPHICS_DISABLED
// Prints debug output detailing the activation path that is implied by the
// labels and xcoords. xcoords has one trailing entry beyond labels, so
// xcoords[i + 1] is always valid for a label index i.
void LSTMRecognizer::DebugActivationPath(const NetworkIO &outputs, const std::vector<int> &labels,
                                         const std::vector<int> &xcoords) {
  if (xcoords[0] > 0) {
    // Leading nulls before the first label.
    DebugActivationRange(outputs, "<null>", null_char_, 0, xcoords[0]);
  }
  unsigned pos = 0;
  while (pos < labels.size()) {
    unsigned next = pos + 1;
    if (labels[pos] == null_char_) {
      DebugActivationRange(outputs, "<null>", null_char_, xcoords[pos], xcoords[next]);
    } else {
      int decoded;
      // DecodeLabel advances next past any multi-code sequence.
      const char *label = DecodeLabel(labels, pos, &next, &decoded);
      DebugActivationRange(outputs, label, labels[pos], xcoords[pos], xcoords[pos + 1]);
      for (unsigned i = pos + 1; i < next; ++i) {
        DebugActivationRange(outputs, DecodeSingleLabel(labels[i]), labels[i], xcoords[i],
                             xcoords[i + 1]);
      }
    }
    pos = next;
  }
}
// Prints debug output detailing activations and 2nd choice over a range
// of positions [x_start, x_end).
void LSTMRecognizer::DebugActivationRange(const NetworkIO &outputs, const char *label,
                                          int best_choice, int x_start, int x_end) {
  tprintf("%s=%d On [%d, %d), scores=", label, best_choice, x_start, x_end);
  double peak_score = 0.0;
  double mean_score = 0.0;
  const int width = x_end - x_start;
  for (int x = x_start; x < x_end; ++x) {
    const float *line = outputs.f(x);
    const double score = line[best_choice] * 100.0;
    if (score > peak_score) {
      peak_score = score;
    }
    mean_score += score / width;
    // Find the strongest competitor to best_choice at this timestep.
    int runner_up = 0;
    double runner_up_score = 0.0;
    for (int c = 0; c < outputs.NumFeatures(); ++c) {
      if (c != best_choice && line[c] > runner_up_score) {
        runner_up = c;
        runner_up_score = line[c];
      }
    }
    tprintf(" %.3g(%s=%d=%.3g)", score, DecodeSingleLabel(runner_up), runner_up,
            runner_up_score * 100.0);
  }
  tprintf(", Mean=%g, max=%g\n", mean_score, peak_score);
}
// Helper returns true if the null_char is the winner at t, and it beats the
// null_threshold, or the next choice is space, in which case we will use the
// null anyway.
#if 0 // TODO: unused, remove if still unused after 2020.
static bool NullIsBest(const NetworkIO& output, float null_thr,
int null_char, int t) {
if (output.f(t)[null_char] >= null_thr) return true;
if (output.BestLabel(t, null_char, null_char, nullptr) != UNICHAR_SPACE)
return false;
return output.f(t)[null_char] > output.f(t)[UNICHAR_SPACE];
}
#endif
// Converts the network output to a sequence of labels. Outputs labels, scores
// and start xcoords of each char, and each null_char_, with an additional
// final xcoord for the end of the output.
// The conversion method is determined by internal state.
void LSTMRecognizer::LabelsFromOutputs(const NetworkIO &outputs, std::vector<int> *labels,
                                       std::vector<int> *xcoords) {
  if (!SimpleTextOutput()) {
    // CTC-style output: decode through the beam search / recoder.
    LabelsViaReEncode(outputs, labels, xcoords);
  } else {
    LabelsViaSimpleText(outputs, labels, xcoords);
  }
}
// As LabelsViaCTC except that this function constructs the best path that
// contains only legal sequences of subcodes for CJK.
void LSTMRecognizer::LabelsViaReEncode(const NetworkIO &output, std::vector<int> *labels,
                                       std::vector<int> *xcoords) {
  // The beam search object is created lazily and reused across calls.
  if (search_ == nullptr) {
    search_ = new RecodeBeamSearch(recoder_, null_char_, SimpleTextOutput(), dict_);
  }
  search_->Decode(output, 1.0, 0.0, RecodeBeamSearch::kMinCertainty, nullptr);
  search_->ExtractBestPathAsLabels(labels, xcoords);
}
// Converts the network output to a sequence of labels, with scores, using
// the simple character model (each position is a char, and the null_char_ is
// mainly intended for tail padding.)
void LSTMRecognizer::LabelsViaSimpleText(const NetworkIO &output, std::vector<int> *labels,
                                         std::vector<int> *xcoords) {
  labels->clear();
  xcoords->clear();
  const int width = output.Width();
  for (int t = 0; t < width; ++t) {
    float score = 0.0f;
    const int best = output.BestLabel(t, &score);
    if (best == null_char_) {
      continue; // Nulls are padding; they produce no label.
    }
    labels->push_back(best);
    xcoords->push_back(t);
  }
  // Trailing xcoord marks the end of the output.
  xcoords->push_back(width);
}
// Returns a string corresponding to the label starting at start. Sets *end
// to the next start and if non-null, *decoded to the unichar id.
// In recoding mode a single unichar may span several labels; in direct mode
// each label is already a unichar id.
const char *LSTMRecognizer::DecodeLabel(const std::vector<int> &labels, unsigned start, unsigned *end,
                                        int *decoded) {
  *end = start + 1;
  if (IsRecoding()) {
    // Decode labels via recoder_.
    RecodedCharID code;
    if (labels[start] == null_char_) {
      if (decoded != nullptr) {
        code.Set(0, null_char_);
        *decoded = recoder_.DecodeUnichar(code);
      }
      return "<null>";
    }
    // Accumulate codes (skipping intervening nulls) until the sequence
    // decodes to a valid unichar id and may legally stop here.
    unsigned index = start;
    while (index < labels.size() && code.length() < RecodedCharID::kMaxCodeLen) {
      code.Set(code.length(), labels[index++]);
      while (index < labels.size() && labels[index] == null_char_) {
        ++index;
      }
      int uni_id = recoder_.DecodeUnichar(code);
      // If the next label isn't a valid first code, then we need to continue
      // extending even if we have a valid uni_id from this prefix.
      if (uni_id != INVALID_UNICHAR_ID &&
          (index == labels.size() || code.length() == RecodedCharID::kMaxCodeLen ||
           recoder_.IsValidFirstCode(labels[index]))) {
        *end = index;
        if (decoded != nullptr) {
          *decoded = uni_id;
        }
        if (uni_id == UNICHAR_SPACE) {
          return " ";
        }
        return GetUnicharset().get_normed_unichar(uni_id);
      }
    }
    return "<Undecodable>";
  } else {
    // Direct mode: the label is the unichar id.
    if (decoded != nullptr) {
      *decoded = labels[start];
    }
    if (labels[start] == null_char_) {
      return "<null>";
    }
    if (labels[start] == UNICHAR_SPACE) {
      return " ";
    }
    return GetUnicharset().get_normed_unichar(labels[start]);
  }
}
// Returns a string corresponding to a given single label id, falling back to
// a default of ".." for part of a multi-label unichar-id.
const char *LSTMRecognizer::DecodeSingleLabel(int label) {
  if (label == null_char_) {
    return "<null>";
  }
  if (IsRecoding()) {
    // Decode label via recoder_.
    RecodedCharID code;
    code.Set(0, label);
    label = recoder_.DecodeUnichar(code);
    if (label == INVALID_UNICHAR_ID) {
      return ".."; // Part of a bigger code.
    }
  }
  return label == UNICHAR_SPACE ? " " : GetUnicharset().get_normed_unichar(label);
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/lstmrecognizer.cpp
|
C++
|
apache-2.0
| 22,194
|
///////////////////////////////////////////////////////////////////////
// File: lstmrecognizer.h
// Description: Top-level line recognizer class for LSTM-based networks.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_LSTMRECOGNIZER_H_
#define TESSERACT_LSTM_LSTMRECOGNIZER_H_
#include "ccutil.h"
#include "helpers.h"
#include "matrix.h"
#include "network.h"
#include "networkscratch.h"
#include "params.h"
#include "recodebeam.h"
#include "series.h"
#include "unicharcompress.h"
class BLOB_CHOICE_IT;
struct Pix;
class ROW_RES;
class ScrollView;
class TBOX;
class WERD_RES;
namespace tesseract {
class Dict;
class ImageData;
// Enum indicating training mode control flags.
// Values are bit flags stored in LSTMRecognizer::training_flags_ and
// serialized with the model.
enum TrainingFlags {
  TF_INT_MODE = 1,              // Set once the network has been converted to int mode.
  TF_COMPRESS_UNICHARSET = 64,  // recoder_ re-encodes text to a smaller space.
};
// Top-level line recognizer class for LSTM-based networks.
// Note that a sub-class, LSTMTrainer is used for training.
class TESS_API LSTMRecognizer {
public:
  LSTMRecognizer();
  // Also records the prefix under which ccutil_ looks up language data files.
  LSTMRecognizer(const std::string &language_data_path_prefix);
  ~LSTMRecognizer();
  // Returns the number of outputs of the network.
  int NumOutputs() const {
    return network_->NumOutputs();
  }
  // Return the training iterations.
  int training_iteration() const {
    return training_iteration_;
  }
  // Return the sample iterations.
  int sample_iteration() const {
    return sample_iteration_;
  }
  // Return the learning rate.
  float learning_rate() const {
    return learning_rate_;
  }
  // Returns the loss type of the network's output layer, or LT_NONE if no
  // network is loaded.
  LossType OutputLossType() const {
    if (network_ == nullptr) {
      return LT_NONE;
    }
    StaticShape shape;
    shape = network_->OutputShape(shape);
    return shape.loss_type();
  }
  // True if the output uses the simple softmax text model (not CTC).
  bool SimpleTextOutput() const {
    return OutputLossType() == LT_SOFTMAX;
  }
  // True if the network has been converted to int mode (see ConvertToInt).
  bool IsIntMode() const {
    return (training_flags_ & TF_INT_MODE) != 0;
  }
  // True if recoder_ is active to re-encode text to a smaller space.
  bool IsRecoding() const {
    return (training_flags_ & TF_COMPRESS_UNICHARSET) != 0;
  }
  // Returns true if the network is a TensorFlow network.
  bool IsTensorFlow() const {
    return network_->type() == NT_TENSORFLOW;
  }
  // Returns a vector of layer ids that can be passed to other layer functions
  // to access a specific layer.
  std::vector<std::string> EnumerateLayers() const {
    ASSERT_HOST(network_ != nullptr && network_->type() == NT_SERIES);
    auto *series = static_cast<Series *>(network_);
    std::vector<std::string> layers;
    series->EnumerateLayers(nullptr, layers);
    return layers;
  }
  // Returns a specific layer from its id (from EnumerateLayers).
  // Ids are colon-prefixed, e.g. ":0:1".
  Network *GetLayer(const std::string &id) const {
    ASSERT_HOST(network_ != nullptr && network_->type() == NT_SERIES);
    ASSERT_HOST(id.length() > 1 && id[0] == ':');
    auto *series = static_cast<Series *>(network_);
    return series->GetLayer(&id[1]);
  }
  // Returns the learning rate of the layer from its id, falling back to the
  // global learning rate when layer-specific rates are not enabled.
  float GetLayerLearningRate(const std::string &id) const {
    ASSERT_HOST(network_ != nullptr && network_->type() == NT_SERIES);
    if (network_->TestFlag(NF_LAYER_SPECIFIC_LR)) {
      ASSERT_HOST(id.length() > 1 && id[0] == ':');
      auto *series = static_cast<Series *>(network_);
      return series->LayerLearningRate(&id[1]);
    } else {
      return learning_rate_;
    }
  }
  // Return the network string.
  const char *GetNetwork() const {
    return network_str_.c_str();
  }
  // Return the adam beta.
  float GetAdamBeta() const {
    return adam_beta_;
  }
  // Return the momentum.
  float GetMomentum() const {
    return momentum_;
  }
  // Multiplies the all the learning rate(s) by the given factor.
  void ScaleLearningRate(double factor) {
    ASSERT_HOST(network_ != nullptr && network_->type() == NT_SERIES);
    learning_rate_ *= factor;
    if (network_->TestFlag(NF_LAYER_SPECIFIC_LR)) {
      std::vector<std::string> layers = EnumerateLayers();
      for (auto &layer : layers) {
        ScaleLayerLearningRate(layer, factor);
      }
    }
  }
  // Multiplies the learning rate of the layer with id, by the given factor.
  void ScaleLayerLearningRate(const std::string &id, double factor) {
    ASSERT_HOST(network_ != nullptr && network_->type() == NT_SERIES);
    ASSERT_HOST(id.length() > 1 && id[0] == ':');
    auto *series = static_cast<Series *>(network_);
    series->ScaleLayerLearningRate(&id[1], factor);
  }
  // Set the all the learning rate(s) to the given value.
  void SetLearningRate(float learning_rate)
  {
    ASSERT_HOST(network_ != nullptr && network_->type() == NT_SERIES);
    learning_rate_ = learning_rate;
    if (network_->TestFlag(NF_LAYER_SPECIFIC_LR)) {
      for (auto &id : EnumerateLayers()) {
        SetLayerLearningRate(id, learning_rate);
      }
    }
  }
  // Set the learning rate of the layer with id, by the given value.
  void SetLayerLearningRate(const std::string &id, float learning_rate)
  {
    ASSERT_HOST(network_ != nullptr && network_->type() == NT_SERIES);
    ASSERT_HOST(id.length() > 1 && id[0] == ':');
    auto *series = static_cast<Series *>(network_);
    series->SetLayerLearningRate(&id[1], learning_rate);
  }
  // Converts the network to int if not already.
  void ConvertToInt() {
    if ((training_flags_ & TF_INT_MODE) == 0) {
      network_->ConvertToInt();
      training_flags_ |= TF_INT_MODE;
    }
  }
  // Provides access to the UNICHARSET that this classifier works with.
  const UNICHARSET &GetUnicharset() const {
    return ccutil_.unicharset;
  }
  UNICHARSET &GetUnicharset() {
    return ccutil_.unicharset;
  }
  // Provides access to the UnicharCompress that this classifier works with.
  const UnicharCompress &GetRecoder() const {
    return recoder_;
  }
  // Provides access to the Dict that this classifier works with.
  // May be nullptr if no dictionary was loaded.
  const Dict *GetDict() const {
    return dict_;
  }
  Dict *GetDict() {
    return dict_;
  }
  // Sets the sample iteration to the given value. The sample_iteration_
  // determines the seed for the random number generator. The training
  // iteration is incremented only by a successful training iteration.
  void SetIteration(int iteration) {
    sample_iteration_ = iteration;
  }
  // Accessors for textline image normalization.
  int NumInputs() const {
    return network_->NumInputs();
  }
  // Return the null char index.
  int null_char() const {
    return null_char_;
  }
  // Loads a model from mgr, including the dictionary only if lang is not null.
  bool Load(const ParamsVectors *params, const std::string &lang, TessdataManager *mgr);
  // Writes to the given file. Returns false in case of error.
  // If mgr contains a unicharset and recoder, then they are not encoded to fp.
  bool Serialize(const TessdataManager *mgr, TFile *fp) const;
  // Reads from the given file. Returns false in case of error.
  // If mgr contains a unicharset and recoder, then they are taken from there,
  // otherwise, they are part of the serialization in fp.
  bool DeSerialize(const TessdataManager *mgr, TFile *fp);
  // Loads the charsets from mgr.
  bool LoadCharsets(const TessdataManager *mgr);
  // Loads the Recoder.
  bool LoadRecoder(TFile *fp);
  // Loads the dictionary if possible from the traineddata file.
  // Prints a warning message, and returns false but otherwise fails silently
  // and continues to work without it if loading fails.
  // Note that dictionary load is independent from DeSerialize, but dependent
  // on the unicharset matching. This enables training to deserialize a model
  // from checkpoint or restore without having to go back and reload the
  // dictionary.
  bool LoadDictionary(const ParamsVectors *params, const std::string &lang, TessdataManager *mgr);
  // Recognizes the line image, contained within image_data, returning the
  // recognized tesseract WERD_RES for the words.
  // If invert_threshold > 0, tries inverted as well if the normal
  // interpretation doesn't produce a result which at least reaches
  // that threshold. The line_box is used for computing the
  // box_word in the output words. worst_dict_cert is the worst certainty that
  // will be used in a dictionary word.
  void RecognizeLine(const ImageData &image_data, float invert_threshold, bool debug, double worst_dict_cert,
                     const TBOX &line_box, PointerVector<WERD_RES> *words, int lstm_choice_mode = 0,
                     int lstm_choice_amount = 5);
  // Helper computes min and mean best results in the output.
  void OutputStats(const NetworkIO &outputs, float *min_output, float *mean_output, float *sd);
  // Recognizes the image_data, returning the labels,
  // scores, and corresponding pairs of start, end x-coords in coords.
  // Returned in scale_factor is the reduction factor
  // between the image and the output coords, for computing bounding boxes.
  // If re_invert is true, the input is inverted back to its original
  // photometric interpretation if inversion is attempted but fails to
  // improve the results. This ensures that outputs contains the correct
  // forward outputs for the best photometric interpretation.
  // inputs is filled with the used inputs to the network.
  bool RecognizeLine(const ImageData &image_data, float invert_threshold, bool debug, bool re_invert,
                     bool upside_down, float *scale_factor, NetworkIO *inputs, NetworkIO *outputs);
  // Converts an array of labels to utf-8, whether or not the labels are
  // augmented with character boundaries.
  std::string DecodeLabels(const std::vector<int> &labels);
  // Displays the forward results in a window with the characters and
  // boundaries as determined by the labels and label_coords.
  void DisplayForward(const NetworkIO &inputs, const std::vector<int> &labels,
                      const std::vector<int> &label_coords, const char *window_name,
                      ScrollView **window);
  // Converts the network output to a sequence of labels. Outputs labels, scores
  // and start xcoords of each char, and each null_char_, with an additional
  // final xcoord for the end of the output.
  // The conversion method is determined by internal state.
  void LabelsFromOutputs(const NetworkIO &outputs, std::vector<int> *labels,
                         std::vector<int> *xcoords);
protected:
  // Sets the random seed from the sample_iteration_;
  void SetRandomSeed() {
    int64_t seed = static_cast<int64_t>(sample_iteration_) * 0x10000001;
    randomizer_.set_seed(seed);
    randomizer_.IntRand();
  }
  // Displays the labels and cuts at the corresponding xcoords.
  // Size of labels should match xcoords.
  void DisplayLSTMOutput(const std::vector<int> &labels, const std::vector<int> &xcoords,
                         int height, ScrollView *window);
  // Prints debug output detailing the activation path that is implied by the
  // xcoords.
  void DebugActivationPath(const NetworkIO &outputs, const std::vector<int> &labels,
                           const std::vector<int> &xcoords);
  // Prints debug output detailing activations and 2nd choice over a range
  // of positions.
  void DebugActivationRange(const NetworkIO &outputs, const char *label, int best_choice,
                            int x_start, int x_end);
  // As LabelsViaCTC except that this function constructs the best path that
  // contains only legal sequences of subcodes for recoder_.
  void LabelsViaReEncode(const NetworkIO &output, std::vector<int> *labels,
                         std::vector<int> *xcoords);
  // Converts the network output to a sequence of labels, with scores, using
  // the simple character model (each position is a char, and the null_char_ is
  // mainly intended for tail padding.)
  void LabelsViaSimpleText(const NetworkIO &output, std::vector<int> *labels,
                           std::vector<int> *xcoords);
  // Returns a string corresponding to the label starting at start. Sets *end
  // to the next start and if non-null, *decoded to the unichar id.
  const char *DecodeLabel(const std::vector<int> &labels, unsigned start, unsigned *end, int *decoded);
  // Returns a string corresponding to a given single label id, falling back to
  // a default of ".." for part of a multi-label unichar-id.
  const char *DecodeSingleLabel(int label);
protected:
  // The network hierarchy.
  Network *network_;
  // The unicharset. Only the unicharset element is serialized.
  // Has to be a CCUtil, so Dict can point to it.
  CCUtil ccutil_;
  // For backward compatibility, recoder_ is serialized iff
  // training_flags_ & TF_COMPRESS_UNICHARSET.
  // Further encode/decode ccutil_.unicharset's ids to simplify the unicharset.
  UnicharCompress recoder_;
  // ==Training parameters that are serialized to provide a record of them.==
  std::string network_str_;
  // Flags used to determine the training method of the network.
  // See enum TrainingFlags above.
  int32_t training_flags_;
  // Number of actual backward training steps used.
  int32_t training_iteration_;
  // Index into training sample set. sample_iteration >= training_iteration_.
  int32_t sample_iteration_;
  // Index in softmax of null character. May take the value UNICHAR_BROKEN or
  // ccutil_.unicharset.size().
  int32_t null_char_;
  // Learning rate and momentum multipliers of deltas in backprop.
  float learning_rate_;
  float momentum_;
  // Smoothing factor for 2nd moment of gradients.
  float adam_beta_;
  // === NOT SERIALIZED.
  TRand randomizer_;
  NetworkScratch scratch_space_;
  // Language model (optional) to use with the beam search.
  Dict *dict_;
  // Beam search held between uses to optimize memory allocation/use.
  RecodeBeamSearch *search_;
  // == Debugging parameters.==
  // Recognition debug display window.
  ScrollView *debug_win_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_LSTMRECOGNIZER_H_
|
2301_81045437/tesseract
|
src/lstm/lstmrecognizer.h
|
C++
|
apache-2.0
| 14,378
|
///////////////////////////////////////////////////////////////////////
// File: maxpool.cpp
// Description: Standard Max-Pooling layer.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#include "maxpool.h"
namespace tesseract {
// Constructs a max-pooling layer. A Maxpool is a Reconfig that takes the max
// over each x_scale * y_scale window (see Forward) instead of concatenating
// inputs, so the output depth stays equal to the input depth ni.
Maxpool::Maxpool(const std::string &name, int ni, int x_scale, int y_scale)
    : Reconfig(name, ni, x_scale, y_scale) {
  type_ = NT_MAXPOOL;
  no_ = ni;
}
// Reads this layer's data from the given file.
// Returns false in case of error.
bool Maxpool::DeSerialize(TFile *fp) {
  const bool ok = Reconfig::DeSerialize(fp);
  // Restore the Maxpool invariant (outputs == inputs) after the base class
  // has read ni_; Reconfig itself computes a different no_.
  no_ = ni_;
  return ok;
}
// Runs forward propagation of activations on the input line.
// See Network for a detailed discussion of the arguments.
// For each output timestep, scans the corresponding x_scale_ by y_scale_
// rectangle of input timesteps and keeps, per feature, the maximum value,
// recording the winning input timestep in maxes_ for use by Backward.
void Maxpool::Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
                      NetworkScratch *scratch, NetworkIO *output) {
  output->ResizeScaled(input, x_scale_, y_scale_, no_);
  maxes_.ResizeNoInit(output->Width(), ni_);
  // Remember the input geometry so Backward can restore it.
  back_map_ = input.stride_map();
  StrideMap::Index dest_index(output->stride_map());
  do {
    int out_t = dest_index.t();
    // Top-left corner of the input rectangle for this output position.
    StrideMap::Index src_index(input.stride_map(), dest_index.index(FD_BATCH),
                               dest_index.index(FD_HEIGHT) * y_scale_,
                               dest_index.index(FD_WIDTH) * x_scale_);
    // Find the max input out of x_scale_ groups of y_scale_ inputs.
    // Do it independently for each input dimension.
    int *max_line = maxes_[out_t];
    int in_t = src_index.t();
    // Seed with the top-left element of the rectangle, so every feature has
    // a valid initial max and source index even at image edges.
    output->CopyTimeStepFrom(out_t, input, in_t);
    for (int i = 0; i < ni_; ++i) {
      max_line[i] = in_t;
    }
    for (int x = 0; x < x_scale_; ++x) {
      for (int y = 0; y < y_scale_; ++y) {
        StrideMap::Index src_xy(src_index);
        // AddOffset fails at image boundaries, skipping out-of-range cells.
        if (src_xy.AddOffset(x, FD_WIDTH) && src_xy.AddOffset(y, FD_HEIGHT)) {
          output->MaxpoolTimeStep(out_t, input, src_xy.t(), max_line);
        }
      }
    }
  } while (dest_index.Increment());
}
// Runs backward propagation of errors on the deltas line.
// See Network for a detailed discussion of the arguments.
// Each forward delta is routed back solely to the input position that won
// the forward max, as recorded in maxes_ by Forward.
bool Maxpool::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                       NetworkIO *back_deltas) {
  // Restore the pre-pooling geometry saved in back_map_, then scatter.
  back_deltas->ResizeToMap(fwd_deltas.int_mode(), back_map_, ni_);
  back_deltas->MaxpoolBackward(fwd_deltas, maxes_);
  return true;
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/maxpool.cpp
|
C++
|
apache-2.0
| 3,013
|
///////////////////////////////////////////////////////////////////////
// File: maxpool.h
// Description: Standard Max-Pooling layer.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_MAXPOOL_H_
#define TESSERACT_LSTM_MAXPOOL_H_
#include "reconfig.h"
namespace tesseract {
// Maxpooling reduction. Independently for each input, selects the location
// in the rectangle that contains the max value.
// Backprop propagates only to the position that was the max.
class Maxpool : public Reconfig {
public:
  TESS_API
  Maxpool(const std::string &name, int ni, int x_scale, int y_scale);
  ~Maxpool() override = default;
  // Accessors.
  // Returns the network spec string, e.g. "Mp3,3" for a 3x3 pool.
  std::string spec() const override {
    return "Mp" + std::to_string(y_scale_) + "," + std::to_string(x_scale_);
  }
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(TFile *fp) override;
  // Runs forward propagation of activations on the input line.
  // See Network for a detailed discussion of the arguments.
  void Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
               NetworkScratch *scratch, NetworkIO *output) override;
  // Runs backward propagation of errors on the deltas line.
  // See Network for a detailed discussion of the arguments.
  bool Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                NetworkIO *back_deltas) override;
private:
  // Memory of which input was the max. One winning input timestep per
  // (output timestep, feature), filled in by Forward, read by Backward.
  GENERIC_2D_ARRAY<int> maxes_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_MAXPOOL_H_
|
2301_81045437/tesseract
|
src/lstm/maxpool.h
|
C++
|
apache-2.0
| 2,213
|
///////////////////////////////////////////////////////////////////////
// File: network.cpp
// Description: Base class for neural network implementations.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "network.h"
#include <cstdlib>
// This base class needs to know about all its sub-classes because of the
// factory deserializing method: CreateFromFile.
#include <allheaders.h>
#include "convolve.h"
#include "fullyconnected.h"
#include "input.h"
#include "lstm.h"
#include "maxpool.h"
#include "parallel.h"
#include "reconfig.h"
#include "reversed.h"
#include "scrollview.h"
#include "series.h"
#include "statistc.h"
#ifdef INCLUDE_TENSORFLOW
# include "tfnetwork.h"
#endif
#include "tprintf.h"
namespace tesseract {
#ifndef GRAPHICS_DISABLED
// Min and max window sizes.
const int kMinWinSize = 500;
const int kMaxWinSize = 2000;
// Window frame sizes need adding on to make the content fit.
const int kXWinFrameSize = 30;
const int kYWinFrameSize = 80;
#endif // !GRAPHICS_DISABLED
// String names corresponding to the NetworkType enum.
// Keep in sync with NetworkType.
// Names used in Serialization to allow re-ordering/addition/deletion of
// layer types in NetworkType without invalidating existing network files.
// Serialize writes the name; getNetworkType maps it back by linear search.
static char const *const kTypeNames[NT_COUNT] = {
    "Invalid",     "Input",
    "Convolve",    "Maxpool",
    "Parallel",    "Replicated",
    "ParBidiLSTM", "DepParUDLSTM",
    "Par2dLSTM",   "Series",
    "Reconfig",    "RTLReversed",
    "TTBReversed", "XYTranspose",
    "LSTM",        "SummLSTM",
    "Logistic",    "LinLogistic",
    "LinTanh",     "Tanh",
    "Relu",        "Linear",
    "Softmax",     "SoftmaxNoCTC",
    "LSTMSoftmax", "LSTMBinarySoftmax",
    "TensorFlow",
};
// Default constructor, used prior to deserialization: an NT_NONE network
// with training enabled, zero dimensions and null debug/randomizer pointers.
Network::Network()
    : type_(NT_NONE)
    , training_(TS_ENABLED)
    , needs_to_backprop_(true)
    , network_flags_(0)
    , ni_(0)
    , no_(0)
    , num_weights_(0)
    , forward_win_(nullptr)
    , backward_win_(nullptr)
    , randomizer_(nullptr) {}
// Constructs a network of the given type and name with ni inputs and no
// outputs. Weights are counted later by InitWeights/DeSerialize.
Network::Network(NetworkType type, const std::string &name, int ni, int no)
    : type_(type)
    , training_(TS_ENABLED)
    , needs_to_backprop_(true)
    , network_flags_(0)
    , ni_(ni)
    , no_(no)
    , num_weights_(0)
    , name_(name)
    , forward_win_(nullptr)
    , backward_win_(nullptr)
    , randomizer_(nullptr) {}
// Suspends/Enables/Permanently disables training by setting the training_
// flag. Serialize and DeSerialize only operate on the run-time data if state
// is TS_DISABLED or TS_TEMP_DISABLE. Specifying TS_TEMP_DISABLE will
// temporarily disable layers in state TS_ENABLED, allowing a trainer to
// serialize as if it were a recognizer.
// TS_RE_ENABLE will re-enable layers that were previously in any disabled
// state. If in TS_TEMP_DISABLE then the flag is just changed, but if in
// TS_DISABLED, the deltas in the weight matrices are reinitialized so that a
// recognizer can be converted back to a trainer.
void Network::SetEnableTraining(TrainingState state) {
  switch (state) {
    case TS_RE_ENABLE:
      // Re-enable is honored only from the temporarily-disabled state;
      // a permanently disabled (TS_DISABLED) layer stays disabled.
      if (training_ == TS_TEMP_DISABLE) {
        training_ = TS_ENABLED;
      }
      break;
    case TS_TEMP_DISABLE:
      // Temporary disable applies only to currently enabled layers.
      if (training_ == TS_ENABLED) {
        training_ = state;
      }
      break;
    default:
      // TS_ENABLED and TS_DISABLED are applied unconditionally.
      training_ = state;
      break;
  }
}
// Sets flags that control the action of the network. See NetworkFlags enum
// for bit values. Replaces (does not OR into) the current flags.
void Network::SetNetworkFlags(uint32_t flags) {
  network_flags_ = flags;
}
// Sets up the network for training. Initializes weights using weights of
// scale `range` picked according to the random number generator `randomizer`.
// The base class has no weights of its own: it only records the (borrowed)
// randomizer for subclasses and returns 0 weights initialized.
int Network::InitWeights([[maybe_unused]] float range, TRand *randomizer) {
  randomizer_ = randomizer;
  return 0;
}
// Provides a pointer to a TRand for any networks that care to use it.
// Note that randomizer is a borrowed pointer that should outlive the network
// and should not be deleted by any of the networks.
void Network::SetRandomizer(TRand *randomizer) {
  randomizer_ = randomizer;
}
// Records whether the previous layer requires back_deltas from this one, and
// reports whether this layer itself will need backprop from the next layer
// forward: true if the caller needs deltas or this network has any weights
// of its own to train.
bool Network::SetupNeedsBackprop(bool needs_backprop) {
  needs_to_backprop_ = needs_backprop;
  if (needs_backprop) {
    return true;
  }
  return num_weights_ > 0;
}
// Writes to the given file. Returns false in case of error.
// Format: an NT_NONE sentinel byte (signals the modern name-based format),
// the type name string from kTypeNames, then training state, backprop flag,
// network flags, ni, no, num_weights, and finally the layer name as a
// length-prefixed character run. Must stay in step with CreateFromFile.
bool Network::Serialize(TFile *fp) const {
  int8_t data = NT_NONE;
  if (!fp->Serialize(&data)) {
    return false;
  }
  std::string type_name = kTypeNames[type_];
  if (!fp->Serialize(type_name)) {
    return false;
  }
  data = training_;
  if (!fp->Serialize(&data)) {
    return false;
  }
  data = needs_to_backprop_;
  if (!fp->Serialize(&data)) {
    return false;
  }
  if (!fp->Serialize(&network_flags_)) {
    return false;
  }
  if (!fp->Serialize(&ni_)) {
    return false;
  }
  if (!fp->Serialize(&no_)) {
    return false;
  }
  if (!fp->Serialize(&num_weights_)) {
    return false;
  }
  uint32_t length = name_.length();
  if (!fp->Serialize(&length)) {
    return false;
  }
  return fp->Serialize(name_.c_str(), length);
}
// Reads the layer type from fp. A first byte of NT_NONE indicates the modern
// format, where the type follows as a name string that is looked up in
// kTypeNames; any other byte is a legacy numeric NetworkType value.
// Returns NT_NONE on read error or unrecognized name.
static NetworkType getNetworkType(TFile *fp) {
  int8_t data;
  if (!fp->DeSerialize(&data)) {
    return NT_NONE;
  }
  if (data == NT_NONE) {
    std::string type_name;
    if (!fp->DeSerialize(type_name)) {
      return NT_NONE;
    }
    // Linear search of kTypeNames; data ends at the matching index.
    for (data = 0; data < NT_COUNT && type_name != kTypeNames[data]; ++data) {
    }
    if (data == NT_COUNT) {
      tprintf("Invalid network layer type:%s\n", type_name.c_str());
      return NT_NONE;
    }
  }
  return static_cast<NetworkType>(data);
}
// Reads from the given file. Returns nullptr in case of error.
// Determines the type of the serialized class and calls its DeSerialize
// on a new object of the appropriate type, which is returned.
// Read order must mirror Network::Serialize: type, training state, backprop
// flag, network flags, ni, no, num_weights, name, then the subclass data.
Network *Network::CreateFromFile(TFile *fp) {
  NetworkType type;       // Type of the derived network class.
  TrainingState training; // Are we currently training?
  bool needs_to_backprop; // This network needs to output back_deltas.
  int32_t network_flags;  // Behavior control flags in NetworkFlags.
  int32_t ni;             // Number of input values.
  int32_t no;             // Number of output values.
  int32_t num_weights;    // Number of weights in this and sub-network.
  std::string name;       // A unique name for this layer.
  int8_t data;
  Network *network = nullptr;
  type = getNetworkType(fp);
  if (!fp->DeSerialize(&data)) {
    return nullptr;
  }
  // Any non-enabled training state collapses to TS_DISABLED on load.
  training = data == TS_ENABLED ? TS_ENABLED : TS_DISABLED;
  if (!fp->DeSerialize(&data)) {
    return nullptr;
  }
  needs_to_backprop = data != 0;
  if (!fp->DeSerialize(&network_flags)) {
    return nullptr;
  }
  if (!fp->DeSerialize(&ni)) {
    return nullptr;
  }
  if (!fp->DeSerialize(&no)) {
    return nullptr;
  }
  if (!fp->DeSerialize(&num_weights)) {
    return nullptr;
  }
  if (!fp->DeSerialize(name)) {
    return nullptr;
  }
  // Factory: construct the subclass shell; each DeSerialize below fills in
  // dimensions/weights, so placeholder sizes (0) here are overwritten.
  switch (type) {
    case NT_CONVOLVE:
      network = new Convolve(name, ni, 0, 0);
      break;
    case NT_INPUT:
      network = new Input(name, ni, no);
      break;
    case NT_LSTM:
    case NT_LSTM_SOFTMAX:
    case NT_LSTM_SOFTMAX_ENCODED:
    case NT_LSTM_SUMMARY:
      network = new LSTM(name, ni, no, no, false, type);
      break;
    case NT_MAXPOOL:
      network = new Maxpool(name, ni, 0, 0);
      break;
    // All variants of Parallel.
    case NT_PARALLEL:
    case NT_REPLICATED:
    case NT_PAR_RL_LSTM:
    case NT_PAR_UD_LSTM:
    case NT_PAR_2D_LSTM:
      network = new Parallel(name, type);
      break;
    case NT_RECONFIG:
      network = new Reconfig(name, ni, 0, 0);
      break;
    // All variants of reversed.
    case NT_XREVERSED:
    case NT_YREVERSED:
    case NT_XYTRANSPOSE:
      network = new Reversed(name, type);
      break;
    case NT_SERIES:
      network = new Series(name);
      break;
    case NT_TENSORFLOW:
#ifdef INCLUDE_TENSORFLOW
      network = new TFNetwork(name);
#else
      tprintf("TensorFlow not compiled in! -DINCLUDE_TENSORFLOW\n");
#endif
      break;
    // All variants of FullyConnected.
    case NT_SOFTMAX:
    case NT_SOFTMAX_NO_CTC:
    case NT_RELU:
    case NT_TANH:
    case NT_LINEAR:
    case NT_LOGISTIC:
    case NT_POSCLIP:
    case NT_SYMCLIP:
      network = new FullyConnected(name, ni, no, type);
      break;
    default:
      // Unknown/invalid type (including NT_NONE from a read failure above).
      break;
  }
  if (network) {
    network->training_ = training;
    network->needs_to_backprop_ = needs_to_backprop;
    network->network_flags_ = network_flags;
    network->num_weights_ = num_weights;
    if (!network->DeSerialize(fp)) {
      delete network;
      network = nullptr;
    }
  }
  return network;
}
// Returns a random number in [-range, range] from the borrowed randomizer.
// Requires a prior SetRandomizer/InitWeights call; asserts otherwise.
TFloat Network::Random(TFloat range) {
  ASSERT_HOST(randomizer_ != nullptr);
  return randomizer_->SignedRand(range);
}
#ifndef GRAPHICS_DISABLED
// === Debug image display methods. ===
// Displays the image of the matrix to the forward window, creating the
// window (named after this layer) on first use.
void Network::DisplayForward(const NetworkIO &matrix) {
  Image image = matrix.ToPix();
  ClearWindow(false, name_.c_str(), pixGetWidth(image), pixGetHeight(image), &forward_win_);
  DisplayImage(image, forward_win_);
  forward_win_->Update();
}
// Displays the image of the matrix to the backward window, creating the
// window (named "<layer>-back") on first use.
void Network::DisplayBackward(const NetworkIO &matrix) {
  Image image = matrix.ToPix();
  std::string window_name = name_ + "-back";
  ClearWindow(false, window_name.c_str(), pixGetWidth(image), pixGetHeight(image), &backward_win_);
  DisplayImage(image, backward_win_);
  backward_win_->Update();
}
// Creates the window if needed, otherwise clears it.
// On creation, small contents are scaled up so the shorter side reaches
// kMinWinSize, frame sizes are added, and both dimensions are capped at
// kMaxWinSize.
void Network::ClearWindow(bool tess_coords, const char *window_name, int width, int height,
                          ScrollView **window) {
  if (*window != nullptr) {
    (*window)->Clear();
    return;
  }
  // Scale up so the shorter side is at least kMinWinSize.
  int min_size = std::min(width, height);
  if (min_size < kMinWinSize) {
    if (min_size < 1) {
      min_size = 1;
    }
    width = width * kMinWinSize / min_size;
    height = height * kMinWinSize / min_size;
  }
  // Allow for the window frame, then cap at the maximum window size.
  width = std::min(width + kXWinFrameSize, kMaxWinSize);
  height = std::min(height + kYWinFrameSize, kMaxWinSize);
  *window = new ScrollView(window_name, 80, 100, width, height, width, height, tess_coords);
  tprintf("Created window %s of size %d, %d\n", window_name, width, height);
}
// Displays the pix in the given window. and returns the height of the pix.
// The pix is pixDestroyed (the caller's reference is consumed).
int Network::DisplayImage(Image pix, ScrollView *window) {
  int height = pixGetHeight(pix);
  window->Draw(pix, 0, 0);
  pix.destroy();
  return height;
}
#endif // !GRAPHICS_DISABLED
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/network.cpp
|
C++
|
apache-2.0
| 11,612
|
///////////////////////////////////////////////////////////////////////
// File: network.h
// Description: Base class for neural network implementations.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_NETWORK_H_
#define TESSERACT_LSTM_NETWORK_H_
#include "helpers.h"
#include "matrix.h"
#include "networkio.h"
#include "serialis.h"
#include "static_shape.h"
#include "tprintf.h"
#include <cmath>
#include <cstdio>
struct Pix;
namespace tesseract {
class ScrollView;
class TBOX;
class ImageData;
class NetworkScratch;
// Enum to store the run-time type of a Network. Keep in sync with kTypeNames.
// Values are serialized (by name in the modern format), so insertions are
// safe but the name table in network.cpp must be updated in step.
enum NetworkType {
  NT_NONE,  // The naked base class.
  NT_INPUT, // Inputs from an image.
  // Plumbing networks combine other networks or rearrange the inputs.
  NT_CONVOLVE,    // Duplicates inputs in a sliding window neighborhood.
  NT_MAXPOOL,     // Chooses the max result from a rectangle.
  NT_PARALLEL,    // Runs networks in parallel.
  NT_REPLICATED,  // Runs identical networks in parallel.
  NT_PAR_RL_LSTM, // Runs LTR and RTL LSTMs in parallel.
  NT_PAR_UD_LSTM, // Runs Up and Down LSTMs in parallel.
  NT_PAR_2D_LSTM, // Runs 4 LSTMs in parallel.
  NT_SERIES,      // Executes a sequence of layers.
  NT_RECONFIG,    // Scales the time/y size but makes the output deeper.
  NT_XREVERSED,   // Reverses the x direction of the inputs/outputs.
  NT_YREVERSED,   // Reverses the y-direction of the inputs/outputs.
  NT_XYTRANSPOSE, // Transposes x and y (for just a single op).
  // Functional networks actually calculate stuff.
  NT_LSTM,           // Long-Short-Term-Memory block.
  NT_LSTM_SUMMARY,   // LSTM that only keeps its last output.
  NT_LOGISTIC,       // Fully connected logistic nonlinearity.
  NT_POSCLIP,        // Fully connected rect lin version of logistic.
  NT_SYMCLIP,        // Fully connected rect lin version of tanh.
  NT_TANH,           // Fully connected with tanh nonlinearity.
  NT_RELU,           // Fully connected with rectifier nonlinearity.
  NT_LINEAR,         // Fully connected with no nonlinearity.
  NT_SOFTMAX,        // Softmax uses exponential normalization, with CTC.
  NT_SOFTMAX_NO_CTC, // Softmax uses exponential normalization, no CTC.
  // The SOFTMAX LSTMs both have an extra softmax layer on top, but inside, with
  // the outputs fed back to the input of the LSTM at the next timestep.
  // The ENCODED version binary encodes the softmax outputs, providing log2 of
  // the number of outputs as additional inputs, and the other version just
  // provides all the softmax outputs as additional inputs.
  NT_LSTM_SOFTMAX,         // 1-d LSTM with built-in fully connected softmax.
  NT_LSTM_SOFTMAX_ENCODED, // 1-d LSTM with built-in binary encoded softmax.
  // A TensorFlow graph encapsulated as a Tesseract network.
  NT_TENSORFLOW,
  NT_COUNT // Array size.
};
// Enum of Network behavior flags. Can in theory be set for each individual
// network element. Values are bit masks combined into network_flags_.
enum NetworkFlags {
  // Network forward/backprop behavior.
  NF_LAYER_SPECIFIC_LR = 64, // Separate learning rate for each layer.
  NF_ADAM = 128,             // Weight-specific learning rate.
};
// State of training and desired state used in SetEnableTraining.
enum TrainingState {
  // Valid states of training_.
  TS_DISABLED, // Disabled permanently.
  TS_ENABLED,  // Enabled for backprop and to write a training dump.
               // Re-enable from ANY disabled state.
  TS_TEMP_DISABLE, // Temporarily disabled to write a recognition dump.
  // Valid only for SetEnableTraining.
  TS_RE_ENABLE, // Re-Enable from TS_TEMP_DISABLE, but not TS_DISABLED.
};
// Base class for network types. Not quite an abstract base class, but almost.
// Most of the time no isolated Network exists, except prior to
// deserialization.
class TESS_API Network {
public:
  Network();
  Network(NetworkType type, const std::string &name, int ni, int no);
  virtual ~Network() = default;
  // Accessors.
  NetworkType type() const {
    return type_;
  }
  bool IsTraining() const {
    return training_ == TS_ENABLED;
  }
  bool needs_to_backprop() const {
    return needs_to_backprop_;
  }
  int num_weights() const {
    return num_weights_;
  }
  int NumInputs() const {
    return ni_;
  }
  int NumOutputs() const {
    return no_;
  }
  // Returns the required shape input to the network.
  virtual StaticShape InputShape() const {
    StaticShape result;
    return result;
  }
  // Returns the shape output from the network given an input shape (which may
  // be partially unknown ie zero).
  virtual StaticShape OutputShape(const StaticShape &input_shape) const {
    StaticShape result(input_shape);
    result.set_depth(no_);
    return result;
  }
  const std::string &name() const {
    return name_;
  }
  virtual std::string spec() const = 0;
  bool TestFlag(NetworkFlags flag) const {
    return (network_flags_ & flag) != 0;
  }
  // Initialization and administrative functions that are mostly provided
  // by Plumbing.
  // Returns true if the given type is derived from Plumbing, and thus contains
  // multiple sub-networks that can have their own learning rate.
  virtual bool IsPlumbingType() const {
    return false;
  }
  // Suspends/Enables/Permanently disables training by setting the training_
  // flag. Serialize and DeSerialize only operate on the run-time data if state
  // is TS_DISABLED or TS_TEMP_DISABLE. Specifying TS_TEMP_DISABLE will
  // temporarily disable layers in state TS_ENABLED, allowing a trainer to
  // serialize as if it were a recognizer.
  // TS_RE_ENABLE will re-enable layers that were previously in any disabled
  // state. If in TS_TEMP_DISABLE then the flag is just changed, but if in
  // TS_DISABLED, the deltas in the weight matrices are reinitialized so that a
  // recognizer can be converted back to a trainer.
  virtual void SetEnableTraining(TrainingState state);
  // Sets flags that control the action of the network. See NetworkFlags enum
  // for bit values.
  virtual void SetNetworkFlags(uint32_t flags);
  // Sets up the network for training. Initializes weights using weights of
  // scale `range` picked according to the random number generator `randomizer`.
  // Note that randomizer is a borrowed pointer that should outlive the network
  // and should not be deleted by any of the networks.
  // Returns the number of weights initialized.
  virtual int InitWeights(float range, TRand *randomizer);
  // Changes the number of outputs to the outside world to the size of the given
  // code_map. Recursively searches the entire network for Softmax layers that
  // have exactly old_no outputs, and operates only on those, leaving all others
  // unchanged. This enables networks with multiple output layers to get all
  // their softmaxes updated, but if an internal layer, uses one of those
  // softmaxes for input, then the inputs will effectively be scrambled.
  // TODO(rays) Fix this before any such network is implemented.
  // The softmaxes are resized by copying the old weight matrix entries for each
  // output from code_map[output] where non-negative, and uses the mean (over
  // all outputs) of the existing weights for all outputs with negative code_map
  // entries. Returns the new number of weights.
  virtual int RemapOutputs([[maybe_unused]] int old_no,
                           [[maybe_unused]] const std::vector<int> &code_map) {
    return 0;
  }
  // Converts a float network to an int network.
  virtual void ConvertToInt() {}
  // Provides a pointer to a TRand for any networks that care to use it.
  // Note that randomizer is a borrowed pointer that should outlive the network
  // and should not be deleted by any of the networks.
  virtual void SetRandomizer(TRand *randomizer);
  // Sets needs_to_backprop_ to needs_backprop and returns true if
  // needs_backprop || any weights in this network so the next layer forward
  // can be told to produce backprop for this layer if needed.
  virtual bool SetupNeedsBackprop(bool needs_backprop);
  // Returns the most recent reduction factor that the network applied to the
  // time sequence. Assumes that any 2-d is already eliminated. Used for
  // scaling bounding boxes of truth data and calculating result bounding boxes.
  // WARNING: if GlobalMinimax is used to vary the scale, this will return
  // the last used scale factor. Call it before any forward, and it will return
  // the minimum scale factor of the paths through the GlobalMinimax.
  virtual int XScaleFactor() const {
    return 1;
  }
  // Provides the (minimum) x scale factor to the network (of interest only to
  // input units) so they can determine how to scale bounding boxes.
  virtual void CacheXScaleFactor([[maybe_unused]] int factor) {}
  // Provides debug output on the weights.
  virtual void DebugWeights() = 0;
  // Writes to the given file. Returns false in case of error.
  // Should be overridden by subclasses, but called by their Serialize.
  virtual bool Serialize(TFile *fp) const;
  // Reads from the given file. Returns false in case of error.
  // Should be overridden by subclasses, but NOT called by their DeSerialize.
  virtual bool DeSerialize(TFile *fp) = 0;
public:
  // Updates the weights using the given learning rate, momentum and adam_beta.
  // num_samples is used in the adam computation iff use_adam_ is true.
  virtual void Update([[maybe_unused]] float learning_rate,
                      [[maybe_unused]] float momentum,
                      [[maybe_unused]] float adam_beta,
                      [[maybe_unused]] int num_samples) {}
  // Sums the products of weight updates in *this and other, splitting into
  // positive (same direction) in *same and negative (different direction) in
  // *changed.
  virtual void CountAlternators([[maybe_unused]] const Network &other,
                                [[maybe_unused]] TFloat *same,
                                [[maybe_unused]] TFloat *changed) const {}
  // Reads from the given file. Returns nullptr in case of error.
  // Determines the type of the serialized class and calls its DeSerialize
  // on a new object of the appropriate type, which is returned.
  static Network *CreateFromFile(TFile *fp);
  // Runs forward propagation of activations on the input line.
  // Note that input and output are both 2-d arrays.
  // The 1st index is the time element. In a 1-d network, it might be the pixel
  // position on the textline. In a 2-d network, the linearization is defined
  // by the stride_map. (See networkio.h).
  // The 2nd index of input is the network inputs/outputs, and the dimension
  // of the input must match NumInputs() of this network.
  // The output array will be resized as needed so that its 1st dimension is
  // always equal to the number of output values, and its second dimension is
  // always NumOutputs(). Note that all this detail is encapsulated away inside
  // NetworkIO, as are the internals of the scratch memory space used by the
  // network. See networkscratch.h for that.
  // If input_transpose is not nullptr, then it contains the transpose of input,
  // and the caller guarantees that it will still be valid on the next call to
  // backward. The callee is therefore at liberty to save the pointer and
  // reference it on a call to backward. This is a bit ugly, but it makes it
  // possible for a replicating parallel to calculate the input transpose once
  // instead of all the replicated networks having to do it.
  virtual void Forward(bool debug, const NetworkIO &input,
                       const TransposedArray *input_transpose,
                       NetworkScratch *scratch, NetworkIO *output) = 0;
  // Runs backward propagation of errors on fwd_deltas.
  // Note that fwd_deltas and back_deltas are both 2-d arrays as with Forward.
  // Returns false if back_deltas was not set, due to there being no point in
  // propagating further backwards. Thus most complete networks will always
  // return false from Backward!
  virtual bool Backward(bool debug, const NetworkIO &fwd_deltas,
                        NetworkScratch *scratch, NetworkIO *back_deltas) = 0;
  // === Debug image display methods. ===
  // Displays the image of the matrix to the forward window.
  void DisplayForward(const NetworkIO &matrix);
  // Displays the image of the matrix to the backward window.
  void DisplayBackward(const NetworkIO &matrix);
  // Creates the window if needed, otherwise clears it.
  static void ClearWindow(bool tess_coords, const char *window_name, int width,
                          int height, ScrollView **window);
  // Displays the pix in the given window. and returns the height of the pix.
  // The pix is pixDestroyed.
  static int DisplayImage(Image pix, ScrollView *window);
protected:
  // Returns a random number in [-range, range].
  TFloat Random(TFloat range);
protected:
  NetworkType type_;       // Type of the derived network class.
  TrainingState training_; // Are we currently training?
  bool needs_to_backprop_; // This network needs to output back_deltas.
  int32_t network_flags_;  // Behavior control flags in NetworkFlags.
  int32_t ni_;             // Number of input values.
  int32_t no_;             // Number of output values.
  int32_t num_weights_;    // Number of weights in this and sub-network.
  std::string name_;       // A unique name for this layer.
  // NOT-serialized debug data.
  ScrollView *forward_win_;  // Recognition debug display window.
  ScrollView *backward_win_; // Training debug display window.
  TRand *randomizer_;        // Random number generator (borrowed, not owned).
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_NETWORK_H_
|
2301_81045437/tesseract
|
src/lstm/network.h
|
C++
|
apache-2.0
| 14,298
|
///////////////////////////////////////////////////////////////////////
// File: networkio.cpp
// Description: Network input/output data, allowing float/int implementations.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#include "networkio.h"
#include <cfloat> // for FLT_MAX
#include <cmath>
#include <allheaders.h>
#include "functions.h"
#include "statistc.h"
#include "tprintf.h"
namespace tesseract {
// Minimum value to output for certainty (floor for log-probabilities).
const float kMinCertainty = -20.0f;
// Probability corresponding to kMinCertainty.
const float kMinProb = std::exp(kMinCertainty);
// Resizes to a specific size as a 2-d temp buffer. No batches, no y-dim.
// Clears the stride map, so the buffer has no image geometry. Selects the
// int (i_) or float (f_) backing store according to int_mode.
void NetworkIO::Resize2d(bool int_mode, int width, int num_features) {
  stride_map_ = StrideMap();
  int_mode_ = int_mode;
  if (int_mode_) {
    // Int mode pads rows for SIMD alignment.
    i_.ResizeNoInit(width, num_features, GetPadding(num_features));
  } else {
    f_.ResizeNoInit(width, num_features);
  }
}
// Resizes to a specific stride_map.
void NetworkIO::ResizeToMap(bool int_mode, const StrideMap &stride_map, int num_features) {
  // If this method crashes with this == nullptr,
  // it most likely got here through an uninitialized scratch element,
  // ie call NetworkScratch::IO::Resizexxx() not NetworkIO::Resizexxx()!!
  stride_map_ = stride_map;
  int_mode_ = int_mode;
  if (int_mode_) {
    // Int mode pads rows for SIMD alignment.
    i_.ResizeNoInit(stride_map.Width(), num_features, GetPadding(num_features));
  } else {
    f_.ResizeNoInit(stride_map.Width(), num_features);
  }
  // Storage is uninitialized; zero the padding positions that do not
  // correspond to valid image pixels.
  ZeroInvalidElements();
}
// Shrinks image size by x_scale,y_scale, and use given number of features.
void NetworkIO::ResizeScaled(const NetworkIO &src, int x_scale, int y_scale, int num_features) {
  StrideMap stride_map = src.stride_map_;
  stride_map.ScaleXY(x_scale, y_scale);
  ResizeToMap(src.int_mode_, stride_map, num_features);
}
// Resizes to just 1 x-coord, whatever the input.
// Used by summarizing layers that collapse the time dimension.
void NetworkIO::ResizeXTo1(const NetworkIO &src, int num_features) {
  StrideMap stride_map = src.stride_map_;
  stride_map.ReduceWidthTo1();
  ResizeToMap(src.int_mode_, stride_map, num_features);
}
// Initialize all the array to zero.
void NetworkIO::Zero() {
int width = Width();
// Zero out the everything. Column-by-column in case it is aligned.
for (int t = 0; t < width; ++t) {
ZeroTimeStep(t);
}
}
// Initializes to zero all elements of the array that do not correspond to
// valid image positions. (If a batch of different-sized images are packed
// together, then there will be padding pixels.)
void NetworkIO::ZeroInvalidElements() {
  int num_features = NumFeatures();
  int full_width = stride_map_.Size(FD_WIDTH);
  int full_height = stride_map_.Size(FD_HEIGHT);
  StrideMap::Index b_index(stride_map_);
  // Iterate over the batch dimension only; within each image, clear the
  // right-hand padding row-by-row and the bottom padding in one block.
  do {
    int end_x = b_index.MaxIndexOfDim(FD_WIDTH) + 1;
    if (end_x < full_width) {
      // The width is small, so fill for every valid y.
      StrideMap::Index y_index(b_index);
      int fill_size = num_features * (full_width - end_x);
      do {
        StrideMap::Index z_index(y_index);
        z_index.AddOffset(end_x, FD_WIDTH);
        if (int_mode_) {
          ZeroVector(fill_size, i_[z_index.t()]);
        } else {
          ZeroVector(fill_size, f_[z_index.t()]);
        }
      } while (y_index.AddOffset(1, FD_HEIGHT));
    }
    int end_y = b_index.MaxIndexOfDim(FD_HEIGHT) + 1;
    if (end_y < full_height) {
      // The height is small, so fill in the space in one go.
      // Rows below end_y are contiguous in memory, so a single ZeroVector
      // covers them all.
      StrideMap::Index y_index(b_index);
      y_index.AddOffset(end_y, FD_HEIGHT);
      int fill_size = num_features * full_width * (full_height - end_y);
      if (int_mode_) {
        ZeroVector(fill_size, i_[y_index.t()]);
      } else {
        ZeroVector(fill_size, f_[y_index.t()]);
      }
    }
  } while (b_index.AddOffset(1, FD_BATCH));
}
// Helper computes a black point and white point to contrast-enhance an image.
// The computation is based on the assumption that the image is of a single line
// of text, so a horizontal line through the middle of the image passes through
// at least some of it, so local minima and maxima are a good proxy for black
// and white pixel samples.
static void ComputeBlackWhite(Image pix, float *black, float *white) {
  int width = pixGetWidth(pix);
  int height = pixGetHeight(pix);
  // Histograms of local minima (candidate black) and maxima (candidate white).
  STATS mins(0, 255), maxes(0, 255);
  if (width >= 3) {
    // Scan the middle row for local extrema, comparing each pixel against
    // its immediate left/right neighbors.
    int y = height / 2;
    l_uint32 *line = pixGetData(pix) + pixGetWpl(pix) * y;
    int prev = GET_DATA_BYTE(line, 0);
    int curr = GET_DATA_BYTE(line, 1);
    for (int x = 1; x + 1 < width; ++x) {
      int next = GET_DATA_BYTE(line, x + 1);
      // Strict on one side, non-strict on the other, so a value equal to one
      // neighbor but below/above the other still registers as an extremum.
      if ((curr < prev && curr <= next) || (curr <= prev && curr < next)) {
        // Local minimum.
        mins.add(curr, 1);
      }
      if ((curr > prev && curr >= next) || (curr >= prev && curr > next)) {
        // Local maximum.
        maxes.add(curr, 1);
      }
      prev = curr;
      curr = next;
    }
  }
  // Guarantee non-empty histograms so the percentile queries below are defined.
  if (mins.get_total() == 0) {
    mins.add(0, 1);
  }
  if (maxes.get_total() == 0) {
    maxes.add(255, 1);
  }
  // Robust estimates: 25th percentile of minima, 75th percentile of maxima.
  *black = mins.ile(0.25);
  *white = maxes.ile(0.75);
}
// Sets up the array from the given image, using the currently set int_mode_.
// If the image width doesn't match the shape, the image is truncated or padded
// with noise to match.
void NetworkIO::FromPix(const StaticShape &shape, const Image pix, TRand *randomizer) {
std::vector<Image> pixes(1, pix);
FromPixes(shape, pixes, randomizer);
}
// Sets up the array from the given set of images, using the currently set
// int_mode_. If the image width doesn't match the shape, the images are
// truncated or padded with noise to match.
void NetworkIO::FromPixes(const StaticShape &shape, const std::vector<Image> &pixes,
                          TRand *randomizer) {
  int target_height = shape.height();
  int target_width = shape.width();
  // First pass: compute each image's (height, width), overriding with the
  // shape's fixed dimensions where they are non-zero.
  std::vector<std::pair<int, int>> h_w_pairs;
  for (auto &&pix : pixes) {
    Image var_pix = pix;
    int width = pixGetWidth(var_pix);
    if (target_width != 0) {
      width = target_width;
    }
    int height = pixGetHeight(var_pix);
    if (target_height != 0) {
      height = target_height;
    }
    h_w_pairs.emplace_back(height, width);
  }
  stride_map_.SetStride(h_w_pairs);
  ResizeToMap(int_mode(), stride_map_, shape.depth());
  // Iterate over the images again to copy the data.
  for (size_t b = 0; b < pixes.size(); ++b) {
    Image pix = pixes[b];
    // Estimate a per-image black point and contrast; color input (depth 3)
    // skips the estimate and uses the full [0, 255] range.
    float black = 0.0f, white = 255.0f;
    if (shape.depth() != 3) {
      ComputeBlackWhite(pix, &black, &white);
    }
    float contrast = (white - black) / 2.0f;
    if (contrast <= 0.0f) {
      // Guard against divide-by-zero on flat images.
      contrast = 1.0f;
    }
    // A height-1 shape treats the image as vertical pixel strips; anything
    // else copies it as a 2-d image.
    if (shape.height() == 1) {
      Copy1DGreyImage(b, pix, black, contrast, randomizer);
    } else {
      Copy2DImage(b, pix, black, contrast, randomizer);
    }
  }
}
// Copies the given pix to *this at the given batch index, stretching and
// clipping the pixel values so that [black, black + 2*contrast] maps to the
// dynamic range of *this, ie [-1,1] for a float and (-127,127) for int.
// This is a 2-d operation in the sense that the output depth is the number
// of input channels, the height is the height of the image, and the width
// is the width of the image, or truncated/padded with noise if the width
// is a fixed size.
void NetworkIO::Copy2DImage(int batch, Image pix, float black, float contrast, TRand *randomizer) {
  int width = pixGetWidth(pix);
  int height = pixGetHeight(pix);
  int wpl = pixGetWpl(pix);
  // Locate the first timestep belonging to this batch entry.
  StrideMap::Index index(stride_map_);
  index.AddOffset(batch, FD_BATCH);
  int t = index.t();
  int target_height = stride_map_.Size(FD_HEIGHT);
  int target_width = stride_map_.Size(FD_WIDTH);
  int num_features = NumFeatures();
  bool color = num_features == 3;
  if (width > target_width) {
    // Truncate an over-wide image to the target width.
    width = target_width;
  }
  uint32_t *line = pixGetData(pix);
  for (int y = 0; y < target_height; ++y, line += wpl) {
    int x = 0;
    if (y < height) {
      for (x = 0; x < width; ++x, ++t) {
        if (color) {
          // 3 features: copy R, G, B channels into consecutive features.
          int f = 0;
          for (int c = COLOR_RED; c <= COLOR_BLUE; ++c) {
            int pixel = GET_DATA_BYTE(line + x, c);
            SetPixel(t, f++, pixel, black, contrast);
          }
        } else {
          int pixel = GET_DATA_BYTE(line, x);
          SetPixel(t, 0, pixel, black, contrast);
        }
      }
    }
    // Pad the remainder of the row (and any rows below the image) with
    // random noise rather than a constant value.
    for (; x < target_width; ++x) {
      Randomize(t++, 0, num_features, randomizer);
    }
  }
}
// Copies the given pix to *this at the given batch index, as Copy2DImage
// above, except that the output depth is the height of the input image, the
// output height is 1, and the output width as for Copy2DImage.
// The image is thus treated as a 1-d set of vertical pixel strips.
void NetworkIO::Copy1DGreyImage(int batch, Image pix, float black, float contrast,
                                TRand *randomizer) {
  int width = pixGetWidth(pix);
  int height = pixGetHeight(pix);
  // Each image column becomes one timestep, with the pixel rows as features.
  ASSERT_HOST(height == NumFeatures());
  int wpl = pixGetWpl(pix);
  // Locate the first timestep belonging to this batch entry.
  StrideMap::Index index(stride_map_);
  index.AddOffset(batch, FD_BATCH);
  int t = index.t();
  int target_width = stride_map_.Size(FD_WIDTH);
  if (width > target_width) {
    // Truncate an over-wide image to the target width.
    width = target_width;
  }
  int x;
  for (x = 0; x < width; ++x, ++t) {
    for (int y = 0; y < height; ++y) {
      uint32_t *line = pixGetData(pix) + wpl * y;
      int pixel = GET_DATA_BYTE(line, x);
      SetPixel(t, y, pixel, black, contrast);
    }
  }
  // Pad any remaining width with random noise.
  for (; x < target_width; ++x) {
    Randomize(t++, 0, height, randomizer);
  }
}
// Helper stores the pixel value in i_ or f_ according to int_mode_.
// t: is the index from the StrideMap corresponding to the current
// [batch,y,x] position
// f: is the index into the depth/channel
// pixel: the value of the pixel from the image (in one channel)
// black: the pixel value to map to the lowest of the range of *this
// contrast: the range of pixel values to stretch to half the range of *this.
void NetworkIO::SetPixel(int t, int f, int pixel, float black, float contrast) {
float float_pixel = (pixel - black) / contrast - 1.0f;
if (int_mode_) {
i_[t][f] = ClipToRange<int>(IntCastRounded((INT8_MAX + 1) * float_pixel), -INT8_MAX, INT8_MAX);
} else {
f_[t][f] = float_pixel;
}
}
// Converts the array to a Pix. Must be pixDestroyed after use.
Image NetworkIO::ToPix() const {
  // Count the width of the image, and find the max multiplication factor.
  int im_width = stride_map_.Size(FD_WIDTH);
  int im_height = stride_map_.Size(FD_HEIGHT);
  int num_features = NumFeatures();
  int feature_factor = 1;
  if (num_features == 3) {
    // Special hack for color.
    num_features = 1;
    feature_factor = 3;
  }
  // Output stacks one horizontal band of height im_height per feature.
  Image pix = pixCreate(im_width, im_height * num_features, 32);
  StrideMap::Index index(stride_map_);
  do {
    int im_x = index.index(FD_WIDTH);
    int top_im_y = index.index(FD_HEIGHT);
    int im_y = top_im_y;
    int t = index.t();
    if (int_mode_) {
      const int8_t *features = i_[t];
      for (int y = 0; y < num_features; ++y, im_y += im_height) {
        int pixel = features[y * feature_factor];
        // 1 or 2 features use greyscale.
        int red = ClipToRange<int>(pixel + 128, 0, 255);
        int green = red, blue = red;
        if (feature_factor == 3) {
          // With 3 features assume RGB color.
          green = ClipToRange<int>(features[y * feature_factor + 1] + 128, 0, 255);
          blue = ClipToRange<int>(features[y * feature_factor + 2] + 128, 0, 255);
        } else if (num_features > 3) {
          // More than 3 features use false yellow/blue color, assuming a signed
          // input in the range [-1,1].
          red = abs(pixel) * 2;
          if (pixel >= 0) {
            green = red;
            blue = 0;
          } else {
            blue = red;
            green = red = 0;
          }
        }
        pixSetPixel(pix, im_x, im_y,
                    (red << L_RED_SHIFT) | (green << L_GREEN_SHIFT) | (blue << L_BLUE_SHIFT));
      }
    } else {
      // Float mode: values nominally in [-1, 1] map to [0, 255].
      const float *features = f_[t];
      for (int y = 0; y < num_features; ++y, im_y += im_height) {
        float pixel = features[y * feature_factor];
        // 1 or 2 features use greyscale.
        int red = ClipToRange<int>(IntCastRounded((pixel + 1.0f) * 127.5f), 0, 255);
        int green = red, blue = red;
        if (feature_factor == 3) {
          // With 3 features assume RGB color.
          pixel = features[y * feature_factor + 1];
          green = ClipToRange<int>(IntCastRounded((pixel + 1.0f) * 127.5f), 0, 255);
          pixel = features[y * feature_factor + 2];
          blue = ClipToRange<int>(IntCastRounded((pixel + 1.0f) * 127.5f), 0, 255);
        } else if (num_features > 3) {
          // More than 3 features use false yellow/blue color, assuming a signed
          // input in the range [-1,1].
          red = ClipToRange<int>(IntCastRounded(std::fabs(pixel) * 255), 0, 255);
          if (pixel >= 0) {
            green = red;
            blue = 0;
          } else {
            blue = red;
            green = red = 0;
          }
        }
        pixSetPixel(pix, im_x, im_y,
                    (red << L_RED_SHIFT) | (green << L_GREEN_SHIFT) | (blue << L_BLUE_SHIFT));
      }
    }
  } while (index.Increment());
  return pix;
}
// Prints the first and last num timesteps of the array for each feature.
void NetworkIO::Print(int num) const {
  const int num_features = NumFeatures();
  for (int y = 0; y < num_features; ++y) {
    // One output row per feature; num == 0 prints every timestep.
    for (int t = 0; t < Width(); ++t) {
      const bool in_window = num == 0 || t < num || t + num >= Width();
      if (in_window) {
        // int8 values are rescaled to the nominal [-1, 1] range for display.
        const float value =
            int_mode_ ? static_cast<float>(i_[t][y]) / INT8_MAX : f_[t][y];
        tprintf(" %g", value);
      }
    }
    tprintf("\n");
  }
}
// Copies a single time step from src.
void NetworkIO::CopyTimeStepFrom(int dest_t, const NetworkIO &src, int src_t) {
  // Both sides must use the same representation for a raw byte copy.
  ASSERT_HOST(int_mode_ == src.int_mode_);
  if (int_mode_) {
    const size_t num_bytes = i_.dim2() * sizeof(i_[0][0]);
    memcpy(i_[dest_t], src.i_[src_t], num_bytes);
  } else {
    const size_t num_bytes = f_.dim2() * sizeof(f_[0][0]);
    memcpy(f_[dest_t], src.f_[src_t], num_bytes);
  }
}
// Copies a part of single time step from src.
void NetworkIO::CopyTimeStepGeneral(int dest_t, int dest_offset, int num_features,
                                    const NetworkIO &src, int src_t, int src_offset) {
  // Both sides must use the same representation for a raw byte copy.
  ASSERT_HOST(int_mode_ == src.int_mode_);
  if (int_mode_) {
    const size_t num_bytes = num_features * sizeof(i_[0][0]);
    memcpy(i_[dest_t] + dest_offset, src.i_[src_t] + src_offset, num_bytes);
  } else {
    const size_t num_bytes = num_features * sizeof(f_[0][0]);
    memcpy(f_[dest_t] + dest_offset, src.f_[src_t] + src_offset, num_bytes);
  }
}
// Sets the given range to random values.
void NetworkIO::Randomize(int t, int offset, int num_features, TRand *randomizer) {
  if (int_mode_) {
    // Int mode: signed random values across the full int8 range.
    int8_t *dest = i_[t] + offset;
    for (int k = 0; k < num_features; ++k) {
      dest[k] = IntCastRounded(randomizer->SignedRand(INT8_MAX));
    }
  } else {
    // Float mode: signed random values with unit amplitude.
    float *dest = f_[t] + offset;
    for (int k = 0; k < num_features; ++k) {
      dest[k] = randomizer->SignedRand(1.0);
    }
  }
}
// Helper returns the label and score of the best choice over a range.
int NetworkIO::BestChoiceOverRange(int t_start, int t_end, int not_this, int null_ch, float *rating,
                                   float *certainty) const {
  // Degenerate range: no choice possible.
  if (t_end <= t_start) {
    return -1;
  }
  int max_char = -1;
  float min_score = 0.0f;
  // Lower rating is better: track the class with the minimal rating over
  // the range, skipping the excluded class and the null channel.
  for (int c = 0; c < NumFeatures(); ++c) {
    if (c == not_this || c == null_ch) {
      continue;
    }
    ScoresOverRange(t_start, t_end, c, null_ch, rating, certainty);
    if (max_char < 0 || *rating < min_score) {
      min_score = *rating;
      max_char = c;
    }
  }
  // Recompute so *rating/*certainty correspond to the winning class.
  // NOTE(review): if every class were excluded, max_char would still be -1
  // here and the call below would index out of range - presumably callers
  // guarantee at least one eligible class; confirm.
  ScoresOverRange(t_start, t_end, max_char, null_ch, rating, certainty);
  return max_char;
}
// Helper returns the rating and certainty of the choice over a range in output.
void NetworkIO::ScoresOverRange(int t_start, int t_end, int choice, int null_ch, float *rating,
                                float *certainty) const {
  ASSERT_HOST(!int_mode_);
  *rating = 0.0f;
  *certainty = 0.0f;
  if (t_end <= t_start || t_end <= 0) {
    return;
  }
  // Dynamic program over 3 states for segmenting the range: ratings[i]
  // accumulates negated log-certainties along the best path ending in state
  // i, and certs[i] tracks the minimum (worst) certainty on that path.
  // NOTE(review): the state semantics (nulls-only / inside-choice /
  // nulls-after-choice) are inferred from the transitions below - confirm
  // against the decoder that calls this.
  float ratings[3] = {0.0f, 0.0f, 0.0f};
  float certs[3] = {0.0f, 0.0f, 0.0f};
  for (int t = t_start; t < t_end; ++t) {
    const float *line = f_[t];
    float score = ProbToCertainty(line[choice]);
    float zero = ProbToCertainty(line[null_ch]);
    if (t == t_start) {
      // At the first step, state 2 is unreachable and state 1 starts the
      // choice immediately.
      ratings[2] = FLT_MAX;
      ratings[1] = -score;
      certs[1] = score;
    } else {
      // Allow a transition from state i-1 into state i when it is cheaper.
      for (int i = 2; i >= 1; --i) {
        if (ratings[i] > ratings[i - 1]) {
          ratings[i] = ratings[i - 1];
          certs[i] = certs[i - 1];
        }
      }
      ratings[2] -= zero;
      if (zero < certs[2]) {
        certs[2] = zero;
      }
      ratings[1] -= score;
      if (score < certs[1]) {
        certs[1] = score;
      }
    }
    ratings[0] -= zero;
    if (zero < certs[0]) {
      certs[0] = zero;
    }
  }
  // The result must contain the choice, so only states 1 and 2 qualify.
  int best_i = ratings[2] < ratings[1] ? 2 : 1;
  *rating = ratings[best_i] + t_end - t_start;
  *certainty = certs[best_i];
}
// Returns the index (label) of the best value at the given timestep,
// excluding not_this and not_that, and if not null, sets the score to the
// log of the corresponding value.
int NetworkIO::BestLabel(int t, int not_this, int not_that, float *score) const {
  ASSERT_HOST(!int_mode_);
  const float *line = f_[t];
  const int num_classes = f_.dim2();
  int winner = -1;
  float winner_score = -FLT_MAX;
  for (int c = 0; c < num_classes; ++c) {
    // Skip the two excluded labels; keep the running maximum elsewhere.
    if (c == not_this || c == not_that) {
      continue;
    }
    if (line[c] > winner_score) {
      winner_score = line[c];
      winner = c;
    }
  }
  if (score != nullptr) {
    *score = ProbToCertainty(winner_score);
  }
  return winner;
}
// Returns the best start position out of [start, end) (into which all labels
// must fit) to obtain the highest cumulative score for the given labels.
int NetworkIO::PositionOfBestMatch(const std::vector<int> &labels, int start, int end) const {
  const int num_labels = labels.size();
  const int final_start = end - num_labels;
  int best_start = -1;
  TFloat best_score = 0;
  // Slide the label window over every feasible start and keep the best.
  for (int pos = start; pos <= final_start; ++pos) {
    TFloat score = ScoreOfLabels(labels, pos);
    if (best_start < 0 || score > best_score) {
      best_score = score;
      best_start = pos;
    }
  }
  return best_start;
}
// Returns the cumulative score of the given labels starting at start, and
// using one label per time-step.
TFloat NetworkIO::ScoreOfLabels(const std::vector<int> &labels, int start) const {
  TFloat total = 0;
  int t = start;
  // Sum the activation of each label at its consecutive timestep.
  for (int label : labels) {
    total += f_(t++, label);
  }
  return total;
}
// Helper function sets all the outputs for a single timestep, such that
// label has value ok_score, and the other labels share 1 - ok_score.
void NetworkIO::SetActivations(int t, int label, float ok_score) {
  ASSERT_HOST(!int_mode_);
  const int num_classes = NumFeatures();
  // Distribute the residual mass evenly over the remaining classes.
  const float other_score = (1.0f - ok_score) / (num_classes - 1);
  float *line = f_[t];
  for (int c = 0; c < num_classes; ++c) {
    line[c] = other_score;
  }
  line[label] = ok_score;
}
// Modifies the values, only if needed, so that the given label is
// the winner at the given time step t.
void NetworkIO::EnsureBestLabel(int t, int label) {
  ASSERT_HOST(!int_mode_);
  if (BestLabel(t, nullptr) != label) {
    // Output value needs enhancing. Third all the other elements and add the
    // remainder to best_label.
    int num_classes = NumFeatures();
    float *targets = f_[t];
    for (int c = 0; c < num_classes; ++c) {
      if (c == label) {
        // Move 2/3 of the way from the current value towards 1.
        targets[c] += (1.0 - targets[c]) * (2 / 3.0);
      } else {
        targets[c] /= 3.0;
      }
    }
  }
}
// Helper function converts prob to certainty taking the minimum into account.
/* static */
float NetworkIO::ProbToCertainty(float prob) {
  // Floor tiny probabilities at kMinCertainty instead of taking their log.
  if (prob > kMinProb) {
    return std::log(prob);
  }
  return kMinCertainty;
}
// Returns true if there is any bad value that is suspiciously like a GT
// error. Assuming that *this is the difference(gradient) between target
// and forward output, returns true if there is a large negative value
// (correcting a very confident output) for which there is no corresponding
// positive value in an adjacent timestep for the same feature index. This
// allows the box-truthed samples to make fine adjustments to position while
// stopping other disagreements of confident output with ground truth.
bool NetworkIO::AnySuspiciousTruth(float confidence_thr) const {
  int num_features = NumFeatures();
  for (int t = 0; t < Width(); ++t) {
    const float *features = f_[t];
    for (int y = 0; y < num_features; ++y) {
      float grad = features[y];
      if (grad < -confidence_thr) {
        // Correcting strong output. Check for movement.
        // A strong positive at t-1 or t+1 for the same feature means the
        // truth merely shifted by one step; otherwise it is a genuine
        // disagreement with a confident output.
        if ((t == 0 || f_[t - 1][y] < confidence_thr / 2) &&
            (t + 1 == Width() || f_[t + 1][y] < confidence_thr / 2)) {
          return true; // No strong positive on either side.
        }
      }
    }
  }
  return false;
}
// Reads a single timestep to floats in the range [-1, 1].
void NetworkIO::ReadTimeStep(int t, TFloat *output) const {
  if (int_mode_) {
    const int8_t *src = i_[t];
    const int dim = i_.dim2();
    for (int k = 0; k < dim; ++k) {
      // Rescale int8 back to the nominal [-1, 1] range.
      output[k] = static_cast<TFloat>(src[k]) / INT8_MAX;
    }
  } else {
    const float *src = f_[t];
    const int dim = f_.dim2();
    for (int k = 0; k < dim; ++k) {
      output[k] = static_cast<TFloat>(src[k]);
    }
  }
}
// Adds a single timestep to floats.
void NetworkIO::AddTimeStep(int t, TFloat *inout) const {
  const int num_features = NumFeatures();
  if (int_mode_) {
    const int8_t *src = i_[t];
    for (int k = 0; k < num_features; ++k) {
      // Rescale int8 to the nominal [-1, 1] range before accumulating.
      inout[k] += static_cast<TFloat>(src[k]) / INT8_MAX;
    }
  } else {
    const float *src = f_[t];
    for (int k = 0; k < num_features; ++k) {
      inout[k] += src[k];
    }
  }
}
// Adds part of a single timestep to floats.
void NetworkIO::AddTimeStepPart(int t, int offset, int num_features, float *inout) const {
  if (int_mode_) {
    const int8_t *src = i_[t] + offset;
    for (int k = 0; k < num_features; ++k) {
      // Rescale int8 to the nominal [-1, 1] range before accumulating.
      inout[k] += static_cast<float>(src[k]) / INT8_MAX;
    }
  } else {
    const float *src = f_[t] + offset;
    for (int k = 0; k < num_features; ++k) {
      inout[k] += src[k];
    }
  }
}
// Writes a single timestep from floats in the range [-1, 1].
void NetworkIO::WriteTimeStep(int t, const TFloat *input) {
  // A full-width write is a part-write covering all features from offset 0.
  WriteTimeStepPart(t, 0, NumFeatures(), input);
}
// Writes a single timestep from floats in the range [-1, 1] writing only
// num_features elements of input to (*this)[t], starting at offset.
void NetworkIO::WriteTimeStepPart(int t, int offset, int num_features, const TFloat *input) {
  if (int_mode_) {
    int8_t *dest = i_[t] + offset;
    for (int k = 0; k < num_features; ++k) {
      // Quantize to int8, clipping symmetrically to +/-INT8_MAX.
      dest[k] = ClipToRange<int>(IntCastRounded(input[k] * INT8_MAX), -INT8_MAX, INT8_MAX);
    }
  } else {
    float *dest = f_[t] + offset;
    for (int k = 0; k < num_features; ++k) {
      dest[k] = static_cast<float>(input[k]);
    }
  }
}
// Maxpools a single time step from src.
void NetworkIO::MaxpoolTimeStep(int dest_t, const NetworkIO &src, int src_t, int *max_line) {
  ASSERT_HOST(int_mode_ == src.int_mode_);
  if (int_mode_) {
    const int8_t *src_line = src.i_[src_t];
    int8_t *dest_line = i_[dest_t];
    const int dim = i_.dim2();
    for (int i = 0; i < dim; ++i) {
      // Keep the running maximum and record which timestep supplied it.
      if (src_line[i] > dest_line[i]) {
        dest_line[i] = src_line[i];
        max_line[i] = src_t;
      }
    }
  } else {
    const float *src_line = src.f_[src_t];
    float *dest_line = f_[dest_t];
    const int dim = f_.dim2();
    for (int i = 0; i < dim; ++i) {
      if (src_line[i] > dest_line[i]) {
        dest_line[i] = src_line[i];
        max_line[i] = src_t;
      }
    }
  }
}
// Runs maxpool backward, using maxes to index timesteps in *this.
void NetworkIO::MaxpoolBackward(const NetworkIO &fwd, const GENERIC_2D_ARRAY<int> &maxes) {
ASSERT_HOST(!int_mode_);
Zero();
StrideMap::Index index(fwd.stride_map_);
do {
int t = index.t();
const int *max_line = maxes[t];
const float *fwd_line = fwd.f_[t];
int num_features = fwd.f_.dim2();
for (int i = 0; i < num_features; ++i) {
f_[max_line[i]][i] = fwd_line[i];
}
} while (index.Increment());
}
// Returns the min over time of the maxes over features of the outputs.
float NetworkIO::MinOfMaxes() const {
  float result = 0.0f;
  const int width = Width();
  const int num_features = NumFeatures();
  for (int t = 0; t < width; ++t) {
    // Find the maximum activation at this timestep.
    float step_max = -FLT_MAX;
    if (int_mode_) {
      const int8_t *col = i_[t];
      for (int k = 0; k < num_features; ++k) {
        if (col[k] > step_max) {
          step_max = col[k];
        }
      }
    } else {
      const float *col = f_[t];
      for (int k = 0; k < num_features; ++k) {
        if (col[k] > step_max) {
          step_max = col[k];
        }
      }
    }
    // Track the smallest per-timestep maximum.
    if (t == 0 || step_max < result) {
      result = step_max;
    }
  }
  return result;
}
// Computes combined results for a combiner that chooses between an existing
// input and itself, with an additional output to indicate the choice.
void NetworkIO::CombineOutputs(const NetworkIO &base_output, const NetworkIO &combiner_output) {
  int no = base_output.NumFeatures();
  // The combiner carries one extra output: the weight given to the base net.
  ASSERT_HOST(combiner_output.NumFeatures() == no + 1);
  Resize(base_output, no);
  int width = Width();
  if (int_mode_) {
    // Number of outputs from base and final result.
    for (int t = 0; t < width; ++t) {
      int8_t *out_line = i_[t];
      const int8_t *base_line = base_output.i_[t];
      const int8_t *comb_line = combiner_output.i_[t];
      // comb_line[no] is the quantized base weight; rescale to a fraction.
      float base_weight = static_cast<float>(comb_line[no]) / INT8_MAX;
      float boost_weight = 1.0f - base_weight;
      for (int i = 0; i < no; ++i) {
        // Weighted blend of the base and combiner outputs.
        out_line[i] = IntCastRounded(base_line[i] * base_weight + comb_line[i] * boost_weight);
      }
    }
  } else {
    for (int t = 0; t < width; ++t) {
      float *out_line = f_[t];
      const float *base_line = base_output.f_[t];
      const float *comb_line = combiner_output.f_[t];
      float base_weight = comb_line[no];
      float boost_weight = 1.0f - base_weight;
      for (int i = 0; i < no; ++i) {
        out_line[i] = base_line[i] * base_weight + comb_line[i] * boost_weight;
      }
    }
  }
}
// Computes deltas for a combiner that chooses between 2 sets of inputs.
void NetworkIO::ComputeCombinerDeltas(const NetworkIO &fwd_deltas, const NetworkIO &base_output) {
  ASSERT_HOST(!int_mode_);
  // Compute the deltas for the combiner.
  int width = Width();
  // The last feature of *this holds the base-vs-combiner weight.
  int no = NumFeatures() - 1;
  ASSERT_HOST(fwd_deltas.NumFeatures() == no);
  ASSERT_HOST(base_output.NumFeatures() == no);
  // Number of outputs from base and final result.
  for (int t = 0; t < width; ++t) {
    const float *delta_line = fwd_deltas.f_[t];
    const float *base_line = base_output.f_[t];
    float *comb_line = f_[t];
    float base_weight = comb_line[no];
    float boost_weight = 1.0f - base_weight;
    float max_base_delta = 0.0;
    for (int i = 0; i < no; ++i) {
      // What did the combiner actually produce?
      float output = base_line[i] * base_weight + comb_line[i] * boost_weight;
      // Reconstruct the target from the delta.
      float comb_target = delta_line[i] + output;
      comb_line[i] = comb_target - comb_line[i];
      // Track how far the base network's output was from the target.
      float base_delta = std::fabs(comb_target - base_line[i]);
      if (base_delta > max_base_delta) {
        max_base_delta = base_delta;
      }
    }
    if (max_base_delta >= 0.5) {
      // The base network got it wrong. The combiner should output the right
      // answer and 0 for the base network.
      comb_line[no] = 0.0 - base_weight;
    } else {
      // The base network was right. The combiner should flag that.
      for (int i = 0; i < no; ++i) {
        // All other targets are 0.
        if (comb_line[i] > 0.0) {
          comb_line[i] -= 1.0;
        }
      }
      comb_line[no] = 1.0 - base_weight;
    }
  }
}
// Copies the array checking that the types match.
void NetworkIO::CopyAll(const NetworkIO &src) {
  ASSERT_HOST(src.int_mode_ == int_mode_);
  // NOTE(review): only the float array is copied, even though the assert
  // permits int mode (i_ would be left untouched) - presumably callers only
  // use this in float mode; confirm.
  f_ = src.f_;
}
// Checks that both are floats and adds the src array to *this.
void NetworkIO::AddAllToFloat(const NetworkIO &src) {
  // Elementwise accumulation is only defined for the float representation.
  ASSERT_HOST(!int_mode_);
  ASSERT_HOST(!src.int_mode_);
  f_ += src.f_;
}
// Subtracts the array from a float array. src must also be float.
void NetworkIO::SubtractAllFromFloat(const NetworkIO &src) {
  // Elementwise subtraction is only defined for the float representation.
  ASSERT_HOST(!int_mode_);
  ASSERT_HOST(!src.int_mode_);
  f_ -= src.f_;
}
// Copies src to *this, with maxabs normalization to match scale.
void NetworkIO::CopyWithNormalization(const NetworkIO &src, const NetworkIO &scale) {
  ASSERT_HOST(!int_mode_);
  ASSERT_HOST(!src.int_mode_);
  ASSERT_HOST(!scale.int_mode_);
  const float src_max = src.f_.MaxAbs();
  ASSERT_HOST(std::isfinite(src_max));
  const float scale_max = scale.f_.MaxAbs();
  ASSERT_HOST(std::isfinite(scale_max));
  if (src_max <= 0.0f) {
    // An all-zero source has nothing to rescale; clear the destination.
    f_.Clear();
    return;
  }
  // Multiply every element by the ratio of the two max-abs values.
  const float factor = scale_max / src_max;
  const int width = src.Width();
  const int dim = src.f_.dim2();
  for (int t = 0; t < width; ++t) {
    const float *from = src.f_[t];
    float *to = f_[t];
    for (int i = 0; i < dim; ++i) {
      to[i] = from[i] * factor;
    }
  }
}
// Copies src to *this with independent reversal of the y dimension.
void NetworkIO::CopyWithYReversal(const NetworkIO &src) {
  int num_features = src.NumFeatures();
  Resize(src, num_features);
  StrideMap::Index b_index(src.stride_map_);
  // Per batch entry: walk y forward in src and backward in *this, copying
  // whole rows of timesteps.
  do {
    int width = b_index.MaxIndexOfDim(FD_WIDTH) + 1;
    StrideMap::Index fwd_index(b_index);
    StrideMap::Index rev_index(b_index);
    // Start the reverse index at the last valid row of this image.
    rev_index.AddOffset(rev_index.MaxIndexOfDim(FD_HEIGHT), FD_HEIGHT);
    do {
      int fwd_t = fwd_index.t();
      int rev_t = rev_index.t();
      for (int x = 0; x < width; ++x) {
        CopyTimeStepFrom(rev_t++, src, fwd_t++);
      }
    } while (fwd_index.AddOffset(1, FD_HEIGHT) && rev_index.AddOffset(-1, FD_HEIGHT));
  } while (b_index.AddOffset(1, FD_BATCH));
}
// Copies src to *this with independent reversal of the x dimension.
void NetworkIO::CopyWithXReversal(const NetworkIO &src) {
  int num_features = src.NumFeatures();
  Resize(src, num_features);
  StrideMap::Index b_index(src.stride_map_);
  do {
    StrideMap::Index y_index(b_index);
    do {
      // Within each row: walk x forward in src and backward in *this.
      StrideMap::Index fwd_index(y_index);
      StrideMap::Index rev_index(y_index);
      // Start the reverse index at the last valid column of this row.
      rev_index.AddOffset(rev_index.MaxIndexOfDim(FD_WIDTH), FD_WIDTH);
      do {
        CopyTimeStepFrom(rev_index.t(), src, fwd_index.t());
      } while (fwd_index.AddOffset(1, FD_WIDTH) && rev_index.AddOffset(-1, FD_WIDTH));
    } while (y_index.AddOffset(1, FD_HEIGHT));
  } while (b_index.AddOffset(1, FD_BATCH));
}
// Copies src to *this with independent transpose of the x and y dimensions.
void NetworkIO::CopyWithXYTranspose(const NetworkIO &src) {
  int num_features = src.NumFeatures();
  stride_map_ = src.stride_map_;
  stride_map_.TransposeXY();
  ResizeToMap(src.int_mode(), stride_map_, num_features);
  // Walk the source in (y, x) order while walking the destination in (x, y)
  // order, so src[y][x] lands at dest[x][y].
  StrideMap::Index src_b_index(src.stride_map_);
  StrideMap::Index dest_b_index(stride_map_);
  do {
    StrideMap::Index src_y_index(src_b_index);
    StrideMap::Index dest_x_index(dest_b_index);
    do {
      StrideMap::Index src_x_index(src_y_index);
      StrideMap::Index dest_y_index(dest_x_index);
      do {
        CopyTimeStepFrom(dest_y_index.t(), src, src_x_index.t());
      } while (src_x_index.AddOffset(1, FD_WIDTH) && dest_y_index.AddOffset(1, FD_HEIGHT));
    } while (src_y_index.AddOffset(1, FD_HEIGHT) && dest_x_index.AddOffset(1, FD_WIDTH));
  } while (src_b_index.AddOffset(1, FD_BATCH) && dest_b_index.AddOffset(1, FD_BATCH));
}
// Copies src to *this, at the given feature_offset, returning the total
// feature offset after the copy. Multiple calls will stack outputs from
// multiple sources in feature space.
int NetworkIO::CopyPacking(const NetworkIO &src, int feature_offset) {
  ASSERT_HOST(int_mode_ == src.int_mode_);
  const int width = src.Width();
  ASSERT_HOST(width <= Width());
  const int num_features = src.NumFeatures();
  ASSERT_HOST(num_features + feature_offset <= NumFeatures());
  if (int_mode_) {
    const size_t num_bytes = num_features * sizeof(i_[0][0]);
    for (int t = 0; t < width; ++t) {
      memcpy(i_[t] + feature_offset, src.i_[t], num_bytes);
    }
    // Zero the tail timesteps where src has no data. (Note: the zeroing is
    // at offset 0, matching the long-standing behavior.)
    for (int t = width; t < i_.dim1(); ++t) {
      memset(i_[t], 0, num_bytes);
    }
  } else {
    const size_t num_bytes = num_features * sizeof(f_[0][0]);
    for (int t = 0; t < width; ++t) {
      memcpy(f_[t] + feature_offset, src.f_[t], num_bytes);
    }
    for (int t = width; t < f_.dim1(); ++t) {
      memset(f_[t], 0, num_bytes);
    }
  }
  return num_features + feature_offset;
}
// Opposite of CopyPacking, fills *this with a part of src, starting at
// feature_offset, and picking num_features.
void NetworkIO::CopyUnpacking(const NetworkIO &src, int feature_offset, int num_features) {
  Resize(src, num_features);
  const int width = src.Width();
  ASSERT_HOST(num_features + feature_offset <= src.NumFeatures());
  if (int_mode_) {
    const size_t num_bytes = num_features * sizeof(i_[0][0]);
    for (int t = 0; t < width; ++t) {
      memcpy(i_[t], src.i_[t] + feature_offset, num_bytes);
    }
  } else {
    const size_t num_bytes = num_features * sizeof(f_[0][0]);
    for (int t = 0; t < width; ++t) {
      memcpy(f_[t], src.f_[t] + feature_offset, num_bytes);
    }
  }
}
// Transposes the float part of *this into dest.
void NetworkIO::Transpose(TransposedArray *dest) const {
  const int width = Width();
  // dest becomes features x width; each timestep is one strided column write.
  dest->ResizeNoInit(NumFeatures(), width);
  for (int t = 0; t < width; ++t) {
    dest->WriteStrided(t, f_[t]);
  }
}
// Clips the content of a single time-step to +/-range.
void NetworkIO::ClipVector(int t, float range) {
  ASSERT_HOST(!int_mode_);
  float *line = f_[t];
  const int dim = f_.dim2();
  for (int i = 0; i < dim; ++i) {
    line[i] = ClipToRange<float>(line[i], -range, range);
  }
}
// Returns the padding required for the given number of features in order
// for the SIMD operations to be safe.
/* static */
int NetworkIO::GetPadding(int num_features) {
  // Without a SIMD implementation no extra alignment is needed.
  if (IntSimdMatrix::intSimdMatrix == nullptr) {
    return 0;
  }
  // Pad up to the SIMD implementation's rounded input size.
  return IntSimdMatrix::intSimdMatrix->RoundInputs(num_features) - num_features;
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/networkio.cpp
|
C++
|
apache-2.0
| 34,708
|
///////////////////////////////////////////////////////////////////////
// File: networkio.h
// Description: Network input/output data, allowing float/int implementations.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_NETWORKIO_H_
#define TESSERACT_LSTM_NETWORKIO_H_
#include "helpers.h"
#include "image.h"
#include "static_shape.h"
#include "stridemap.h"
#include "weightmatrix.h"
#include <cmath>
#include <cstdio>
#include <vector>
struct Pix;
namespace tesseract {
// Class to contain all the input/output of a network, allowing for fixed or
// variable-strided 2d to 1d mapping, and float or int8_t values. Provides
// enough calculating functions to hide the detail of the implementation.
class TESS_API NetworkIO {
public:
  // Constructs in float mode (int_mode_ == false).
  NetworkIO() : int_mode_(false) {}
  // Resizes the array (and stride), avoiding realloc if possible, to the given
  // size from various size specs:
  // Same stride size, but given number of features.
  void Resize(const NetworkIO &src, int num_features) {
    // Keeps src's stride map and int/float mode; only the depth changes.
    ResizeToMap(src.int_mode(), src.stride_map(), num_features);
  }
// Resizes to a specific size as a 2-d temp buffer. No batches, no y-dim.
void Resize2d(bool int_mode, int width, int num_features);
  // Resizes forcing a float representation with the stridemap of src and the
  // given number of features.
  void ResizeFloat(const NetworkIO &src, int num_features) {
    // As Resize, but always produces a float array regardless of src's mode.
    ResizeToMap(false, src.stride_map(), num_features);
  }
// Resizes to a specific stride_map.
void ResizeToMap(bool int_mode, const StrideMap &stride_map, int num_features);
// Shrinks image size by x_scale,y_scale, and use given number of features.
void ResizeScaled(const NetworkIO &src, int x_scale, int y_scale, int num_features);
// Resizes to just 1 x-coord, whatever the input.
void ResizeXTo1(const NetworkIO &src, int num_features);
// Initialize all the array to zero.
void Zero();
// Initializes to zero all elements of the array that do not correspond to
// valid image positions. (If a batch of different-sized images are packed
// together, then there will be padding pixels.)
void ZeroInvalidElements();
// Sets up the array from the given image, using the currently set int_mode_.
// If the image width doesn't match the shape, the image is truncated or
// padded with noise to match.
void FromPix(const StaticShape &shape, const Image pix, TRand *randomizer);
// Sets up the array from the given set of images, using the currently set
// int_mode_. If the image width doesn't match the shape, the images are
// truncated or padded with noise to match.
void FromPixes(const StaticShape &shape, const std::vector<Image> &pixes,
TRand *randomizer);
// Copies the given pix to *this at the given batch index, stretching and
// clipping the pixel values so that [black, black + 2*contrast] maps to the
// dynamic range of *this, ie [-1,1] for a float and (-127,127) for int.
// This is a 2-d operation in the sense that the output depth is the number
// of input channels, the height is the height of the image, and the width
// is the width of the image, or truncated/padded with noise if the width
// is a fixed size.
void Copy2DImage(int batch, Image pix, float black, float contrast, TRand *randomizer);
// Copies the given pix to *this at the given batch index, as Copy2DImage
// above, except that the output depth is the height of the input image, the
// output height is 1, and the output width as for Copy2DImage.
// The image is thus treated as a 1-d set of vertical pixel strips.
void Copy1DGreyImage(int batch, Image pix, float black, float contrast, TRand *randomizer);
// Helper stores the pixel value in i_ or f_ according to int_mode_.
// t: is the index from the StrideMap corresponding to the current
// [batch,y,x] position
// f: is the index into the depth/channel
// pixel: the value of the pixel from the image (in one channel)
// black: the pixel value to map to the lowest of the range of *this
// contrast: the range of pixel values to stretch to half the range of *this.
void SetPixel(int t, int f, int pixel, float black, float contrast);
// Converts the array to a Pix. Must be pixDestroyed after use.
Image ToPix() const;
// Prints the first and last num timesteps of the array for each feature.
void Print(int num) const;
  // Returns the timestep width (number of timesteps in the array).
  int Width() const {
    return int_mode_ ? i_.dim1() : f_.dim1();
  }
  // Returns the number of features per timestep.
  int NumFeatures() const {
    return int_mode_ ? i_.dim2() : f_.dim2();
  }
  // Accessor to a timestep of the float matrix. Only valid in float mode.
  float *f(int t) {
    ASSERT_HOST(!int_mode_);
    return f_[t];
  }
  const float *f(int t) const {
    ASSERT_HOST(!int_mode_);
    return f_[t];
  }
  // Accessor to a timestep of the int matrix. Only valid in int mode.
  const int8_t *i(int t) const {
    ASSERT_HOST(int_mode_);
    return i_[t];
  }
  // True if the quantized (int8_t) representation is in use.
  bool int_mode() const {
    return int_mode_;
  }
  void set_int_mode(bool is_quantized) {
    int_mode_ = is_quantized;
  }
  // Mapping between timestep index and [batch, y, x] position for 2-d input.
  const StrideMap &stride_map() const {
    return stride_map_;
  }
  void set_stride_map(const StrideMap &map) {
    stride_map_ = map;
  }
  // Direct access to the underlying float storage.
  const GENERIC_2D_ARRAY<float> &float_array() const {
    return f_;
  }
  GENERIC_2D_ARRAY<float> *mutable_float_array() {
    return &f_;
  }
// Copies a single time step from src.
void CopyTimeStepFrom(int dest_t, const NetworkIO &src, int src_t);
// Copies a part of single time step from src.
void CopyTimeStepGeneral(int dest_t, int dest_offset, int num_features, const NetworkIO &src,
int src_t, int src_offset);
// Zeroes a single time step.
void ZeroTimeStep(int t) {
if (int_mode_) {
memset(i_[t], 0, sizeof(*i_[t]) * NumFeatures());
} else {
memset(f_[t], 0, sizeof(*f_[t]) * NumFeatures());
}
}
// Sets the given range to random values.
void Randomize(int t, int offset, int num_features, TRand *randomizer);
// Helper returns the label and score of the best choice over a range.
int BestChoiceOverRange(int t_start, int t_end, int not_this, int null_ch, float *rating,
float *certainty) const;
// Helper returns the rating and certainty of the choice over a range in t.
void ScoresOverRange(int t_start, int t_end, int choice, int null_ch, float *rating,
float *certainty) const;
// Returns the index (label) of the best value at the given timestep,
// and if not null, sets the score to the log of the corresponding value.
int BestLabel(int t, float *score) const {
return BestLabel(t, -1, -1, score);
}
// Returns the index (label) of the best value at the given timestep,
// excluding not_this and not_that, and if not null, sets the score to the
// log of the corresponding value.
int BestLabel(int t, int not_this, int not_that, float *score) const;
// Returns the best start position out of range (into which both start and end
// must fit) to obtain the highest cumulative score for the given labels.
int PositionOfBestMatch(const std::vector<int> &labels, int start, int end) const;
// Returns the cumulative score of the given labels starting at start, and
// using one label per time-step.
TFloat ScoreOfLabels(const std::vector<int> &labels, int start) const;
// Helper function sets all the outputs for a single timestep, such that
// label has value ok_score, and the other labels share 1 - ok_score.
// Assumes float mode.
void SetActivations(int t, int label, float ok_score);
// Modifies the values, only if needed, so that the given label is
// the winner at the given time step t.
// Assumes float mode.
void EnsureBestLabel(int t, int label);
// Helper function converts prob to certainty taking the minimum into account.
static float ProbToCertainty(float prob);
// Returns true if there is any bad value that is suspiciously like a GT
// error. Assuming that *this is the difference(gradient) between target
// and forward output, returns true if there is a large negative value
// (correcting a very confident output) for which there is no corresponding
// positive value in an adjacent timestep for the same feature index. This
// allows the box-truthed samples to make fine adjustments to position while
// stopping other disagreements of confident output with ground truth.
bool AnySuspiciousTruth(float confidence_thr) const;
// Reads a single timestep to floats in the range [-1, 1].
void ReadTimeStep(int t, TFloat *output) const;
// Adds a single timestep to floats.
void AddTimeStep(int t, TFloat *inout) const;
// Adds part of a single timestep to floats.
void AddTimeStepPart(int t, int offset, int num_features, float *inout) const;
// Writes a single timestep from floats in the range [-1, 1].
void WriteTimeStep(int t, const TFloat *input);
// Writes a single timestep from floats in the range [-1, 1] writing only
// num_features elements of input to (*this)[t], starting at offset.
void WriteTimeStepPart(int t, int offset, int num_features, const TFloat *input);
// Maxpools a single time step from src.
void MaxpoolTimeStep(int dest_t, const NetworkIO &src, int src_t, int *max_line);
// Runs maxpool backward, using maxes to index timesteps in *this.
void MaxpoolBackward(const NetworkIO &fwd, const GENERIC_2D_ARRAY<int> &maxes);
// Returns the min over time of the maxes over features of the outputs.
float MinOfMaxes() const;
  // Returns the max over all timesteps and features (the original comment
  // said "min over time", but the code delegates to the arrays' Max()).
  float Max() const {
    return int_mode_ ? i_.Max() : f_.Max();
  }
// Computes combined results for a combiner that chooses between an existing
// input and itself, with an additional output to indicate the choice.
void CombineOutputs(const NetworkIO &base_output, const NetworkIO &combiner_output);
// Computes deltas for a combiner that chooses between 2 sets of inputs.
void ComputeCombinerDeltas(const NetworkIO &fwd_deltas, const NetworkIO &base_output);
// Copies the array checking that the types match.
void CopyAll(const NetworkIO &src);
// Adds the array to a float array, with scaling to [-1, 1] if the src is int.
void AddAllToFloat(const NetworkIO &src);
// Subtracts the array from a float array. src must also be float.
void SubtractAllFromFloat(const NetworkIO &src);
// Copies src to *this, with maxabs normalization to match scale.
void CopyWithNormalization(const NetworkIO &src, const NetworkIO &scale);
// Multiplies the float data by the given factor.
void ScaleFloatBy(float factor) {
f_ *= factor;
}
// Copies src to *this with independent reversal of the y dimension.
void CopyWithYReversal(const NetworkIO &src);
// Copies src to *this with independent reversal of the x dimension.
void CopyWithXReversal(const NetworkIO &src);
// Copies src to *this with independent transpose of the x and y dimensions.
void CopyWithXYTranspose(const NetworkIO &src);
// Copies src to *this, at the given feature_offset, returning the total
// feature offset after the copy. Multiple calls will stack outputs from
// multiple sources in feature space.
int CopyPacking(const NetworkIO &src, int feature_offset);
// Opposite of CopyPacking, fills *this with a part of src, starting at
// feature_offset, and picking num_features. Resizes *this to match.
void CopyUnpacking(const NetworkIO &src, int feature_offset, int num_features);
// Transposes the float part of *this into dest.
void Transpose(TransposedArray *dest) const;
// Clips the content of a single time-step to +/-range.
void ClipVector(int t, float range);
  // Applies Func to timestep t of *this (u) and multiplies the result by v
  // component-wise, putting the product in *product. The outputs are TFloat.
  // NOTE(review): the original comment claimed "*this and v may be int or
  // float, but must match", and an int branch exists below — yet the
  // ASSERT_HOSTs require float mode for both, so the int branch is
  // unreachable unless asserts are compiled out. Confirm intent before
  // removing either the asserts or the branch.
  template <class Func>
  void FuncMultiply(const NetworkIO &v_io, int t, TFloat *product) {
    Func f;
    ASSERT_HOST(!int_mode_);
    ASSERT_HOST(!v_io.int_mode_);
    int dim = f_.dim2();
    if (int_mode_) {
      // Unreachable given the asserts above; kept for safety if asserts are
      // disabled. Scales int8 values back to the [-1, 1] float range.
      const int8_t *u = i_[t];
      const int8_t *v = v_io.i_[t];
      for (int i = 0; i < dim; ++i) {
        product[i] = f(u[i] / static_cast<TFloat>(INT8_MAX)) * v[i] / INT8_MAX;
      }
    } else {
      const float *u = f_[t];
      const float *v = v_io.f_[t];
      for (int i = 0; i < dim; ++i) {
        product[i] = f(u[i]) * v[i];
      }
    }
  }
// Applies Func to *this (u) at u_t, and multiplies the result by v[v_t] * w,
// component-wise, putting the product in *product.
// All NetworkIOs are assumed to be float.
template <class Func>
void FuncMultiply3(int u_t, const NetworkIO &v_io, int v_t, const TFloat *w,
TFloat *product) const {
ASSERT_HOST(!int_mode_);
ASSERT_HOST(!v_io.int_mode_);
Func f;
const float *u = f_[u_t];
const float *v = v_io.f_[v_t];
int dim = f_.dim2();
for (int i = 0; i < dim; ++i) {
product[i] = f(u[i]) * v[i] * w[i];
}
}
  // Applies Func to *this (u) at timestep t, and multiplies the result by
  // v_io[t] * w, component-wise, *adding* the product to *product.
  // (Unlike FuncMultiply3, both inputs use the same timestep t; the original
  // comment mentioned u_t/v_t, which do not exist in this signature.)
  // All NetworkIOs are assumed to be float.
  template <class Func>
  void FuncMultiply3Add(const NetworkIO &v_io, int t, const TFloat *w, TFloat *product) const {
    ASSERT_HOST(!int_mode_);
    ASSERT_HOST(!v_io.int_mode_);
    Func f;
    const float *u = f_[t];
    const float *v = v_io.f_[t];
    int dim = f_.dim2();
    for (int i = 0; i < dim; ++i) {
      product[i] += f(u[i]) * v[i] * w[i];
    }
  }
// Applies Func1 to *this (u), Func2 to v, and multiplies the result by w,
// component-wise, putting the product in product, all at timestep t, except
// w, which is a simple array. All NetworkIOs are assumed to be float.
template <class Func1, class Func2>
void Func2Multiply3(const NetworkIO &v_io, int t, const TFloat *w, TFloat *product) const {
ASSERT_HOST(!int_mode_);
ASSERT_HOST(!v_io.int_mode_);
Func1 f;
Func2 g;
const float *u = f_[t];
const float *v = v_io.f_[t];
int dim = f_.dim2();
for (int i = 0; i < dim; ++i) {
product[i] = f(u[i]) * g(v[i]) * w[i];
}
}
private:
// Returns the padding required for the given number of features in order
// for the SIMD operations to be safe.
static int GetPadding(int num_features);
// Choice of float vs 8 bit int for data.
GENERIC_2D_ARRAY<float> f_;
GENERIC_2D_ARRAY<int8_t> i_;
// Which of f_ and i_ are we actually using.
bool int_mode_;
// Stride for 2d input data.
StrideMap stride_map_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_NETWORKIO_H_
|
2301_81045437/tesseract
|
src/lstm/networkio.h
|
C++
|
apache-2.0
| 15,249
|
///////////////////////////////////////////////////////////////////////
// File: networkscratch.h
// Description: Scratch space for Network layers that hides distinction
// between float/int implementations.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_NETWORKSCRATCH_H_
#define TESSERACT_LSTM_NETWORKSCRATCH_H_
#include <mutex>
#include "matrix.h"
#include "networkio.h"
namespace tesseract {
// Generic scratch space for network layers. Provides NetworkIO that can store
// a complete set (over time) of intermediates, and vector<float>
// scratch space that auto-frees after use. The aim here is to provide a set
// of temporary buffers to network layers that can be reused between layers
// and don't have to be reallocated on each call.
class NetworkScratch {
public:
  NetworkScratch() : int_mode_(false) {}
  ~NetworkScratch() = default;
  // Sets the network representation. If the representation is integer, then
  // default (integer) NetworkIOs are separated from the always-float variety.
  // This saves memory by having separate int-specific and float-specific
  // stacks. If the network representation is float, then all NetworkIOs go
  // to the float stack.
  void set_int_mode(bool int_mode) {
    int_mode_ = int_mode;
  }
  // Class that acts like a NetworkIO (by having an implicit cast operator),
  // yet actually holds a pointer to NetworkIOs in the source NetworkScratch,
  // and knows how to unstack the borrowed pointers on destruction.
  // NOTE(review): IO is not copy-disabled; copying one would make the
  // destructor Return the same NetworkIO twice — confirm no callers copy.
  class IO {
  public:
    // The NetworkIO should be sized after construction.
    // Borrows from the int stack only if both the scratch and the source are
    // in int mode; otherwise borrows from the always-float stack.
    IO(const NetworkIO &src, NetworkScratch *scratch)
        : int_mode_(scratch->int_mode_ && src.int_mode()), scratch_space_(scratch) {
      network_io_ =
          int_mode_ ? scratch_space_->int_stack_.Borrow() : scratch_space_->float_stack_.Borrow();
    }
    // Default constructor for arrays. Use one of the Resize functions
    // below to initialize and size.
    IO() : int_mode_(false), network_io_(nullptr), scratch_space_(nullptr) {}
    ~IO() {
      // Hand the borrowed NetworkIO back to whichever stack lent it.
      if (scratch_space_ == nullptr) {
        ASSERT_HOST(network_io_ == nullptr);
      } else if (int_mode_) {
        scratch_space_->int_stack_.Return(network_io_);
      } else {
        scratch_space_->float_stack_.Return(network_io_);
      }
    }
    // Resizes the array (and stride), avoiding realloc if possible, to the
    // size from various size specs:
    // Same time size, given number of features.
    void Resize(const NetworkIO &src, int num_features, NetworkScratch *scratch) {
      if (scratch_space_ == nullptr) {
        // First use: borrow a NetworkIO now; later calls only resize it.
        int_mode_ = scratch->int_mode_ && src.int_mode();
        scratch_space_ = scratch;
        network_io_ =
            int_mode_ ? scratch_space_->int_stack_.Borrow() : scratch_space_->float_stack_.Borrow();
      }
      network_io_->Resize(src, num_features);
    }
    // Resizes to a specific size as a temp buffer. No batches, no y-dim.
    void Resize2d(bool int_mode, int width, int num_features, NetworkScratch *scratch) {
      if (scratch_space_ == nullptr) {
        int_mode_ = scratch->int_mode_ && int_mode;
        scratch_space_ = scratch;
        network_io_ =
            int_mode_ ? scratch_space_->int_stack_.Borrow() : scratch_space_->float_stack_.Borrow();
      }
      network_io_->Resize2d(int_mode, width, num_features);
    }
    // Resize forcing a float representation with the width of src and the given
    // number of features.
    void ResizeFloat(const NetworkIO &src, int num_features, NetworkScratch *scratch) {
      if (scratch_space_ == nullptr) {
        int_mode_ = false;
        scratch_space_ = scratch;
        network_io_ = scratch_space_->float_stack_.Borrow();
      }
      network_io_->ResizeFloat(src, num_features);
    }
    // Returns a ref to a NetworkIO that enables *this to be treated as if
    // it were just a NetworkIO*.
    NetworkIO &operator*() {
      return *network_io_;
    }
    NetworkIO *operator->() {
      return network_io_;
    }
    operator NetworkIO *() {
      return network_io_;
    }
  private:
    // True if network_io_ was borrowed from the int stack; false means it
    // came from the always-float stack. (The original comment had this
    // backwards.)
    bool int_mode_;
    // The NetworkIO that we have borrowed from the scratch_space_.
    NetworkIO *network_io_;
    // The source scratch_space_. Borrowed pointer, used to free the
    // NetworkIO. Don't delete!
    NetworkScratch *scratch_space_;
  }; // class IO.
  // Class that acts like a fixed array of float, yet actually uses space
  // from a vector<float> in the source NetworkScratch, and knows how
  // to unstack the borrowed vector on destruction.
  class FloatVec {
  public:
    // The array will have size elements in it, uninitialized.
    FloatVec(int size, NetworkScratch *scratch) : vec_(nullptr), scratch_space_(scratch) {
      Init(size, scratch);
    }
    // Default constructor is for arrays. Use Init to setup.
    FloatVec() : vec_(nullptr), data_(nullptr), scratch_space_(nullptr) {}
    ~FloatVec() {
      if (scratch_space_ != nullptr) {
        scratch_space_->vec_stack_.Return(vec_);
      }
    }
    // Borrows a vector and sizes it to `reserve` elements; note the first
    // (`size`) argument is currently ignored.
    void Init(int /*size*/, int reserve, NetworkScratch *scratch) {
      if (scratch_space_ != nullptr && vec_ != nullptr) {
        scratch_space_->vec_stack_.Return(vec_);
      }
      scratch_space_ = scratch;
      vec_ = scratch_space_->vec_stack_.Borrow();
      // TODO: optimize.
      vec_->resize(reserve);
      data_ = &(*vec_)[0];
    }
    void Init(int size, NetworkScratch *scratch) {
      Init(size, size, scratch);
    }
    // Use the cast operator instead of operator[] so the FloatVec can be used
    // as a TFloat* argument to a function call.
    operator TFloat *() const {
      return data_;
    }
    TFloat *get() {
      return data_;
    }
  private:
    // Vector borrowed from the scratch space. Use Return to free it.
    std::vector<TFloat> *vec_;
    // Short-cut pointer to the underlying array.
    TFloat *data_;
    // The source scratch_space_. Borrowed pointer, used to free the
    // vector. Don't delete!
    NetworkScratch *scratch_space_;
  }; // class FloatVec
  // Class that acts like a 2-D array of TFloat, yet actually uses space
  // from the source NetworkScratch, and knows how to unstack the borrowed
  // array on destruction.
  class GradientStore {
  public:
    // Default constructor is for arrays. Use Init to setup.
    GradientStore() : array_(nullptr), scratch_space_(nullptr) {}
    ~GradientStore() {
      if (scratch_space_ != nullptr) {
        scratch_space_->array_stack_.Return(array_);
      }
    }
    // Borrows a TransposedArray and resizes it to size1 x size2, zero-filled.
    void Init(int size1, int size2, NetworkScratch *scratch) {
      if (scratch_space_ != nullptr && array_ != nullptr) {
        scratch_space_->array_stack_.Return(array_);
      }
      scratch_space_ = scratch;
      array_ = scratch_space_->array_stack_.Borrow();
      array_->Resize(size1, size2, 0.0);
    }
    // Accessors to get to the underlying TransposedArray.
    TransposedArray *get() const {
      return array_;
    }
    const TransposedArray &operator*() const {
      return *array_;
    }
  private:
    // Array borrowed from the scratch space. Use Return to free it.
    TransposedArray *array_;
    // The source scratch_space_. Borrowed pointer, used to free the
    // vector. Don't delete!
    NetworkScratch *scratch_space_;
  }; // class GradientStore
  // Class that does the work of holding a stack of objects, a stack pointer
  // and a vector of in-use flags, so objects can be returned out of order.
  // It is safe to attempt to Borrow/Return in multiple threads.
  template <typename T>
  class Stack {
  public:
    Stack() = default;
    ~Stack() {
      // The Stack owns every object Borrow ever created.
      for (auto data : stack_) {
        delete data;
      }
    }
    // Lends out the next free item, creating one if none available, sets
    // the used flags and increments the stack top.
    T *Borrow() {
      std::lock_guard<std::mutex> lock(mutex_);
      if (stack_top_ == stack_.size()) {
        stack_.push_back(new T);
        flags_.push_back(false);
      }
      flags_[stack_top_] = true;
      return stack_[stack_top_++];
    }
    // Takes back the given item, and marks it free. Item does not have to be
    // the most recently lent out, but free slots don't get re-used until the
    // blocking item is returned. The assumption is that there will only be
    // small, temporary variations from true stack use. (Determined by the order
    // of destructors within a local scope.)
    void Return(T *item) {
      std::lock_guard<std::mutex> lock(mutex_);
      // Linear search will do.
      int index = stack_top_;
      while (--index >= 0 && stack_[index] != item) {
      }
      if (index >= 0) {
        flags_[index] = false;
      }
      // Pop any trailing free slots so they can be re-borrowed.
      while (stack_top_ > 0 && !flags_[stack_top_ - 1]) {
        --stack_top_;
      }
    }
  private:
    std::vector<T *> stack_;
    std::vector<bool> flags_;
    unsigned stack_top_ = 0;
    std::mutex mutex_;
  }; // class Stack.
private:
  // If true, the network weights are int8_t, if false, float.
  bool int_mode_;
  // Stacks of NetworkIO and vector<float>. Once allocated, they are not
  // deleted until the NetworkScratch is deleted.
  Stack<NetworkIO> int_stack_;
  Stack<NetworkIO> float_stack_;
  Stack<std::vector<TFloat>> vec_stack_;
  Stack<TransposedArray> array_stack_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_NETWORKSCRATCH_H_
|
2301_81045437/tesseract
|
src/lstm/networkscratch.h
|
C++
|
apache-2.0
| 10,062
|
/////////////////////////////////////////////////////////////////////////
// File: parallel.cpp
// Description: Runs networks in parallel on the same input.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "parallel.h"
#ifdef _OPENMP
# include <omp.h>
#endif
#include "functions.h" // For conditional undef of _OPENMP.
#include "networkscratch.h"
namespace tesseract {
// ni_ and no_ will be set by AddToStack.
// Overwrites the NT_PARALLEL type that the Plumbing base constructor set,
// with the specific parallel flavor (e.g. NT_PAR_RL_LSTM, NT_REPLICATED).
Parallel::Parallel(const std::string &name, NetworkType type) : Plumbing(name) {
  type_ = type;
}
// Returns the shape output from the network given an input shape (which may
// be partially unknown ie zero). Since a parallel stack interlaces outputs,
// the result is stack_[0]'s shape with the depths of all elements summed.
StaticShape Parallel::OutputShape(const StaticShape &input_shape) const {
  StaticShape result = stack_[0]->OutputShape(input_shape);
  int total_depth = result.depth();
  for (size_t index = 1; index < stack_.size(); ++index) {
    total_depth += stack_[index]->OutputShape(input_shape).depth();
  }
  result.set_depth(total_depth);
  return result;
}
// Runs forward propagation of activations on the input line.
// See Network for a detailed discussion of the arguments.
// NOTE(review): input_transpose is unused here (a transpose is computed
// locally for NT_REPLICATED instead) — confirm the override signature
// requires it before considering removal.
void Parallel::Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
                       NetworkScratch *scratch, NetworkIO *output) {
  bool parallel_debug = false;
  // If this parallel is a replicator of convolvers, or holds a 1-d LSTM pair,
  // or a 2-d LSTM quad, do debug locally, and don't pass the flag on.
  if (debug && type_ != NT_PARALLEL) {
    parallel_debug = true;
    debug = false;
  }
  int stack_size = stack_.size();
  if (type_ == NT_PAR_2D_LSTM) {
    // Special case, run parallel in parallel. Each sub-network gets its own
    // scratch NetworkIO so the OpenMP threads do not share an output buffer.
    std::vector<NetworkScratch::IO> results(stack_size);
    for (int i = 0; i < stack_size; ++i) {
      results[i].Resize(input, stack_[i]->NumOutputs(), scratch);
    }
#ifdef _OPENMP
# pragma omp parallel for num_threads(stack_size)
#endif
    for (int i = 0; i < stack_size; ++i) {
      stack_[i]->Forward(debug, input, nullptr, scratch, results[i]);
    }
    // Now pack all the results (serially) into the output.
    int out_offset = 0;
    output->Resize(*results[0], NumOutputs());
    for (int i = 0; i < stack_size; ++i) {
      out_offset = output->CopyPacking(*results[i], out_offset);
    }
  } else {
    // Revolving intermediate result, reused by each sub-network in turn.
    NetworkScratch::IO result(input, scratch);
    // Source for divided replicated.
    // NOTE(review): source_part appears unused in this function — confirm
    // before removing.
    NetworkScratch::IO source_part;
    TransposedArray *src_transpose = nullptr;
    if (IsTraining() && type_ == NT_REPLICATED) {
      // Make a transposed copy of the input once, shared by all replicas,
      // so each does not have to recompute it for its own Backward.
      input.Transpose(&transposed_input_);
      src_transpose = &transposed_input_;
    }
    // Run each network, putting the outputs into result.
    int out_offset = 0;
    for (int i = 0; i < stack_size; ++i) {
      stack_[i]->Forward(debug, input, src_transpose, scratch, result);
      // All networks must have the same output width
      if (i == 0) {
        output->Resize(*result, NumOutputs());
      } else {
        ASSERT_HOST(result->Width() == output->Width());
      }
      out_offset = output->CopyPacking(*result, out_offset);
    }
  }
#ifndef GRAPHICS_DISABLED
  if (parallel_debug) {
    DisplayForward(*output);
  }
#endif
}
// Runs backward propagation of errors on the deltas line.
// See Network for a detailed discussion of the arguments.
bool Parallel::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                        NetworkIO *back_deltas) {
  // If this parallel is a replicator of convolvers, or holds a 1-d LSTM pair,
  // or a 2-d LSTM quad, do debug locally, and don't pass the flag on.
  if (debug && type_ != NT_PARALLEL) {
#ifndef GRAPHICS_DISABLED
    DisplayBackward(fwd_deltas);
#endif
    debug = false;
  }
  auto stack_size = stack_.size();
  if (type_ == NT_PAR_2D_LSTM) {
    // Special case, run parallel in parallel.
    std::vector<NetworkScratch::IO> in_deltas(stack_size);
    std::vector<NetworkScratch::IO> out_deltas(stack_size);
    // Split the forward deltas for each stack element.
    int feature_offset = 0;
    for (unsigned i = 0; i < stack_.size(); ++i) {
      int num_features = stack_[i]->NumOutputs();
      in_deltas[i].Resize(fwd_deltas, num_features, scratch);
      out_deltas[i].Resize(fwd_deltas, stack_[i]->NumInputs(), scratch);
      in_deltas[i]->CopyUnpacking(fwd_deltas, feature_offset, num_features);
      feature_offset += num_features;
    }
// NOTE(review): the loop variable below is unsigned; older OpenMP levels
// (pre-3.0) require a signed induction variable — confirm the minimum
// supported toolchain.
#ifdef _OPENMP
# pragma omp parallel for num_threads(stack_size)
#endif
    for (unsigned i = 0; i < stack_size; ++i) {
      // Element 0 writes directly into back_deltas; the other elements'
      // results are accumulated into it serially below.
      stack_[i]->Backward(debug, *in_deltas[i], scratch, i == 0 ? back_deltas : out_deltas[i]);
    }
    if (needs_to_backprop_) {
      for (unsigned i = 1; i < stack_size; ++i) {
        back_deltas->AddAllToFloat(*out_deltas[i]);
      }
    }
  } else {
    // Revolving partial deltas.
    NetworkScratch::IO in_deltas(fwd_deltas, scratch);
    // The sum of deltas from different sources, which will eventually go into
    // back_deltas.
    NetworkScratch::IO out_deltas;
    int feature_offset = 0;
    for (unsigned i = 0; i < stack_.size(); ++i) {
      int num_features = stack_[i]->NumOutputs();
      in_deltas->CopyUnpacking(fwd_deltas, feature_offset, num_features);
      feature_offset += num_features;
      if (stack_[i]->Backward(debug, *in_deltas, scratch, back_deltas)) {
        if (i == 0) {
          out_deltas.ResizeFloat(*back_deltas, back_deltas->NumFeatures(), scratch);
          out_deltas->CopyAll(*back_deltas);
        } else if (back_deltas->NumFeatures() == out_deltas->NumFeatures()) {
          // Widths are allowed to be different going back, as we may have
          // input nets, so only accumulate the deltas if the widths are the
          // same.
          out_deltas->AddAllToFloat(*back_deltas);
        }
      }
    }
    if (needs_to_backprop_) {
      back_deltas->CopyAll(*out_deltas);
    }
  }
  if (needs_to_backprop_) {
    // Average the accumulated deltas over the number of parallel elements.
    back_deltas->ScaleFloatBy(1.0f / stack_size);
  }
  return needs_to_backprop_;
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/parallel.cpp
|
C++
|
apache-2.0
| 6,758
|
///////////////////////////////////////////////////////////////////////
// File: parallel.h
// Description: Runs networks in parallel on the same input.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_PARALLEL_H_
#define TESSERACT_LSTM_PARALLEL_H_
#include "plumbing.h"
namespace tesseract {
// Runs multiple networks in parallel, interlacing their outputs.
class Parallel : public Plumbing {
public:
  // ni_ and no_ will be set by AddToStack.
  TESS_API
  Parallel(const std::string &name, NetworkType type);
  // Returns the shape output from the network given an input shape (which may
  // be partially unknown ie zero).
  StaticShape OutputShape(const StaticShape &input_shape) const override;
  // Returns the VGSL-style spec string describing this network.
  std::string spec() const override {
    std::string spec;
    if (type_ == NT_PAR_2D_LSTM) {
      // We have 4 LSTMs operating in parallel here, so the size of each is
      // the number of outputs/4.
      spec += "L2xy" + std::to_string(no_ / 4);
    } else if (type_ == NT_PAR_RL_LSTM) {
      // We have 2 LSTMs operating in parallel here, so the size of each is
      // the number of outputs/2.
      if (stack_[0]->type() == NT_LSTM_SUMMARY) {
        spec += "Lbxs" + std::to_string(no_ / 2);
      } else {
        spec += "Lbx" + std::to_string(no_ / 2);
      }
    } else {
      if (type_ == NT_REPLICATED) {
        // All replicas share the same spec, so only stack_[0] is printed;
        // the "(" opened here is closed by the shared ")" below.
        spec += "R" + std::to_string(stack_.size()) + "(" + stack_[0]->spec();
      } else {
        // Bug fix: open the parenthesis that the trailing ")" below closes.
        // Previously a plain parallel stack produced an unbalanced spec
        // such as "LfxNN)".
        spec += "(";
        for (auto &it : stack_) {
          spec += it->spec();
        }
      }
      spec += ")";
    }
    return spec;
  }
  // Runs forward propagation of activations on the input line.
  // See Network for a detailed discussion of the arguments.
  void Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
               NetworkScratch *scratch, NetworkIO *output) override;
  // Runs backward propagation of errors on the deltas line.
  // See Network for a detailed discussion of the arguments.
  bool Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                NetworkIO *back_deltas) override;
private:
  // If *this is a NT_REPLICATED, then it feeds a replicated network with
  // identical inputs, and it would be extremely wasteful for them to each
  // calculate and store the same transpose of the inputs, so Parallel does it
  // and passes a pointer to the replicated network, allowing it to use the
  // transpose on the next call to Backward.
  TransposedArray transposed_input_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_PARALLEL_H_
|
2301_81045437/tesseract
|
src/lstm/parallel.h
|
C++
|
apache-2.0
| 3,222
|
///////////////////////////////////////////////////////////////////////
// File: plumbing.cpp
// Description: Base class for networks that organize other networks
// eg series or parallel.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#include "plumbing.h"
namespace tesseract {
// ni_ and no_ will be set by AddToStack.
// The type defaults to NT_PARALLEL; subclasses (e.g. Parallel) overwrite
// type_ in their own constructors.
Plumbing::Plumbing(const std::string &name) : Network(NT_PARALLEL, name, 0, 0) {}
// Suspends/Enables training by setting the training_ flag on this network
// and recursively on every sub-network. Serialize and DeSerialize only
// operate on the run-time data if state is false.
void Plumbing::SetEnableTraining(TrainingState state) {
  Network::SetEnableTraining(state);
  for (size_t idx = 0; idx < stack_.size(); ++idx) {
    stack_[idx]->SetEnableTraining(state);
  }
}
// Sets flags that control the action of the network (see NetworkFlags enum
// for bit values) on this network and every sub-network.
void Plumbing::SetNetworkFlags(uint32_t flags) {
  Network::SetNetworkFlags(flags);
  for (size_t idx = 0; idx < stack_.size(); ++idx) {
    stack_[idx]->SetNetworkFlags(flags);
  }
}
// Sets up the network for training, initializing sub-network weights of
// scale `range` using `randomizer` (a borrowed pointer that must outlive the
// network and must not be deleted by it). Returns the total number of
// weights initialized across all sub-networks.
int Plumbing::InitWeights(float range, TRand *randomizer) {
  int total = 0;
  for (auto *network : stack_) {
    total += network->InitWeights(range, randomizer);
  }
  num_weights_ = total;
  return num_weights_;
}
// Recursively searches the network for softmaxes with old_no outputs and
// remaps their outputs according to code_map (see network.h for details).
// Returns the new total weight count.
int Plumbing::RemapOutputs(int old_no, const std::vector<int> &code_map) {
  int total = 0;
  for (auto *network : stack_) {
    total += network->RemapOutputs(old_no, code_map);
  }
  num_weights_ = total;
  return num_weights_;
}
// Converts every sub-network from float weights to int weights.
void Plumbing::ConvertToInt() {
  for (size_t idx = 0; idx < stack_.size(); ++idx) {
    stack_[idx]->ConvertToInt();
  }
}
// Provides a pointer to a TRand to every sub-network that cares to use it.
// randomizer is borrowed: it must outlive the network and must not be
// deleted by any of the networks.
void Plumbing::SetRandomizer(TRand *randomizer) {
  for (auto *network : stack_) {
    network->SetRandomizer(randomizer);
  }
}
// Adds the given network to the stack and updates the input/output widths:
// the first element defines both; for NT_SERIES each new element must accept
// the current output width and redefines no_; for all parallel types the
// input widths must match and the output widths accumulate.
void Plumbing::AddToStack(Network *network) {
  if (stack_.empty()) {
    // First element defines both widths.
    ni_ = network->NumInputs();
    no_ = network->NumOutputs();
  } else if (type_ == NT_SERIES) {
    // ni is input of first, no output of last, others match output to input.
    ASSERT_HOST(no_ == network->NumInputs());
    no_ = network->NumOutputs();
  } else {
    // All parallel types. Output is sum of outputs, inputs all match.
    ASSERT_HOST(ni_ == network->NumInputs());
    no_ += network->NumOutputs();
  }
  stack_.push_back(network);
}
// Sets needs_to_backprop_ to needs_backprop and calls on sub-network
// according to needs_backprop || any weights in this network.
// Returns true if this network or any sub-network needs backprop.
bool Plumbing::SetupNeedsBackprop(bool needs_backprop) {
  if (IsTraining()) {
    needs_to_backprop_ = needs_backprop;
    bool retval = needs_backprop;
    // Deliberately calls SetupNeedsBackprop on EVERY child (no
    // short-circuit): each call sets the child's own flag as a side effect.
    for (auto &i : stack_) {
      if (i->SetupNeedsBackprop(needs_backprop)) {
        retval = true;
      }
    }
    return retval;
  }
  // Frozen networks don't do backprop.
  needs_to_backprop_ = false;
  return false;
}
// Returns an integer reduction factor that the network applies to the
// time sequence. Assumes that any 2-d is already eliminated. Used for
// scaling bounding boxes of truth data.
// WARNING: if GlobalMinimax is used to vary the scale, this will return
// the last used scale factor. Call it before any forward, and it will return
// the minimum scale factor of the paths through the GlobalMinimax.
int Plumbing::XScaleFactor() const {
  // The first sub-network determines the overall time-axis scaling.
  return stack_.front()->XScaleFactor();
}
// Provides the (minimum) x scale factor to the network (of interest only to
// input units) so they can determine how to scale bounding boxes.
void Plumbing::CacheXScaleFactor(int factor) {
  // Propagate the cached scale factor to every sub-network.
  for (auto *sub : stack_) {
    sub->CacheXScaleFactor(factor);
  }
}
// Provides debug output on the weights.
void Plumbing::DebugWeights() {
  // Ask each sub-network to dump its own weights.
  for (auto *sub : stack_) {
    sub->DebugWeights();
  }
}
// Returns a set of strings representing the layer-ids of all layers below.
void Plumbing::EnumerateLayers(const std::string *prefix, std::vector<std::string> &layers) const {
for (size_t i = 0; i < stack_.size(); ++i) {
std::string layer_name;
if (prefix) {
layer_name = *prefix;
}
layer_name += ":" + std::to_string(i);
if (stack_[i]->IsPlumbingType()) {
auto *plumbing = static_cast<Plumbing *>(stack_[i]);
plumbing->EnumerateLayers(&layer_name, layers);
} else {
layers.push_back(layer_name);
}
}
}
// Returns a pointer to the network layer corresponding to the given id.
Network *Plumbing::GetLayer(const char *id) const {
  // The id starts with a decimal index into stack_.
  char *rest;
  int index = strtol(id, &rest, 10);
  if (index < 0 || static_cast<unsigned>(index) >= stack_.size()) {
    return nullptr;
  }
  Network *sub = stack_[index];
  if (!sub->IsPlumbingType()) {
    return sub;
  }
  // Nested plumbing: the id must continue with ":<sub-layer-id>".
  ASSERT_HOST(*rest == ':');
  return static_cast<Plumbing *>(sub)->GetLayer(rest + 1);
}
// Returns a pointer to the learning rate for the given layer id.
float *Plumbing::LayerLearningRatePtr(const char *id) {
char *next_id;
int index = strtol(id, &next_id, 10);
if (index < 0 || static_cast<unsigned>(index) >= stack_.size()) {
return nullptr;
}
if (stack_[index]->IsPlumbingType()) {
auto *plumbing = static_cast<Plumbing *>(stack_[index]);
ASSERT_HOST(*next_id == ':');
return plumbing->LayerLearningRatePtr(next_id + 1);
}
if (static_cast<unsigned>(index) >= learning_rates_.size()) {
return nullptr;
}
return &learning_rates_[index];
}
// Writes to the given file. Returns false in case of error.
bool Plumbing::Serialize(TFile *fp) const {
  if (!Network::Serialize(fp)) {
    return false;
  }
  // Can't use PointerVector::Serialize here as we need a special DeSerialize.
  uint32_t size = stack_.size();
  if (!fp->Serialize(&size)) {
    return false;
  }
  for (auto *sub : stack_) {
    if (!sub->Serialize(fp)) {
      return false;
    }
  }
  // Layer-specific learning rates are only stored when the flag is set.
  if (network_flags_ & NF_LAYER_SPECIFIC_LR) {
    return fp->Serialize(learning_rates_);
  }
  return true;
}
// Reads from the given file. Returns false in case of error.
bool Plumbing::DeSerialize(TFile *fp) {
  // Discard any existing sub-networks before reading the new stack.
  for (auto *sub : stack_) {
    delete sub;
  }
  stack_.clear();
  no_ = 0; // We will be modifying this as we AddToStack.
  uint32_t size;
  if (!fp->DeSerialize(&size)) {
    return false;
  }
  while (size-- > 0) {
    Network *sub = CreateFromFile(fp);
    if (sub == nullptr) {
      return false;
    }
    AddToStack(sub);
  }
  // Layer-specific learning rates are only present when the flag is set.
  if (network_flags_ & NF_LAYER_SPECIFIC_LR) {
    return fp->DeSerialize(learning_rates_);
  }
  return true;
}
// Updates the weights using the given learning rate, momentum and adam_beta.
// num_samples is used in the adam computation iff use_adam_ is true.
void Plumbing::Update(float learning_rate, float momentum, float adam_beta, int num_samples) {
  for (size_t i = 0; i < stack_.size(); ++i) {
    if (network_flags_ & NF_LAYER_SPECIFIC_LR) {
      if (i < learning_rates_.size()) {
        // A per-layer rate exists: it overrides the caller-supplied rate
        // for this layer AND (via the reuse of learning_rate) becomes the
        // seed for any layers appended below.
        learning_rate = learning_rates_[i];
      } else {
        // Lazily extend learning_rates_ so each layer has an entry,
        // seeded with the currently effective rate.
        learning_rates_.push_back(learning_rate);
      }
    }
    // Frozen (non-training) sub-networks are skipped.
    if (stack_[i]->IsTraining()) {
      stack_[i]->Update(learning_rate, momentum, adam_beta, num_samples);
    }
  }
}
// Sums the products of weight updates in *this and other, splitting into
// positive (same direction) in *same and negative (different direction) in
// *changed.
void Plumbing::CountAlternators(const Network &other, TFloat *same, TFloat *changed) const {
ASSERT_HOST(other.type() == type_);
const auto *plumbing = static_cast<const Plumbing *>(&other);
ASSERT_HOST(plumbing->stack_.size() == stack_.size());
for (size_t i = 0; i < stack_.size(); ++i) {
stack_[i]->CountAlternators(*plumbing->stack_[i], same, changed);
}
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/plumbing.cpp
|
C++
|
apache-2.0
| 8,866
|
///////////////////////////////////////////////////////////////////////
// File: plumbing.h
// Description: Base class for networks that organize other networks
// eg series or parallel.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_PLUMBING_H_
#define TESSERACT_LSTM_PLUMBING_H_
#include "matrix.h"
#include "network.h"
namespace tesseract {
// Holds a collection of other networks and forwards calls to each of them.
class TESS_API Plumbing : public Network {
public:
  // ni_ and no_ will be set by AddToStack.
  explicit Plumbing(const std::string &name);
  ~Plumbing() override {
    for (auto data : stack_) {
      delete data;
    }
  }
  // Plumbing owns the raw Network pointers in stack_ and deletes them in the
  // destructor, so a copy would double-delete them: forbid copy construction
  // and copy assignment (Rule of Three).
  Plumbing(const Plumbing &) = delete;
  Plumbing &operator=(const Plumbing &) = delete;
  // Returns the required shape input to the network.
  StaticShape InputShape() const override {
    return stack_[0]->InputShape();
  }
  std::string spec() const override {
    return "Sub-classes of Plumbing must implement spec()!";
  }
  // Returns true if the given type is derived from Plumbing, and thus contains
  // multiple sub-networks that can have their own learning rate.
  bool IsPlumbingType() const override {
    return true;
  }
  // Suspends/Enables training by setting the training_ flag. Serialize and
  // DeSerialize only operate on the run-time data if state is false.
  void SetEnableTraining(TrainingState state) override;
  // Sets flags that control the action of the network. See NetworkFlags enum
  // for bit values.
  void SetNetworkFlags(uint32_t flags) override;
  // Sets up the network for training. Initializes weights using weights of
  // scale `range` picked according to the random number generator `randomizer`.
  // Note that randomizer is a borrowed pointer that should outlive the network
  // and should not be deleted by any of the networks.
  // Returns the number of weights initialized.
  int InitWeights(float range, TRand *randomizer) override;
  // Recursively searches the network for softmaxes with old_no outputs,
  // and remaps their outputs according to code_map. See network.h for details.
  int RemapOutputs(int old_no, const std::vector<int> &code_map) override;
  // Converts a float network to an int network.
  void ConvertToInt() override;
  // Provides a pointer to a TRand for any networks that care to use it.
  // Note that randomizer is a borrowed pointer that should outlive the network
  // and should not be deleted by any of the networks.
  void SetRandomizer(TRand *randomizer) override;
  // Adds the given network to the stack, taking ownership of it (it is
  // deleted in the destructor).
  virtual void AddToStack(Network *network);
  // Sets needs_to_backprop_ to needs_backprop and returns true if
  // needs_backprop || any weights in this network so the next layer forward
  // can be told to produce backprop for this layer if needed.
  bool SetupNeedsBackprop(bool needs_backprop) override;
  // Returns an integer reduction factor that the network applies to the
  // time sequence. Assumes that any 2-d is already eliminated. Used for
  // scaling bounding boxes of truth data.
  // WARNING: if GlobalMinimax is used to vary the scale, this will return
  // the last used scale factor. Call it before any forward, and it will return
  // the minimum scale factor of the paths through the GlobalMinimax.
  int XScaleFactor() const override;
  // Provides the (minimum) x scale factor to the network (of interest only to
  // input units) so they can determine how to scale bounding boxes.
  void CacheXScaleFactor(int factor) override;
  // Provides debug output on the weights.
  void DebugWeights() override;
  // Returns the current stack.
  const std::vector<Network *> &stack() const {
    return stack_;
  }
  // Returns a set of strings representing the layer-ids of all layers below.
  void EnumerateLayers(const std::string *prefix, std::vector<std::string> &layers) const;
  // Returns a pointer to the network layer corresponding to the given id.
  Network *GetLayer(const char *id) const;
  // Returns the learning rate for a specific layer of the stack.
  float LayerLearningRate(const char *id) {
    const float *lr_ptr = LayerLearningRatePtr(id);
    ASSERT_HOST(lr_ptr != nullptr);
    return *lr_ptr;
  }
  // Scales the learning rate for a specific layer of the stack.
  void ScaleLayerLearningRate(const char *id, double factor) {
    float *lr_ptr = LayerLearningRatePtr(id);
    ASSERT_HOST(lr_ptr != nullptr);
    // Compute in double, then narrow explicitly to the stored float.
    *lr_ptr = static_cast<float>(*lr_ptr * factor);
  }
  // Set the learning rate for a specific layer of the stack to the given value.
  void SetLayerLearningRate(const char *id, float learning_rate) {
    float *lr_ptr = LayerLearningRatePtr(id);
    ASSERT_HOST(lr_ptr != nullptr);
    *lr_ptr = learning_rate;
  }
  // Returns a pointer to the learning rate for the given layer id, or
  // nullptr if the id is invalid.
  float *LayerLearningRatePtr(const char *id);
  // Writes to the given file. Returns false in case of error.
  bool Serialize(TFile *fp) const override;
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(TFile *fp) override;
  // Updates the weights using the given learning rate, momentum and adam_beta.
  // num_samples is used in the adam computation iff use_adam_ is true.
  void Update(float learning_rate, float momentum, float adam_beta, int num_samples) override;
  // Sums the products of weight updates in *this and other, splitting into
  // positive (same direction) in *same and negative (different direction) in
  // *changed.
  void CountAlternators(const Network &other, TFloat *same, TFloat *changed) const override;
protected:
  // The networks. Owned by this Plumbing (deleted in the destructor).
  std::vector<Network *> stack_;
  // Layer-specific learning rate iff network_flags_ & NF_LAYER_SPECIFIC_LR.
  // One element for each element of stack_.
  std::vector<float> learning_rates_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_PLUMBING_H_
|
2301_81045437/tesseract
|
src/lstm/plumbing.h
|
C++
|
apache-2.0
| 6,421
|
///////////////////////////////////////////////////////////////////////
// File: recodebeam.cpp
// Description: Beam search to decode from the re-encoded CJK as a sequence of
// smaller numbers in place of a single large code.
// Author: Ray Smith
//
// (C) Copyright 2015, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "recodebeam.h"
#include "networkio.h"
#include "pageres.h"
#include "unicharcompress.h"
#include <algorithm> // for std::reverse
namespace tesseract {
// The beam width at each code position.
const int RecodeBeamSearch::kBeamWidths[RecodedCharID::kMaxCodeLen + 1] = {
    5, 10, 16, 16, 16, 16, 16, 16, 16, 16,
};
// Human-readable names for the NodeContinuation enum values (debug output).
static const char *kNodeContNames[] = {"Anything", "OnlyDup", "NoDup"};
// Prints debug details of the node.
void RecodeNode::Print(int null_char, const UNICHARSET &unicharset,
                       int depth) const {
  // Label: either the explicit null marker or the code plus its unichar.
  if (code != null_char) {
    tprintf("label=%d, uid=%d=%s", code, unichar_id,
            unicharset.debug_str(unichar_id).c_str());
  } else {
    tprintf("null_char");
  }
  tprintf(" score=%g, c=%g,%s%s%s perm=%d, hash=%" PRIx64, score, certainty,
          start_of_dawg ? " DawgStart" : "", start_of_word ? " Start" : "",
          end_of_word ? " End" : "", permuter, code_hash);
  // Recursively dump up to `depth` predecessors on the same line.
  if (depth <= 0 || prev == nullptr) {
    tprintf("\n");
  } else {
    tprintf(" prev:");
    prev->Print(null_char, unicharset, depth - 1);
  }
}
// Borrows the pointer, which is expected to survive until *this is deleted.
RecodeBeamSearch::RecodeBeamSearch(const UnicharCompress &recoder,
                                   int null_char, bool simple_text, Dict *dict)
    : recoder_(recoder)
    , beam_size_(0)
    , top_code_(-1)
    , second_code_(-1)
    , dict_(dict)
    , space_delimited_(true)
    , is_simple_text_(simple_text)
    , null_char_(null_char) {
  // Defaults to space-delimited; the dictionary (when present) can say
  // otherwise for its language.
  if (dict_ != nullptr) {
    space_delimited_ = dict_->IsSpaceDelimitedLang();
  }
}
RecodeBeamSearch::~RecodeBeamSearch() {
  // Free all lattice steps from both the primary and secondary beams.
  for (auto *step : beam_) {
    delete step;
  }
  for (auto *step : secondary_beam_) {
    delete step;
  }
}
// Decodes the set of network outputs, storing the lattice internally.
void RecodeBeamSearch::Decode(const NetworkIO &output, double dict_ratio,
                              double cert_offset, double worst_dict_cert,
                              const UNICHARSET *charset, int lstm_choice_mode) {
  beam_size_ = 0;
  if (lstm_choice_mode) {
    timesteps.clear();
  }
  const int width = output.Width();
  for (int t = 0; t < width; ++t) {
    const float *activations = output.f(t);
    ComputeTopN(activations, output.NumFeatures(), kBeamWidths[0]);
    DecodeStep(activations, t, dict_ratio, cert_offset, worst_dict_cert,
               charset);
    if (lstm_choice_mode) {
      // Record per-timestep alternatives for the choice iterator.
      SaveMostCertainChoices(activations, output.NumFeatures(), charset, t);
    }
  }
}
void RecodeBeamSearch::Decode(const GENERIC_2D_ARRAY<float> &output,
                              double dict_ratio, double cert_offset,
                              double worst_dict_cert,
                              const UNICHARSET *charset) {
  beam_size_ = 0;
  const int width = output.dim1();
  // One decode step per row (timestep) of the raw output array.
  for (int t = 0; t < width; ++t) {
    ComputeTopN(output[t], output.dim2(), kBeamWidths[0]);
    DecodeStep(output[t], t, dict_ratio, cert_offset, worst_dict_cert, charset);
  }
}
void RecodeBeamSearch::DecodeSecondaryBeams(
    const NetworkIO &output, double dict_ratio, double cert_offset,
    double worst_dict_cert, const UNICHARSET *charset, int lstm_choice_mode) {
  // Restart the secondary decode from scratch.
  for (auto data : secondary_beam_) {
    delete data;
  }
  secondary_beam_.clear();
  // Without at least one complete character segment there is nothing to do.
  if (character_boundaries_.size() < 2) {
    return;
  }
  int width = output.Width();
  unsigned bucketNumber = 0;
  for (int t = 0; t < width; ++t) {
    // Advance to the character segment ("bucket") that contains timestep t.
    while ((bucketNumber + 1) < character_boundaries_.size() &&
           t >= character_boundaries_[bucketNumber + 1]) {
      ++bucketNumber;
    }
    // Recompute the top-n outputs, excluding codes already chosen for this
    // character in a previous pass (filled in by extractSymbolChoices).
    ComputeSecTopN(&(excludedUnichars)[bucketNumber], output.f(t),
                   output.NumFeatures(), kBeamWidths[0]);
    DecodeSecondaryStep(output.f(t), t, dict_ratio, cert_offset,
                        worst_dict_cert, charset);
  }
}
// Stores, for timestep xCoord, every network output with an activation of at
// least 1% as a (character, score) pair, ordered by descending score, in
// timesteps.
void RecodeBeamSearch::SaveMostCertainChoices(const float *outputs,
                                              int num_outputs,
                                              const UNICHARSET *charset,
                                              int xCoord) {
  std::vector<std::pair<const char *, float>> choices;
  for (int i = 0; i < num_outputs; ++i) {
    if (outputs[i] >= 0.01f) {
      // Map the output index to a unichar string. The mapping (empty string
      // for the last two outputs, +2 offset for indices > 0) is kept
      // verbatim from the original code; presumably it mirrors the
      // recoder's output layout — TODO confirm.
      const char *character;
      if (i + 2 >= num_outputs) {
        character = "";
      } else if (i > 0) {
        character = charset->id_to_unichar_ext(i + 2);
      } else {
        character = charset->id_to_unichar_ext(i);
      }
      // Binary-search for the insertion point in the score-descending list
      // (replaces the former O(n) linear scan). The comparator returns true
      // for the first element whose score is <= the new value, so a new
      // entry lands before equal-scored existing ones, exactly as before.
      auto pos = std::upper_bound(
          choices.begin(), choices.end(), outputs[i],
          [](float value, const std::pair<const char *, float> &choice) {
            return choice.second <= value;
          });
      choices.insert(pos, std::make_pair(character, outputs[i]));
    }
  }
  timesteps.push_back(choices);
}
void RecodeBeamSearch::segmentTimestepsByCharacters() {
  // Slice the flat timesteps vector into one segment per character, using
  // the previously computed character boundaries.
  for (unsigned i = 1; i < character_boundaries_.size(); ++i) {
    const int begin = character_boundaries_[i - 1];
    const int end = character_boundaries_[i];
    segmentedTimesteps.emplace_back(timesteps.begin() + begin,
                                    timesteps.begin() + end);
  }
}
std::vector<std::vector<std::pair<const char *, float>>>
RecodeBeamSearch::combineSegmentedTimesteps(
    std::vector<std::vector<std::vector<std::pair<const char *, float>>>>
        *segmentedTimesteps) {
  // Flatten the per-character segments back into one timestep sequence.
  std::vector<std::vector<std::pair<const char *, float>>> flattened;
  for (const auto &segment : *segmentedTimesteps) {
    flattened.insert(flattened.end(), segment.begin(), segment.end());
  }
  return flattened;
}
void RecodeBeamSearch::calculateCharBoundaries(std::vector<int> *starts,
                                               std::vector<int> *ends,
                                               std::vector<int> *char_bounds_,
                                               int maxWidth) {
  // The first boundary is always the left edge of the line.
  char_bounds_->push_back(0);
  // Place each interior boundary halfway between the end of one character
  // and the start of the next (starts has one extra trailing entry).
  for (unsigned i = 0; i < ends->size(); ++i) {
    int gap = ((*starts)[i + 1] - (*ends)[i]) / 2;
    char_bounds_->push_back((*ends)[i] + gap);
  }
  // Replace the final midpoint with the right edge of the line.
  char_bounds_->back() = maxWidth;
}
// Returns the best path as labels/scores/xcoords similar to simple CTC.
void RecodeBeamSearch::ExtractBestPathAsLabels(
    std::vector<int> *labels, std::vector<int> *xcoords) const {
  labels->clear();
  xcoords->clear();
  std::vector<const RecodeNode *> best_nodes;
  ExtractBestPaths(&best_nodes, nullptr);
  // Run plain CTC over the best path: emit each non-null label once, then
  // skip its duplicates (except in simple-text mode, where every timestep
  // stands alone).
  const int width = best_nodes.size();
  int t = 0;
  while (t < width) {
    const int label = best_nodes[t]->code;
    if (label != null_char_) {
      labels->push_back(label);
      xcoords->push_back(t);
    }
    ++t;
    if (!is_simple_text_) {
      while (t < width && best_nodes[t]->code == label) {
        ++t;
      }
    }
  }
  xcoords->push_back(width);
}
// Returns the best path as unichar-ids/certs/ratings/xcoords skipping
// duplicates, nulls and intermediate parts.
void RecodeBeamSearch::ExtractBestPathAsUnicharIds(
    bool debug, const UNICHARSET *unicharset, std::vector<int> *unichar_ids,
    std::vector<float> *certs, std::vector<float> *ratings,
    std::vector<int> *xcoords) const {
  // Find the best path through the lattice and convert it to unichar-ids.
  std::vector<const RecodeNode *> best_nodes;
  ExtractBestPaths(&best_nodes, nullptr);
  ExtractPathAsUnicharIds(best_nodes, unichar_ids, certs, ratings, xcoords);
  if (!debug) {
    return;
  }
  DebugPath(unicharset, best_nodes);
  DebugUnicharPath(unicharset, best_nodes, *unichar_ids, *certs, *ratings,
                   *xcoords);
}
// Returns the best path as a set of WERD_RES.
void RecodeBeamSearch::ExtractBestPathAsWords(const TBOX &line_box,
                                              float scale_factor, bool debug,
                                              const UNICHARSET *unicharset,
                                              PointerVector<WERD_RES> *words,
                                              int lstm_choice_mode) {
  words->truncate(0);
  std::vector<int> unichar_ids;
  std::vector<float> certs;
  std::vector<float> ratings;
  std::vector<int> xcoords;
  std::vector<const RecodeNode *> best_nodes;
  std::vector<const RecodeNode *> second_nodes;
  character_boundaries_.clear();
  ExtractBestPaths(&best_nodes, &second_nodes);
  if (debug) {
    DebugPath(unicharset, best_nodes);
    ExtractPathAsUnicharIds(second_nodes, &unichar_ids, &certs, &ratings,
                            &xcoords);
    tprintf("\nSecond choice path:\n");
    DebugUnicharPath(unicharset, second_nodes, unichar_ids, certs, ratings,
                     xcoords);
  }
  // If lstm choice mode is required in granularity level 2, it stores the x
  // Coordinates of every chosen character, to match the alternative choices to
  // it.
  ExtractPathAsUnicharIds(best_nodes, &unichar_ids, &certs, &ratings, &xcoords,
                          &character_boundaries_);
  int num_ids = unichar_ids.size();
  if (debug) {
    DebugUnicharPath(unicharset, best_nodes, unichar_ids, certs, ratings,
                     xcoords);
  }
  // Convert labels to unichar-ids.
  int word_end = 0;
  float prev_space_cert = 0.0f;
  // Outer loop: one iteration per output word; word_end from the previous
  // iteration becomes the next word_start.
  for (int word_start = 0; word_start < num_ids; word_start = word_end) {
    for (word_end = word_start + 1; word_end < num_ids; ++word_end) {
      // A word is terminated when a space character or start_of_word flag is
      // hit. We also want to force a separate word for every non
      // space-delimited character when not in a dictionary context.
      if (unichar_ids[word_end] == UNICHAR_SPACE) {
        break;
      }
      int index = xcoords[word_end];
      if (best_nodes[index]->start_of_word) {
        break;
      }
      if (best_nodes[index]->permuter == TOP_CHOICE_PERM &&
          (!unicharset->IsSpaceDelimited(unichar_ids[word_end]) ||
           !unicharset->IsSpaceDelimited(unichar_ids[word_end - 1]))) {
        break;
      }
    }
    // Certainty of the trailing space (if any) for this word.
    float space_cert = 0.0f;
    if (word_end < num_ids && unichar_ids[word_end] == UNICHAR_SPACE) {
      space_cert = certs[word_end];
    }
    bool leading_space =
        word_start > 0 && unichar_ids[word_start - 1] == UNICHAR_SPACE;
    // Create a WERD_RES for the output word.
    WERD_RES *word_res =
        InitializeWord(leading_space, line_box, word_start, word_end,
                       std::min(space_cert, prev_space_cert), unicharset,
                       xcoords, scale_factor);
    // One diagonal ratings-matrix entry (single BLOB_CHOICE) per character.
    for (int i = word_start; i < word_end; ++i) {
      auto *choices = new BLOB_CHOICE_LIST;
      BLOB_CHOICE_IT bc_it(choices);
      auto *choice = new BLOB_CHOICE(unichar_ids[i], ratings[i], certs[i], -1,
                                     1.0f, static_cast<float>(INT16_MAX), 0.0f,
                                     BCC_STATIC_CLASSIFIER);
      int col = i - word_start;
      choice->set_matrix_cell(col, col);
      bc_it.add_after_then_move(choice);
      word_res->ratings->put(col, col, choices);
    }
    // The permuter of the word's last node labels the whole fake word.
    int index = xcoords[word_end - 1];
    word_res->FakeWordFromRatings(best_nodes[index]->permuter);
    words->push_back(word_res);
    prev_space_cert = space_cert;
    // Skip the word-separating space so it starts no word of its own.
    if (word_end < num_ids && unichar_ids[word_end] == UNICHAR_SPACE) {
      ++word_end;
    }
  }
}
// Comparator ordering RecodeNode pointers by descending score (used to pick
// the best-rated node chain in extractSymbolChoices).
struct greater_than {
  // Take the pointers by value: the previous reference-to-pointer parameters
  // added an indirection for no benefit and could not bind to rvalues.
  inline bool operator()(const RecodeNode *node1, const RecodeNode *node2) const {
    return (node1->score > node2->score);
  }
};
// Dumps the beam lattice (primary or secondary) one timestep-layer at a
// time, marking character boundaries with "***" lines.
void RecodeBeamSearch::PrintBeam2(bool uids, int num_outputs,
                                  const UNICHARSET *charset,
                                  bool secondary) const {
  // topology[step] collects the nodes belonging to timestep `step`.
  std::vector<std::vector<const RecodeNode *>> topology;
  std::unordered_set<const RecodeNode *> visited;
  const std::vector<RecodeBeam *> &beam = !secondary ? beam_ : secondary_beam_;
  // create the topology
  for (int step = beam.size() - 1; step >= 0; --step) {
    std::vector<const RecodeNode *> layer;
    topology.push_back(layer);
  }
  // fill the topology with depths first
  for (int step = beam.size() - 1; step >= 0; --step) {
    std::vector<tesseract::RecodePair> &heaps = beam.at(step)->beams_->heap();
    for (auto &&node : heaps) {
      // Walk the prev chain, assigning each unvisited ancestor to the layer
      // `backtracker` steps before this one.
      int backtracker = 0;
      const RecodeNode *curr = &node.data();
      while (curr != nullptr && !visited.count(curr)) {
        visited.insert(curr);
        topology[step - backtracker].push_back(curr);
        curr = curr->prev;
        ++backtracker;
      }
    }
  }
  int ct = 0;   // current timestep being printed
  unsigned cb = 1; // index of the next character boundary
  for (const std::vector<const RecodeNode *> &layer : topology) {
    if (cb >= character_boundaries_.size()) {
      break;
    }
    if (ct == character_boundaries_[cb]) {
      tprintf("***\n");
      ++cb;
    }
    for (const RecodeNode *node : layer) {
      // Render this node: its unichar, " " for null, or "*" (666) for a
      // code with no unichar assigned.
      const char *code;
      int intCode;
      if (node->unichar_id != INVALID_UNICHAR_ID) {
        code = charset->id_to_unichar(node->unichar_id);
        intCode = node->unichar_id;
      } else if (node->code == null_char_) {
        intCode = 0;
        code = " ";
      } else {
        intCode = 666;
        code = "*";
      }
      // Render the predecessor the same way.
      int intPrevCode = 0;
      const char *prevCode;
      float prevScore = 0;
      if (node->prev != nullptr) {
        prevScore = node->prev->score;
        if (node->prev->unichar_id != INVALID_UNICHAR_ID) {
          prevCode = charset->id_to_unichar(node->prev->unichar_id);
          intPrevCode = node->prev->unichar_id;
        } else if (node->code == null_char_) {
          // NOTE(review): this tests node->code, not node->prev->code —
          // possibly intended to test the predecessor; confirm upstream.
          intPrevCode = 0;
          prevCode = " ";
        } else {
          prevCode = "*";
          intPrevCode = 666;
        }
      } else {
        prevCode = " ";
      }
      if (uids) {
        tprintf("%x(|)%f(>)%x(|)%f\n", intPrevCode, prevScore, intCode,
                node->score);
      } else {
        tprintf("%s(|)%f(>)%s(|)%f\n", prevCode, prevScore, code, node->score);
      }
    }
    tprintf("-\n");
    ++ct;
  }
  tprintf("***\n");
}
void RecodeBeamSearch::extractSymbolChoices(const UNICHARSET *unicharset) {
if (character_boundaries_.size() < 2) {
return;
}
// For the first iteration the original beam is analyzed. After that a
// new beam is calculated based on the results from the original beam.
std::vector<RecodeBeam *> ¤tBeam =
secondary_beam_.empty() ? beam_ : secondary_beam_;
character_boundaries_[0] = 0;
for (unsigned j = 1; j < character_boundaries_.size(); ++j) {
std::vector<int> unichar_ids;
std::vector<float> certs;
std::vector<float> ratings;
std::vector<int> xcoords;
int backpath = character_boundaries_[j] - character_boundaries_[j - 1];
std::vector<tesseract::RecodePair> &heaps =
currentBeam.at(character_boundaries_[j] - 1)->beams_->heap();
std::vector<const RecodeNode *> best_nodes;
std::vector<const RecodeNode *> best;
// Scan the segmented node chain for valid unichar ids.
for (auto &&entry : heaps) {
bool validChar = false;
int backcounter = 0;
const RecodeNode *node = &entry.data();
while (node != nullptr && backcounter < backpath) {
if (node->code != null_char_ &&
node->unichar_id != INVALID_UNICHAR_ID) {
validChar = true;
break;
}
node = node->prev;
++backcounter;
}
if (validChar) {
best.push_back(&entry.data());
}
}
// find the best rated segmented node chain and extract the unichar id.
if (!best.empty()) {
std::sort(best.begin(), best.end(), greater_than());
ExtractPath(best[0], &best_nodes, backpath);
ExtractPathAsUnicharIds(best_nodes, &unichar_ids, &certs, &ratings,
&xcoords);
}
if (!unichar_ids.empty()) {
int bestPos = 0;
for (unsigned i = 1; i < unichar_ids.size(); ++i) {
if (ratings[i] < ratings[bestPos]) {
bestPos = i;
}
}
#if 0 // TODO: bestCode is currently unused (see commit 2dd5d0d60).
int bestCode = -10;
for (auto &node : best_nodes) {
if (node->unichar_id == unichar_ids[bestPos]) {
bestCode = node->code;
}
}
#endif
// Exclude the best choice for the followup decoding.
std::unordered_set<int> excludeCodeList;
for (auto &best_node : best_nodes) {
if (best_node->code != null_char_) {
excludeCodeList.insert(best_node->code);
}
}
if (j - 1 < excludedUnichars.size()) {
for (auto elem : excludeCodeList) {
excludedUnichars[j - 1].insert(elem);
}
} else {
excludedUnichars.push_back(excludeCodeList);
}
// Save the best choice for the choice iterator.
if (j - 1 < ctc_choices.size()) {
int id = unichar_ids[bestPos];
const char *result = unicharset->id_to_unichar_ext(id);
float rating = ratings[bestPos];
ctc_choices[j - 1].push_back(
std::pair<const char *, float>(result, rating));
} else {
std::vector<std::pair<const char *, float>> choice;
int id = unichar_ids[bestPos];
const char *result = unicharset->id_to_unichar_ext(id);
float rating = ratings[bestPos];
choice.emplace_back(result, rating);
ctc_choices.push_back(choice);
}
// fill the blank spot with an empty array
} else {
if (j - 1 >= excludedUnichars.size()) {
std::unordered_set<int> excludeCodeList;
excludedUnichars.push_back(excludeCodeList);
}
if (j - 1 >= ctc_choices.size()) {
std::vector<std::pair<const char *, float>> choice;
ctc_choices.push_back(choice);
}
}
}
for (auto data : secondary_beam_) {
delete data;
}
secondary_beam_.clear();
}
// Generates debug output of the content of the beams after a Decode.
void RecodeBeamSearch::DebugBeams(const UNICHARSET &unicharset) const {
  // Walk every position x {non-dict, dict} x continuation combination.
  for (int pos = 0; pos < beam_size_; ++pos) {
    for (int is_dict = 0; is_dict < 2; ++is_dict) {
      for (int c = 0; c < NC_COUNT; ++c) {
        const int index =
            BeamIndex(is_dict, static_cast<NodeContinuation>(c), 0);
        const auto &heap = beam_[pos]->beams_[index];
        if (heap.empty()) {
          continue;
        }
        // Print all the best scoring nodes for each unichar found.
        tprintf("Position %d: %s+%s beam\n", pos,
                is_dict ? "Dict" : "Non-Dict", kNodeContNames[c]);
        DebugBeamPos(unicharset, heap);
      }
    }
  }
}
// Generates debug output of the content of a single beam position.
void RecodeBeamSearch::DebugBeamPos(const UNICHARSET &unicharset,
const RecodeHeap &heap) const {
std::vector<const RecodeNode *> unichar_bests(unicharset.size());
const RecodeNode *null_best = nullptr;
int heap_size = heap.size();
for (int i = 0; i < heap_size; ++i) {
const RecodeNode *node = &heap.get(i).data();
if (node->unichar_id == INVALID_UNICHAR_ID) {
if (null_best == nullptr || null_best->score < node->score) {
null_best = node;
}
} else {
if (unichar_bests[node->unichar_id] == nullptr ||
unichar_bests[node->unichar_id]->score < node->score) {
unichar_bests[node->unichar_id] = node;
}
}
}
for (auto &unichar_best : unichar_bests) {
if (unichar_best != nullptr) {
const RecodeNode &node = *unichar_best;
node.Print(null_char_, unicharset, 1);
}
}
if (null_best != nullptr) {
null_best->Print(null_char_, unicharset, 1);
}
}
// Returns the given best_nodes as unichar-ids/certs/ratings/xcoords skipping
// duplicates, nulls and intermediate parts.
/* static */
void RecodeBeamSearch::ExtractPathAsUnicharIds(
    const std::vector<const RecodeNode *> &best_nodes,
    std::vector<int> *unichar_ids, std::vector<float> *certs,
    std::vector<float> *ratings, std::vector<int> *xcoords,
    std::vector<int> *character_boundaries) {
  unichar_ids->clear();
  certs->clear();
  ratings->clear();
  xcoords->clear();
  std::vector<int> starts;
  std::vector<int> ends;
  // Backtrack extracting only valid, non-duplicate unichar-ids.
  int t = 0;
  int width = best_nodes.size();
  while (t < width) {
    // certainty tracks the worst (most negative) certainty of the run;
    // rating accumulates the negated certainties (higher = worse).
    double certainty = 0.0;
    double rating = 0.0;
    // Skip leading invalid/null nodes, folding their certainty into the
    // character that follows (or the previous one, below).
    while (t < width && best_nodes[t]->unichar_id == INVALID_UNICHAR_ID) {
      double cert = best_nodes[t++]->certainty;
      if (cert < certainty) {
        certainty = cert;
      }
      rating -= cert;
    }
    starts.push_back(t);
    if (t < width) {
      int unichar_id = best_nodes[t]->unichar_id;
      if (unichar_id == UNICHAR_SPACE && !certs->empty() &&
          best_nodes[t]->permuter != NO_PERM) {
        // All the rating and certainty go on the previous character except
        // for the space itself.
        if (certainty < certs->back()) {
          certs->back() = certainty;
        }
        ratings->back() += rating;
        certainty = 0.0;
        rating = 0.0;
      }
      unichar_ids->push_back(unichar_id);
      xcoords->push_back(t);
      // Consume this node plus any flagged duplicates of it.
      do {
        double cert = best_nodes[t++]->certainty;
        // Special-case NO-PERM space to forget the certainty of the previous
        // nulls. See long comment in ContinueContext.
        if (cert < certainty || (unichar_id == UNICHAR_SPACE &&
                                 best_nodes[t - 1]->permuter == NO_PERM)) {
          certainty = cert;
        }
        rating -= cert;
      } while (t < width && best_nodes[t]->duplicate);
      ends.push_back(t);
      certs->push_back(certainty);
      ratings->push_back(rating);
    } else if (!certs->empty()) {
      // Trailing nulls: fold their certainty/rating into the last character.
      if (certainty < certs->back()) {
        certs->back() = certainty;
      }
      ratings->back() += rating;
    }
  }
  starts.push_back(width);
  if (character_boundaries != nullptr) {
    calculateCharBoundaries(&starts, &ends, character_boundaries, width);
  }
  xcoords->push_back(width);
}
// Sets up a word with the ratings matrix and fake blobs with boxes in the
// right places.
WERD_RES *RecodeBeamSearch::InitializeWord(bool leading_space,
                                           const TBOX &line_box, int word_start,
                                           int word_end, float space_certainty,
                                           const UNICHARSET *unicharset,
                                           const std::vector<int> &xcoords,
                                           float scale_factor) {
  // Make a fake blob for each non-zero label.
  C_BLOB_LIST blobs;
  C_BLOB_IT b_it(&blobs);
  for (int i = word_start; i < word_end; ++i) {
    // Only characters with a known right boundary get a blob; the box spans
    // from this character's boundary to the next, scaled back to image
    // coordinates and offset to the line's position.
    if (static_cast<unsigned>(i + 1) < character_boundaries_.size()) {
      TBOX box(static_cast<int16_t>(
                   std::floor(character_boundaries_[i] * scale_factor)) +
                   line_box.left(),
               line_box.bottom(),
               static_cast<int16_t>(
                   std::ceil(character_boundaries_[i + 1] * scale_factor)) +
                   line_box.left(),
               line_box.top());
      b_it.add_after_then_move(C_BLOB::FakeBlob(box));
    }
  }
  // Make a fake word from the blobs.
  WERD *word = new WERD(&blobs, leading_space, nullptr);
  // Make a WERD_RES from the word.
  auto *word_res = new WERD_RES(word);
  // end counts the characters plus the leading space (bool adds 0 or 1).
  word_res->end = word_end - word_start + leading_space;
  word_res->uch_set = unicharset;
  word_res->combination = true; // Give it ownership of the word.
  word_res->space_certainty = space_certainty;
  // Diagonal ratings matrix: one column per character, filled by the caller.
  word_res->ratings = new MATRIX(word_end - word_start, 1);
  return word_res;
}
// Fills top_n_flags_ with bools that are true iff the corresponding output
// is one of the top_n.
void RecodeBeamSearch::ComputeTopN(const float *outputs, int num_outputs,
                                   int top_n) {
  top_n_flags_.clear();
  top_n_flags_.resize(num_outputs, TN_ALSO_RAN);
  top_code_ = -1;
  second_code_ = -1;
  top_heap_.clear();
  // Maintain a bounded heap of the top_n highest activations.
  for (int i = 0; i < num_outputs; ++i) {
    if (top_heap_.size() < top_n || outputs[i] > top_heap_.PeekTop().key()) {
      TopPair entry(outputs[i], i);
      top_heap_.Push(&entry);
      if (top_heap_.size() > top_n) {
        // Evict the current minimum to keep at most top_n entries.
        top_heap_.Pop(&entry);
      }
    }
  }
  // Drain the heap in ascending-score order: all but the last two popped
  // entries are flagged TN_TOPN; the final two get TN_TOP2, with the very
  // last (highest) recorded as top_code_ and the runner-up as second_code_.
  while (!top_heap_.empty()) {
    TopPair entry;
    top_heap_.Pop(&entry);
    if (top_heap_.size() > 1) {
      top_n_flags_[entry.data()] = TN_TOPN;
    } else {
      top_n_flags_[entry.data()] = TN_TOP2;
      if (top_heap_.empty()) {
        top_code_ = entry.data();
      } else {
        second_code_ = entry.data();
      }
    }
  }
  // The null character is always treated as a top-2 choice.
  top_n_flags_[null_char_] = TN_TOP2;
}
// Variant of ComputeTopN used by the secondary (symbol-choice) decode:
// identical selection logic, but any code present in *exList is excluded
// from consideration so already-chosen symbols are blocked.
// NOTE(review): exList is dereferenced unconditionally - callers must pass a
// non-null set.
void RecodeBeamSearch::ComputeSecTopN(std::unordered_set<int> *exList,
                                      const float *outputs, int num_outputs,
                                      int top_n) {
  top_n_flags_.clear();
  top_n_flags_.resize(num_outputs, TN_ALSO_RAN);
  top_code_ = -1;
  second_code_ = -1;
  top_heap_.clear();
  // Min-heap of the top_n best non-excluded (score, code) pairs.
  for (int i = 0; i < num_outputs; ++i) {
    if ((top_heap_.size() < top_n || outputs[i] > top_heap_.PeekTop().key()) &&
        !exList->count(i)) {
      TopPair entry(outputs[i], i);
      top_heap_.Push(&entry);
      if (top_heap_.size() > top_n) {
        top_heap_.Pop(&entry);
      }
    }
  }
  // Pops ascend in score: all but the last two are TN_TOPN; the last two
  // (2nd-best then best) are TN_TOP2 and recorded in second_/top_code_.
  while (!top_heap_.empty()) {
    TopPair entry;
    top_heap_.Pop(&entry);
    if (top_heap_.size() > 1) {
      top_n_flags_[entry.data()] = TN_TOPN;
    } else {
      top_n_flags_[entry.data()] = TN_TOP2;
      if (top_heap_.empty()) {
        top_code_ = entry.data();
      } else {
        second_code_ = entry.data();
      }
    }
  }
  // The null char always stays a top-2 choice.
  top_n_flags_[null_char_] = TN_TOP2;
}
// Adds the computation for the current time-step to the beam. Call at each
// time-step in sequence from left to right. outputs is the activation vector
// for the current timestep.
void RecodeBeamSearch::DecodeStep(const float *outputs, int t,
                                  double dict_ratio, double cert_offset,
                                  double worst_dict_cert,
                                  const UNICHARSET *charset, bool debug) {
  // Grow the lattice by one step if this is a new timestep.
  if (t == static_cast<int>(beam_.size())) {
    beam_.push_back(new RecodeBeam);
  }
  RecodeBeam *step = beam_[t];
  beam_size_ = t + 1;
  step->Clear();
  if (t == 0) {
    // The first step can only use singles and initials.
    ContinueContext(nullptr, BeamIndex(false, NC_ANYTHING, 0), outputs, TN_TOP2,
                    charset, dict_ratio, cert_offset, worst_dict_cert, step);
    if (dict_ != nullptr) {
      ContinueContext(nullptr, BeamIndex(true, NC_ANYTHING, 0), outputs,
                      TN_TOP2, charset, dict_ratio, cert_offset,
                      worst_dict_cert, step);
    }
  } else {
    RecodeBeam *prev = beam_[t - 1];
    if (debug) {
      // Dump the previous step's dawg and non-dawg NC_ANYTHING beams.
      int beam_index = BeamIndex(true, NC_ANYTHING, 0);
      for (int i = prev->beams_[beam_index].size() - 1; i >= 0; --i) {
        std::vector<const RecodeNode *> path;
        ExtractPath(&prev->beams_[beam_index].get(i).data(), &path);
        tprintf("Step %d: Dawg beam %d:\n", t, i);
        DebugPath(charset, path);
      }
      beam_index = BeamIndex(false, NC_ANYTHING, 0);
      for (int i = prev->beams_[beam_index].size() - 1; i >= 0; --i) {
        std::vector<const RecodeNode *> path;
        ExtractPath(&prev->beams_[beam_index].get(i).data(), &path);
        tprintf("Step %d: Non-Dawg beam %d:\n", t, i);
        DebugPath(charset, path);
      }
    }
    int total_beam = 0;
    // Work through the scores by group (top-2, top-n, the rest) while the beam
    // is empty. This enables extending the context using only the top-n results
    // first, which may have an empty intersection with the valid codes, so we
    // fall back to the rest if the beam is empty.
    for (int tn = 0; tn < TN_COUNT && total_beam == 0; ++tn) {
      auto top_n = static_cast<TopNState>(tn);
      for (int index = 0; index < kNumBeams; ++index) {
        // Working backwards through the heaps doesn't guarantee that we see the
        // best first, but it comes before a lot of the worst, so it is slightly
        // more efficient than going forwards.
        for (int i = prev->beams_[index].size() - 1; i >= 0; --i) {
          ContinueContext(&prev->beams_[index].get(i).data(), index, outputs,
                          top_n, charset, dict_ratio, cert_offset,
                          worst_dict_cert, step);
        }
      }
      // Only NC_ANYTHING beams count towards "beam is non-empty": the
      // dup-constrained continuations cannot stand alone.
      for (int index = 0; index < kNumBeams; ++index) {
        if (ContinuationFromBeamsIndex(index) == NC_ANYTHING) {
          total_beam += step->beams_[index].size();
        }
      }
    }
    // Special case for the best initial dawg. Push it on the heap if good
    // enough, but there is only one, so it doesn't blow up the beam.
    for (int c = 0; c < NC_COUNT; ++c) {
      if (step->best_initial_dawgs_[c].code >= 0) {
        int index = BeamIndex(true, static_cast<NodeContinuation>(c), 0);
        RecodeHeap *dawg_heap = &step->beams_[index];
        PushHeapIfBetter(kBeamWidths[0], &step->best_initial_dawgs_[c],
                         dawg_heap);
      }
    }
  }
}
// Secondary-beam counterpart of DecodeStep, used by the symbol-choice
// extraction: identical search logic, but operates on secondary_beam_ and,
// unlike DecodeStep, does NOT update beam_size_ (the primary lattice size).
void RecodeBeamSearch::DecodeSecondaryStep(
    const float *outputs, int t, double dict_ratio, double cert_offset,
    double worst_dict_cert, const UNICHARSET *charset, bool debug) {
  // Grow the secondary lattice by one step if this is a new timestep.
  if (t == static_cast<int>(secondary_beam_.size())) {
    secondary_beam_.push_back(new RecodeBeam);
  }
  RecodeBeam *step = secondary_beam_[t];
  step->Clear();
  if (t == 0) {
    // The first step can only use singles and initials.
    ContinueContext(nullptr, BeamIndex(false, NC_ANYTHING, 0), outputs, TN_TOP2,
                    charset, dict_ratio, cert_offset, worst_dict_cert, step);
    if (dict_ != nullptr) {
      ContinueContext(nullptr, BeamIndex(true, NC_ANYTHING, 0), outputs,
                      TN_TOP2, charset, dict_ratio, cert_offset,
                      worst_dict_cert, step);
    }
  } else {
    RecodeBeam *prev = secondary_beam_[t - 1];
    if (debug) {
      // Dump the previous step's dawg and non-dawg NC_ANYTHING beams.
      int beam_index = BeamIndex(true, NC_ANYTHING, 0);
      for (int i = prev->beams_[beam_index].size() - 1; i >= 0; --i) {
        std::vector<const RecodeNode *> path;
        ExtractPath(&prev->beams_[beam_index].get(i).data(), &path);
        tprintf("Step %d: Dawg beam %d:\n", t, i);
        DebugPath(charset, path);
      }
      beam_index = BeamIndex(false, NC_ANYTHING, 0);
      for (int i = prev->beams_[beam_index].size() - 1; i >= 0; --i) {
        std::vector<const RecodeNode *> path;
        ExtractPath(&prev->beams_[beam_index].get(i).data(), &path);
        tprintf("Step %d: Non-Dawg beam %d:\n", t, i);
        DebugPath(charset, path);
      }
    }
    int total_beam = 0;
    // Work through the scores by group (top-2, top-n, the rest) while the beam
    // is empty. This enables extending the context using only the top-n results
    // first, which may have an empty intersection with the valid codes, so we
    // fall back to the rest if the beam is empty.
    for (int tn = 0; tn < TN_COUNT && total_beam == 0; ++tn) {
      auto top_n = static_cast<TopNState>(tn);
      for (int index = 0; index < kNumBeams; ++index) {
        // Working backwards through the heaps doesn't guarantee that we see the
        // best first, but it comes before a lot of the worst, so it is slightly
        // more efficient than going forwards.
        for (int i = prev->beams_[index].size() - 1; i >= 0; --i) {
          ContinueContext(&prev->beams_[index].get(i).data(), index, outputs,
                          top_n, charset, dict_ratio, cert_offset,
                          worst_dict_cert, step);
        }
      }
      // Only NC_ANYTHING beams count towards "beam is non-empty".
      for (int index = 0; index < kNumBeams; ++index) {
        if (ContinuationFromBeamsIndex(index) == NC_ANYTHING) {
          total_beam += step->beams_[index].size();
        }
      }
    }
    // Special case for the best initial dawg. Push it on the heap if good
    // enough, but there is only one, so it doesn't blow up the beam.
    for (int c = 0; c < NC_COUNT; ++c) {
      if (step->best_initial_dawgs_[c].code >= 0) {
        int index = BeamIndex(true, static_cast<NodeContinuation>(c), 0);
        RecodeHeap *dawg_heap = &step->beams_[index];
        PushHeapIfBetter(kBeamWidths[0], &step->best_initial_dawgs_[c],
                         dawg_heap);
      }
    }
  }
}
// Adds to the appropriate beams the legal (according to recoder)
// continuations of context prev, which is of the given length, using the
// given network outputs to provide scores to the choices. Uses only those
// choices for which top_n_flags[index] == top_n_flag.
void RecodeBeamSearch::ContinueContext(
    const RecodeNode *prev, int index, const float *outputs,
    TopNState top_n_flag, const UNICHARSET *charset, double dict_ratio,
    double cert_offset, double worst_dict_cert, RecodeBeam *step) {
  RecodedCharID prefix;
  RecodedCharID full_code;
  const RecodeNode *previous = prev;
  int length = LengthFromBeamsIndex(index);
  bool use_dawgs = IsDawgFromBeamsIndex(index);
  NodeContinuation prev_cont = ContinuationFromBeamsIndex(index);
  // Reconstruct the in-progress multi-code prefix by walking back over the
  // last `length` real codes, skipping duplicates and nulls.
  for (int p = length - 1; p >= 0 && previous != nullptr; --p) {
    while (previous->duplicate || previous->code == null_char_) {
      previous = previous->prev;
    }
    prefix.Set(p, previous->code);
    full_code.Set(p, previous->code);
    previous = previous->prev;
  }
  if (prev != nullptr && !is_simple_text_) {
    // CTC-style continuations of the previous code: duplicates and
    // combined-score duplicates (see the NodeContinuation discussion in the
    // header).
    if (top_n_flags_[prev->code] == top_n_flag) {
      if (prev_cont != NC_NO_DUP) {
        float cert =
            NetworkIO::ProbToCertainty(outputs[prev->code]) + cert_offset;
        PushDupOrNoDawgIfBetter(length, true, prev->code, prev->unichar_id,
                                cert, worst_dict_cert, dict_ratio, use_dawgs,
                                NC_ANYTHING, prev, step);
      }
      if (prev_cont == NC_ANYTHING && top_n_flag == TN_TOP2 &&
          prev->code != null_char_) {
        // Combine the prev-code and null scores into one dup that must be
        // followed by something other than another dup of prev.
        float cert = NetworkIO::ProbToCertainty(outputs[prev->code] +
                                                outputs[null_char_]) +
                     cert_offset;
        PushDupOrNoDawgIfBetter(length, true, prev->code, prev->unichar_id,
                                cert, worst_dict_cert, dict_ratio, use_dawgs,
                                NC_NO_DUP, prev, step);
      }
    }
    if (prev_cont == NC_ONLY_DUP) {
      return;
    }
    if (prev->code != null_char_ && length > 0 &&
        top_n_flags_[null_char_] == top_n_flag) {
      // Allow nulls within multi code sequences, as the nulls within are not
      // explicitly included in the code sequence.
      float cert =
          NetworkIO::ProbToCertainty(outputs[null_char_]) + cert_offset;
      PushDupOrNoDawgIfBetter(length, false, null_char_, INVALID_UNICHAR_ID,
                              cert, worst_dict_cert, dict_ratio, use_dawgs,
                              NC_ANYTHING, prev, step);
    }
  }
  // Codes that COMPLETE a unichar given the current prefix.
  const std::vector<int> *final_codes = recoder_.GetFinalCodes(prefix);
  if (final_codes != nullptr) {
    for (int code : *final_codes) {
      if (top_n_flags_[code] != top_n_flag) {
        continue;
      }
      if (prev != nullptr && prev->code == code && !is_simple_text_) {
        continue;
      }
      float cert = NetworkIO::ProbToCertainty(outputs[code]) + cert_offset;
      if (cert < kMinCertainty && code != null_char_) {
        continue;
      }
      full_code.Set(length, code);
      int unichar_id = recoder_.DecodeUnichar(full_code);
      // Map the null char to INVALID.
      if (length == 0 && code == null_char_) {
        unichar_id = INVALID_UNICHAR_ID;
      }
      if (unichar_id != INVALID_UNICHAR_ID && charset != nullptr &&
          !charset->get_enabled(unichar_id)) {
        continue; // disabled by whitelist/blacklist
      }
      ContinueUnichar(code, unichar_id, cert, worst_dict_cert, dict_ratio,
                      use_dawgs, NC_ANYTHING, prev, step);
      if (top_n_flag == TN_TOP2 && code != null_char_) {
        // Combined-score variant: code + null (and prev, when prev/code are
        // the top-2 pair), constrained to be followed only by a dup.
        float prob = outputs[code] + outputs[null_char_];
        if (prev != nullptr && prev_cont == NC_ANYTHING &&
            prev->code != null_char_ &&
            ((prev->code == top_code_ && code == second_code_) ||
             (code == top_code_ && prev->code == second_code_))) {
          prob += outputs[prev->code];
        }
        cert = NetworkIO::ProbToCertainty(prob) + cert_offset;
        ContinueUnichar(code, unichar_id, cert, worst_dict_cert, dict_ratio,
                        use_dawgs, NC_ONLY_DUP, prev, step);
      }
    }
  }
  // Codes that EXTEND the prefix without yet completing a unichar.
  const std::vector<int> *next_codes = recoder_.GetNextCodes(prefix);
  if (next_codes != nullptr) {
    for (int code : *next_codes) {
      if (top_n_flags_[code] != top_n_flag) {
        continue;
      }
      if (prev != nullptr && prev->code == code && !is_simple_text_) {
        continue;
      }
      float cert = NetworkIO::ProbToCertainty(outputs[code]) + cert_offset;
      PushDupOrNoDawgIfBetter(length + 1, false, code, INVALID_UNICHAR_ID, cert,
                              worst_dict_cert, dict_ratio, use_dawgs,
                              NC_ANYTHING, prev, step);
      if (top_n_flag == TN_TOP2 && code != null_char_) {
        // Same combined-score logic as for final codes above.
        float prob = outputs[code] + outputs[null_char_];
        if (prev != nullptr && prev_cont == NC_ANYTHING &&
            prev->code != null_char_ &&
            ((prev->code == top_code_ && code == second_code_) ||
             (code == top_code_ && prev->code == second_code_))) {
          prob += outputs[prev->code];
        }
        cert = NetworkIO::ProbToCertainty(prob) + cert_offset;
        PushDupOrNoDawgIfBetter(length + 1, false, code, INVALID_UNICHAR_ID,
                                cert, worst_dict_cert, dict_ratio, use_dawgs,
                                NC_ONLY_DUP, prev, step);
      }
    }
  }
}
// Continues for a new unichar, using dawg or non-dawg as per flag.
// In the non-dawg case also seeds the dawg search at word-start positions
// (spaces and non-space-delimited characters).
void RecodeBeamSearch::ContinueUnichar(int code, int unichar_id, float cert,
                                       float worst_dict_cert, float dict_ratio,
                                       bool use_dawgs, NodeContinuation cont,
                                       const RecodeNode *prev,
                                       RecodeBeam *step) {
  if (use_dawgs) {
    // Dawg paths only continue if not hopelessly worse than the best.
    if (cert > worst_dict_cert) {
      ContinueDawg(code, unichar_id, cert, cont, prev, step);
    }
  } else {
    RecodeHeap *nodawg_heap = &step->beams_[BeamIndex(false, cont, 0)];
    PushHeapIfBetter(kBeamWidths[0], code, unichar_id, TOP_CHOICE_PERM, false,
                     false, false, false, cert * dict_ratio, prev, nullptr,
                     nodawg_heap);
    if (dict_ != nullptr &&
        ((unichar_id == UNICHAR_SPACE && cert > worst_dict_cert) ||
         !dict_->getUnicharset().IsSpaceDelimited(unichar_id))) {
      // Any top choice position that can start a new word, ie a space or
      // any non-space-delimited character, should also be considered
      // by the dawg search, so push initial dawg to the dawg heap.
      float dawg_cert = cert;
      PermuterType permuter = TOP_CHOICE_PERM;
      // Since we use the space either side of a dictionary word in the
      // certainty of the word, (to properly handle weak spaces) and the
      // space is coming from a non-dict word, we need special conditions
      // to avoid degrading the certainty of the dict word that follows.
      // With a space we don't multiply the certainty by dict_ratio, and we
      // flag the space with NO_PERM to indicate that we should not use the
      // predecessor nulls to generate the confidence for the space, as they
      // have already been multiplied by dict_ratio, and we can't go back to
      // insert more entries in any previous heaps.
      if (unichar_id == UNICHAR_SPACE) {
        permuter = NO_PERM;
      } else {
        dawg_cert *= dict_ratio;
      }
      PushInitialDawgIfBetter(code, unichar_id, permuter, false, false,
                              dawg_cert, cont, prev, step);
    }
  }
}
// Adds a RecodeNode composed of the tuple (code, unichar_id, cert, prev,
// appropriate-dawg-args, cert) to the given heap (dawg_beam_) if unichar_id
// is a valid continuation of whatever is in prev.
void RecodeBeamSearch::ContinueDawg(int code, int unichar_id, float cert,
                                    NodeContinuation cont,
                                    const RecodeNode *prev, RecodeBeam *step) {
  RecodeHeap *dawg_heap = &step->beams_[BeamIndex(true, cont, 0)];
  RecodeHeap *nodawg_heap = &step->beams_[BeamIndex(false, cont, 0)];
  if (unichar_id == INVALID_UNICHAR_ID) {
    // Partial code or null: keep it on the dawg beam without a dict probe.
    PushHeapIfBetter(kBeamWidths[0], code, unichar_id, NO_PERM, false, false,
                     false, false, cert, prev, nullptr, dawg_heap);
    return;
  }
  // Avoid dictionary probe if score a total loss.
  float score = cert;
  if (prev != nullptr) {
    score += prev->score;
  }
  if (dawg_heap->size() >= kBeamWidths[0] &&
      score <= dawg_heap->PeekTop().data().score &&
      nodawg_heap->size() >= kBeamWidths[0] &&
      score <= nodawg_heap->PeekTop().data().score) {
    return;
  }
  const RecodeNode *uni_prev = prev;
  // Prev may be a partial code, null_char, or duplicate, so scan back to the
  // last valid unichar_id.
  while (uni_prev != nullptr &&
         (uni_prev->unichar_id == INVALID_UNICHAR_ID || uni_prev->duplicate)) {
    uni_prev = uni_prev->prev;
  }
  if (unichar_id == UNICHAR_SPACE) {
    if (uni_prev != nullptr && uni_prev->end_of_word) {
      // Space is good. Push initial state, to the dawg beam and a regular
      // space to the top choice beam.
      PushInitialDawgIfBetter(code, unichar_id, uni_prev->permuter, false,
                              false, cert, cont, prev, step);
      PushHeapIfBetter(kBeamWidths[0], code, unichar_id, uni_prev->permuter,
                       false, false, false, false, cert, prev, nullptr,
                       nodawg_heap);
    }
    return;
  } else if (uni_prev != nullptr && uni_prev->start_of_dawg &&
             uni_prev->unichar_id != UNICHAR_SPACE &&
             dict_->getUnicharset().IsSpaceDelimited(uni_prev->unichar_id) &&
             dict_->getUnicharset().IsSpaceDelimited(unichar_id)) {
    return; // Can't break words between space delimited chars.
  }
  // updated_dawgs is owned by the pushed node on success, or deleted below
  // if the dictionary rejects the continuation.
  DawgPositionVector initial_dawgs;
  auto *updated_dawgs = new DawgPositionVector;
  DawgArgs dawg_args(&initial_dawgs, updated_dawgs, NO_PERM);
  bool word_start = false;
  if (uni_prev == nullptr) {
    // Starting from beginning of line.
    dict_->default_dawgs(&initial_dawgs, false);
    word_start = true;
  } else if (uni_prev->dawgs != nullptr) {
    // Continuing a previous dict word.
    dawg_args.active_dawgs = uni_prev->dawgs;
    word_start = uni_prev->start_of_dawg;
  } else {
    return; // Can't continue if not a dict word.
  }
  auto permuter = static_cast<PermuterType>(dict_->def_letter_is_okay(
      &dawg_args, dict_->getUnicharset(), unichar_id, false));
  if (permuter != NO_PERM) {
    PushHeapIfBetter(kBeamWidths[0], code, unichar_id, permuter, false,
                     word_start, dawg_args.valid_end, false, cert, prev,
                     dawg_args.updated_dawgs, dawg_heap);
    if (dawg_args.valid_end && !space_delimited_) {
      // We can start another word right away, so push initial state as well,
      // to the dawg beam, and the regular character to the top choice beam,
      // since non-dict words can start here too.
      PushInitialDawgIfBetter(code, unichar_id, permuter, word_start, true,
                              cert, cont, prev, step);
      PushHeapIfBetter(kBeamWidths[0], code, unichar_id, permuter, false,
                       word_start, true, false, cert, prev, nullptr,
                       nodawg_heap);
    }
  } else {
    delete updated_dawgs;
  }
}
// Adds a RecodeNode composed of the tuple (code, unichar_id,
// initial-dawg-state, prev, cert) to the given heap if/ there is room or if
// better than the current worst element if already full.
// Only ONE best initial dawg is kept per continuation type; the node owns a
// freshly-allocated default dawg set.
void RecodeBeamSearch::PushInitialDawgIfBetter(int code, int unichar_id,
                                               PermuterType permuter,
                                               bool start, bool end, float cert,
                                               NodeContinuation cont,
                                               const RecodeNode *prev,
                                               RecodeBeam *step) {
  RecodeNode *best_initial_dawg = &step->best_initial_dawgs_[cont];
  // Path score = this certainty plus everything before it.
  float score = cert;
  if (prev != nullptr) {
    score += prev->score;
  }
  if (best_initial_dawg->code < 0 || score > best_initial_dawg->score) {
    auto *initial_dawgs = new DawgPositionVector;
    dict_->default_dawgs(initial_dawgs, false);
    // Assignment transfers ownership of initial_dawgs into the stored node.
    RecodeNode node(code, unichar_id, permuter, true, start, end, false, cert,
                    score, prev, initial_dawgs,
                    ComputeCodeHash(code, false, prev));
    *best_initial_dawg = node;
  }
}
// Adds a RecodeNode composed of the tuple (code, unichar_id, permuter,
// false, false, false, false, cert, prev, nullptr) to heap if there is room
// or if better than the current worst element if already full.
/* static */
void RecodeBeamSearch::PushDupOrNoDawgIfBetter(
int length, bool dup, int code, int unichar_id, float cert,
float worst_dict_cert, float dict_ratio, bool use_dawgs,
NodeContinuation cont, const RecodeNode *prev, RecodeBeam *step) {
int index = BeamIndex(use_dawgs, cont, length);
if (use_dawgs) {
if (cert > worst_dict_cert) {
PushHeapIfBetter(kBeamWidths[length], code, unichar_id,
prev ? prev->permuter : NO_PERM, false, false, false,
dup, cert, prev, nullptr, &step->beams_[index]);
}
} else {
cert *= dict_ratio;
if (cert >= kMinCertainty || code == null_char_) {
PushHeapIfBetter(kBeamWidths[length], code, unichar_id,
prev ? prev->permuter : TOP_CHOICE_PERM, false, false,
false, dup, cert, prev, nullptr, &step->beams_[index]);
}
}
}
// Adds a RecodeNode composed of the tuple (code, unichar_id, permuter,
// dawg_start, word_start, end, dup, cert, prev, d) to heap if there is room
// or if better than the current worst element if already full.
// Ownership of d transfers to the node kept in the heap; d is deleted if the
// candidate is rejected (or absorbed by UpdateHeapIfMatched via the node's
// destructor).
void RecodeBeamSearch::PushHeapIfBetter(int max_size, int code, int unichar_id,
                                        PermuterType permuter, bool dawg_start,
                                        bool word_start, bool end, bool dup,
                                        float cert, const RecodeNode *prev,
                                        DawgPositionVector *d,
                                        RecodeHeap *heap) {
  // Path score = this certainty plus the score of everything before it.
  float score = cert;
  if (prev != nullptr) {
    score += prev->score;
  }
  if (heap->size() < max_size || score > heap->PeekTop().data().score) {
    uint64_t hash = ComputeCodeHash(code, dup, prev);
    RecodeNode node(code, unichar_id, permuter, dawg_start, word_start, end,
                    dup, cert, score, prev, d, hash);
    // A node with the same code/hash/permuter/dawg-start may already be in
    // the heap; if so it is updated in place instead of pushed.
    if (UpdateHeapIfMatched(&node, heap)) {
      return;
    }
    RecodePair entry(score, node);
    heap->Push(&entry);
    ASSERT_HOST(entry.data().dawgs == nullptr);
    // Keep the heap within its width budget by dropping the worst element.
    if (heap->size() > max_size) {
      heap->Pop(&entry);
    }
  } else {
    delete d;
  }
}
// Adds a RecodeNode to heap if there is room
// or if better than the current worst element if already full.
void RecodeBeamSearch::PushHeapIfBetter(int max_size, RecodeNode *node,
                                        RecodeHeap *heap) {
  // Reject outright when the heap is full and the node can't beat the worst.
  if (heap->size() >= max_size && node->score <= heap->PeekTop().data().score) {
    return;
  }
  // An equivalent entry may already be in the heap; update it in place.
  if (UpdateHeapIfMatched(node, heap)) {
    return;
  }
  RecodePair entry(node->score, *node);
  heap->Push(&entry);
  ASSERT_HOST(entry.data().dawgs == nullptr);
  // Enforce the width budget by dropping the worst element.
  if (heap->size() > max_size) {
    heap->Pop(&entry);
  }
}
// Searches the heap for a matching entry, and updates the score with
// reshuffle if needed. Returns true if there was a match.
bool RecodeBeamSearch::UpdateHeapIfMatched(RecodeNode *new_node,
                                           RecodeHeap *heap) {
  // TODO(rays) consider hash map instead of linear search.
  // It might not be faster because the hash map would have to be updated
  // every time a heap reshuffle happens, and that would be a lot of overhead.
  std::vector<RecodePair> &entries = heap->heap();
  for (auto &pair : entries) {
    RecodeNode &existing = pair.data();
    const bool matches =
        existing.code == new_node->code &&
        existing.code_hash == new_node->code_hash &&
        existing.permuter == new_node->permuter &&
        existing.start_of_dawg == new_node->start_of_dawg;
    if (!matches) {
      continue;
    }
    if (new_node->score > existing.score) {
      // The new one is better. Replace the whole node and restore the heap
      // invariant around the changed key.
      existing = *new_node;
      pair.key() = existing.score;
      heap->Reshuffle(&pair);
    }
    return true;
  }
  return false;
}
// Computes and returns the code-hash for the given code and prev.
uint64_t RecodeBeamSearch::ComputeCodeHash(int code, bool dup,
                                           const RecodeNode *prev) const {
  uint64_t hash = prev != nullptr ? prev->code_hash : 0;
  // Duplicates and nulls do not extend the code sequence, so they leave the
  // prefix hash unchanged.
  if (dup || code == null_char_) {
    return hash;
  }
  const int num_classes = recoder_.code_range();
  // Multiply-with-carry: fold the overflowing high bits back in, then mix in
  // the new code. (Identical mod 2^64 to multiply, add carry, add code.)
  const uint64_t carry = ((hash >> 32) * num_classes) >> 32;
  return hash * num_classes + carry + code;
}
// Backtracks to extract the best path through the lattice that was built
// during Decode. On return the best_nodes vector essentially contains the set
// of code, score pairs that make the optimal path with the constraint that
// the recoder can decode the code sequence back to a sequence of unichar-ids.
// second_nodes (if non-null) receives the runner-up path.
void RecodeBeamSearch::ExtractBestPaths(
    std::vector<const RecodeNode *> *best_nodes,
    std::vector<const RecodeNode *> *second_nodes) const {
  // Scan both beams to extract the best and second best paths.
  const RecodeNode *best_node = nullptr;
  const RecodeNode *second_best_node = nullptr;
  const RecodeBeam *last_beam = beam_[beam_size_ - 1];
  for (int c = 0; c < NC_COUNT; ++c) {
    // NC_ONLY_DUP endings are incomplete (they still require a duplicate).
    if (c == NC_ONLY_DUP) {
      continue;
    }
    auto cont = static_cast<NodeContinuation>(c);
    for (int is_dawg = 0; is_dawg < 2; ++is_dawg) {
      int beam_index = BeamIndex(is_dawg, cont, 0);
      int heap_size = last_beam->beams_[beam_index].size();
      for (int h = 0; h < heap_size; ++h) {
        const RecodeNode *node = &last_beam->beams_[beam_index].get(h).data();
        if (is_dawg) {
          // dawg_node may be a null_char, or duplicate, so scan back to the
          // last valid unichar_id.
          const RecodeNode *dawg_node = node;
          while (dawg_node != nullptr &&
                 (dawg_node->unichar_id == INVALID_UNICHAR_ID ||
                  dawg_node->duplicate)) {
            dawg_node = dawg_node->prev;
          }
          if (dawg_node == nullptr ||
              (!dawg_node->end_of_word &&
               dawg_node->unichar_id != UNICHAR_SPACE)) {
            // Dawg node is not valid.
            continue;
          }
        }
        // Track the two highest-scoring valid end nodes across all beams.
        if (best_node == nullptr || node->score > best_node->score) {
          second_best_node = best_node;
          best_node = node;
        } else if (second_best_node == nullptr ||
                   node->score > second_best_node->score) {
          second_best_node = node;
        }
      }
    }
  }
  if (second_nodes != nullptr) {
    ExtractPath(second_best_node, second_nodes);
  }
  ExtractPath(best_node, best_nodes);
}
// Helper backtracks through the lattice from the given node, storing the
// path and reversing it.
void RecodeBeamSearch::ExtractPath(
const RecodeNode *node, std::vector<const RecodeNode *> *path) const {
path->clear();
while (node != nullptr) {
path->push_back(node);
node = node->prev;
}
std::reverse(path->begin(), path->end());
}
// Overload that collects at most `limiter` nodes walking backwards from
// `node`, then reverses into left-to-right order.
void RecodeBeamSearch::ExtractPath(const RecodeNode *node,
                                   std::vector<const RecodeNode *> *path,
                                   int limiter) const {
  path->clear();
  for (int collected = 0; node != nullptr && collected < limiter;
       node = node->prev, ++collected) {
    path->push_back(node);
  }
  std::reverse(path->begin(), path->end());
}
// Helper prints debug information on the given lattice path.
void RecodeBeamSearch::DebugPath(
const UNICHARSET *unicharset,
const std::vector<const RecodeNode *> &path) const {
for (unsigned c = 0; c < path.size(); ++c) {
const RecodeNode &node = *path[c];
tprintf("%u ", c);
node.Print(null_char_, *unicharset, 1);
}
}
// Helper prints debug information on the given unichar path.
// For each decoded unichar prints its x-coordinate, id/text, rating,
// certainty, word-start/end flags and permuter, then the summed rating.
// NOTE(review): assumes unichar_ids, certs, ratings, xcoords are parallel
// arrays of the same length, and xcoords index into path - as produced by
// ExtractBestPathAsUnicharIds.
void RecodeBeamSearch::DebugUnicharPath(
    const UNICHARSET *unicharset, const std::vector<const RecodeNode *> &path,
    const std::vector<int> &unichar_ids, const std::vector<float> &certs,
    const std::vector<float> &ratings, const std::vector<int> &xcoords) const {
  auto num_ids = unichar_ids.size();
  double total_rating = 0.0;
  for (unsigned c = 0; c < num_ids; ++c) {
    int coord = xcoords[c];
    tprintf("%d %d=%s r=%g, c=%g, s=%d, e=%d, perm=%d\n", coord, unichar_ids[c],
            unicharset->debug_str(unichar_ids[c]).c_str(), ratings[c], certs[c],
            path[coord]->start_of_word, path[coord]->end_of_word,
            path[coord]->permuter);
    total_rating += ratings[c];
  }
  tprintf("Path total rating = %g\n", total_rating);
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/recodebeam.cpp
|
C++
|
apache-2.0
| 54,247
|
///////////////////////////////////////////////////////////////////////
// File: recodebeam.h
// Description: Beam search to decode from the re-encoded CJK as a sequence of
// smaller numbers in place of a single large code.
// Author: Ray Smith
//
// (C) Copyright 2015, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef THIRD_PARTY_TESSERACT_LSTM_RECODEBEAM_H_
#define THIRD_PARTY_TESSERACT_LSTM_RECODEBEAM_H_
#include "dawg.h"
#include "dict.h"
#include "genericheap.h"
#include "genericvector.h"
#include "kdpair.h"
#include "networkio.h"
#include "ratngs.h"
#include "unicharcompress.h"
#include <unordered_set> // for std::unordered_set
#include <vector> // for std::vector
namespace tesseract {
// Enum describing what can follow the current node.
// Consider the following softmax outputs:
// Timestep 0 1 2 3 4 5 6 7 8
// X-score 0.01 0.55 0.98 0.42 0.01 0.01 0.40 0.95 0.01
// Y-score 0.00 0.01 0.01 0.01 0.01 0.97 0.59 0.04 0.01
// Null-score 0.99 0.44 0.01 0.57 0.98 0.02 0.01 0.01 0.98
// Then the correct CTC decoding (in which adjacent equal classes are folded,
// and then all nulls are dropped) is clearly XYX, but simple decoding (taking
// the max at each timestep) leads to:
// Null@0.99 X@0.55 X@0.98 Null@0.57 Null@0.98 Y@0.97 Y@0.59 X@0.95 Null@0.98,
// which folds to the correct XYX. The conversion to Tesseract rating and
// certainty uses the sum of the log probs (log of the product of probabilities)
// for the Rating and the minimum log prob for the certainty, but that yields a
// minimum certainty of log(0.55), which is poor for such an obvious case.
// CTC says that the probability of the result is the SUM of the products of the
// probabilities over ALL PATHS that decode to the same result, which includes:
// NXXNNYYXN, NNXNNYYN, NXXXNYYXN, NNXXNYXXN, and others including XXXXXYYXX.
// That is intractable, so some compromise between simple and ideal is needed.
// Observing that evenly split timesteps rarely happen next to each other, we
// allow scores at a transition between classes to be added for decoding thus:
// N@0.99 (N+X)@0.99 X@0.98 (N+X)@0.99 N@0.98 Y@0.97 (X+Y+N)@1.00 X@0.95 N@0.98.
// This works because NNX and NXX both decode to X, so in the middle we can use
// N+X. Note that the classes either side of a sum must stand alone, i.e. use a
// single score, to force all paths to pass through them and decode to the same
// result. Also in the special case of a transition from X to Y, with only one
// timestep between, it is possible to add X+Y+N, since XXY, XYY, and XNY all
// decode to XY.
// An important condition is that we cannot combine X and Null between two
// stand-alone Xs, since that can decode as XNX->XX or XXX->X, so the scores for
// X and Null have to go in separate paths. Combining scores in this way
// provides a much better minimum certainty of log(0.95).
// In the implementation of the beam search, we have to place the possibilities
// X, X+N and X+Y+N in the beam under appropriate conditions of the previous
// node, and constrain what can follow, to enforce the rules explained above.
// We therefore have 3 different types of node determined by what can follow:
// Constraint on what may follow a lattice node (see discussion above).
enum NodeContinuation {
  NC_ANYTHING, // This node used just its own score, so anything can follow.
  NC_ONLY_DUP, // The current node combined another score with the score for
               // itself, without a stand-alone duplicate before, so must be
               // followed by a stand-alone duplicate.
  NC_NO_DUP,   // The current node combined another score with the score for
               // itself, after a stand-alone, so can only be followed by
               // something other than a duplicate of the current node.
  NC_COUNT     // Number of continuation types (array-sizing sentinel).
};
// Enum describing the top-n status of a code.
// Enum describing the top-n status of a code at one timestep.
enum TopNState {
  TN_TOP2,     // Winner or 2nd.
  TN_TOPN,     // Runner up in top-n, but not 1st or 2nd.
  TN_ALSO_RAN, // Not in the top-n.
  TN_COUNT     // Number of states (array-sizing sentinel).
};
// Lattice element for Re-encode beam search.
struct RecodeNode {
RecodeNode()
: code(-1)
, unichar_id(INVALID_UNICHAR_ID)
, permuter(TOP_CHOICE_PERM)
, start_of_dawg(false)
, start_of_word(false)
, end_of_word(false)
, duplicate(false)
, certainty(0.0f)
, score(0.0f)
, prev(nullptr)
, dawgs(nullptr)
, code_hash(0) {}
RecodeNode(int c, int uni_id, PermuterType perm, bool dawg_start, bool word_start, bool end,
bool dup, float cert, float s, const RecodeNode *p, DawgPositionVector *d,
uint64_t hash)
: code(c)
, unichar_id(uni_id)
, permuter(perm)
, start_of_dawg(dawg_start)
, start_of_word(word_start)
, end_of_word(end)
, duplicate(dup)
, certainty(cert)
, score(s)
, prev(p)
, dawgs(d)
, code_hash(hash) {}
// NOTE: If we could use C++11, then this would be a move constructor.
// Instead we have copy constructor that does a move!! This is because we
// don't want to copy the whole DawgPositionVector each time, and true
// copying isn't necessary for this struct. It does get moved around a lot
// though inside the heap and during heap push, hence the move semantics.
RecodeNode(const RecodeNode &src) : dawgs(nullptr) {
*this = src;
ASSERT_HOST(src.dawgs == nullptr);
}
RecodeNode &operator=(const RecodeNode &src) {
delete dawgs;
memcpy(this, &src, sizeof(src));
((RecodeNode &)src).dawgs = nullptr;
return *this;
}
~RecodeNode() {
delete dawgs;
}
// Prints details of the node.
void Print(int null_char, const UNICHARSET &unicharset, int depth) const;
// The re-encoded code here = index to network output.
int code;
// The decoded unichar_id is only valid for the final code of a sequence.
int unichar_id;
// The type of permuter active at this point. Intervals between start_of_word
// and end_of_word make valid words of type given by permuter where
// end_of_word is true. These aren't necessarily delimited by spaces.
PermuterType permuter;
// True if this is the initial dawg state. May be attached to a space or,
// in a non-space-delimited lang, the end of the previous word.
bool start_of_dawg;
// True if this is the first node in a dictionary word.
bool start_of_word;
// True if this represents a valid candidate end of word position. Does not
// necessarily mark the end of a word, since a word can be extended beyond a
// candidate end by a continuation, eg 'the' continues to 'these'.
bool end_of_word;
// True if this->code is a duplicate of prev->code. Some training modes
// allow the network to output duplicate characters and crush them with CTC,
// but that would mess up the dictionary search, so we just smash them
// together on the fly using the duplicate flag.
bool duplicate;
// Certainty (log prob) of (just) this position.
float certainty;
// Total certainty of the path to this position.
float score;
// The previous node in this chain. Borrowed pointer.
const RecodeNode *prev;
// The currently active dawgs at this position. Owned pointer.
DawgPositionVector *dawgs;
// A hash of all codes in the prefix and this->code as well. Used for
// duplicate path removal.
uint64_t code_hash;
};
// Heap entry for the beam: the path score (key) paired with its node (data).
using RecodePair = KDPairInc<double, RecodeNode>;
// Min-heap of beam entries; the worst-scoring entry is at the top.
using RecodeHeap = GenericHeap<RecodePair>;
// Class that holds the entire beam search for recognition of a text line.
class TESS_API RecodeBeamSearch {
public:
  // Borrows the pointer, which is expected to survive until *this is deleted.
  RecodeBeamSearch(const UnicharCompress &recoder, int null_char, bool simple_text, Dict *dict);
  ~RecodeBeamSearch();
  // Decodes the set of network outputs, storing the lattice internally.
  // If charset is not null, it enables detailed debugging of the beam search.
  void Decode(const NetworkIO &output, double dict_ratio, double cert_offset,
              double worst_dict_cert, const UNICHARSET *charset, int lstm_choice_mode = 0);
  // As above, but takes the network outputs as a raw 2-d float array.
  void Decode(const GENERIC_2D_ARRAY<float> &output, double dict_ratio, double cert_offset,
              double worst_dict_cert, const UNICHARSET *charset);
  // Decode variant that fills the secondary beams (secondary_beam_); used
  // together with excludedUnichars to explore alternative paths.
  void DecodeSecondaryBeams(const NetworkIO &output, double dict_ratio, double cert_offset,
                            double worst_dict_cert, const UNICHARSET *charset,
                            int lstm_choice_mode = 0);
  // Returns the best path as labels/scores/xcoords similar to simple CTC.
  void ExtractBestPathAsLabels(std::vector<int> *labels, std::vector<int> *xcoords) const;
  // Returns the best path as unichar-ids/certs/ratings/xcoords skipping
  // duplicates, nulls and intermediate parts.
  void ExtractBestPathAsUnicharIds(bool debug, const UNICHARSET *unicharset,
                                   std::vector<int> *unichar_ids, std::vector<float> *certs,
                                   std::vector<float> *ratings, std::vector<int> *xcoords) const;
  // Returns the best path as a set of WERD_RES.
  void ExtractBestPathAsWords(const TBOX &line_box, float scale_factor, bool debug,
                              const UNICHARSET *unicharset, PointerVector<WERD_RES> *words,
                              int lstm_choice_mode = 0);
  // Generates debug output of the content of the beams after a Decode.
  void DebugBeams(const UNICHARSET &unicharset) const;
  // Extract the best characters from the current decode iteration and block
  // those symbols for the next iteration. In contrast to Tesseract's standard
  // method to chose the best overall node chain, this methods looks at a short
  // node chain segmented by the character boundaries and chooses the best
  // option independent of the remaining node chain.
  void extractSymbolChoices(const UNICHARSET *unicharset);
  // Generates debug output of the content of the beams after a Decode.
  void PrintBeam2(bool uids, int num_outputs, const UNICHARSET *charset, bool secondary) const;
  // Segments the timestep bundle by the character_boundaries.
  void segmentTimestepsByCharacters();
  // Unions the segmented timestep character bundles to one big bundle.
  std::vector<std::vector<std::pair<const char *, float>>>
  combineSegmentedTimesteps(
      std::vector<std::vector<std::vector<std::pair<const char *, float>>>> *segmentedTimesteps);
  // Stores the alternative characters of every timestep together with their
  // probability.
  std::vector<std::vector<std::pair<const char *, float>>> timesteps;
  // The timesteps above, split into per-character bundles.
  std::vector<std::vector<std::vector<std::pair<const char *, float>>>> segmentedTimesteps;
  // Stores the character choices found in the ctc algorithm
  std::vector<std::vector<std::pair<const char *, float>>> ctc_choices;
  // Stores all unicharids which are excluded for future iterations
  std::vector<std::unordered_set<int>> excludedUnichars;
  // Stores the character boundaries regarding timesteps.
  std::vector<int> character_boundaries_;
  // Clipping value for certainty inside Tesseract. Reflects the minimum value
  // of certainty that will be returned by ExtractBestPathAsUnicharIds.
  // Supposedly on a uniform scale that can be compared across languages and
  // engines.
  static constexpr float kMinCertainty = -20.0f;
  // Number of different code lengths for which we have a separate beam.
  static const int kNumLengths = RecodedCharID::kMaxCodeLen + 1;
  // Total number of beams: dawg/nodawg * number of NodeContinuation * number
  // of different lengths.
  static const int kNumBeams = 2 * NC_COUNT * kNumLengths;
  // Returns the relevant factor in the beams_ index.
  static int LengthFromBeamsIndex(int index) {
    return index % kNumLengths;
  }
  static NodeContinuation ContinuationFromBeamsIndex(int index) {
    return static_cast<NodeContinuation>((index / kNumLengths) % NC_COUNT);
  }
  static bool IsDawgFromBeamsIndex(int index) {
    return index / (kNumLengths * NC_COUNT) > 0;
  }
  // Computes a beams_ index from the given factors.
  static int BeamIndex(bool is_dawg, NodeContinuation cont, int length) {
    return (is_dawg * NC_COUNT + cont) * kNumLengths + length;
  }
private:
  // Struct for the Re-encode beam search. This struct holds the data for
  // a single time-step position of the output. Use a vector<RecodeBeam>
  // to hold all the timesteps and prevent reallocation of the individual heaps.
  struct RecodeBeam {
    // Resets to the initial state without deleting all the memory.
    void Clear() {
      for (auto &beam : beams_) {
        beam.clear();
      }
      RecodeNode empty;
      for (auto &best_initial_dawg : best_initial_dawgs_) {
        best_initial_dawg = empty;
      }
    }
    // A separate beam for each combination of code length,
    // NodeContinuation, and dictionary flag. Separating out all these types
    // allows the beam to be quite narrow, and yet still have a low chance of
    // losing the best path.
    // We have to keep all these beams separate, since the highest scoring paths
    // come from the paths that are most likely to dead-end at any time, like
    // dawg paths, NC_ONLY_DUP etc.
    // Each heap is stored with the WORST result at the top, so we can quickly
    // get the top-n values.
    RecodeHeap beams_[kNumBeams];
    // While the language model is only a single word dictionary, we can use
    // word starts as a choke point in the beam, and keep only a single dict
    // start node at each step (for each NodeContinuation type), so we find the
    // best one here and push it on the heap, if it qualifies, after processing
    // all of the step.
    RecodeNode best_initial_dawgs_[NC_COUNT];
  };
  // Score/class pair used by top_heap_ when computing the top-n outputs.
  using TopPair = KDPairInc<float, int>;
  // Generates debug output of the content of a single beam position.
  void DebugBeamPos(const UNICHARSET &unicharset, const RecodeHeap &heap) const;
  // Returns the given best_nodes as unichar-ids/certs/ratings/xcoords skipping
  // duplicates, nulls and intermediate parts.
  static void ExtractPathAsUnicharIds(const std::vector<const RecodeNode *> &best_nodes,
                                      std::vector<int> *unichar_ids, std::vector<float> *certs,
                                      std::vector<float> *ratings, std::vector<int> *xcoords,
                                      std::vector<int> *character_boundaries = nullptr);
  // Sets up a word with the ratings matrix and fake blobs with boxes in the
  // right places.
  WERD_RES *InitializeWord(bool leading_space, const TBOX &line_box, int word_start, int word_end,
                           float space_certainty, const UNICHARSET *unicharset,
                           const std::vector<int> &xcoords, float scale_factor);
  // Fills top_n_flags_ with bools that are true iff the corresponding output
  // is one of the top_n.
  void ComputeTopN(const float *outputs, int num_outputs, int top_n);
  // As ComputeTopN, but ignores the outputs listed in exList.
  void ComputeSecTopN(std::unordered_set<int> *exList, const float *outputs, int num_outputs,
                      int top_n);
  // Adds the computation for the current time-step to the beam. Call at each
  // time-step in sequence from left to right. outputs is the activation vector
  // for the current timestep.
  void DecodeStep(const float *outputs, int t, double dict_ratio, double cert_offset,
                  double worst_dict_cert, const UNICHARSET *charset, bool debug = false);
  // Counterpart of DecodeStep for the secondary beams (see DecodeSecondaryBeams).
  void DecodeSecondaryStep(const float *outputs, int t, double dict_ratio, double cert_offset,
                           double worst_dict_cert, const UNICHARSET *charset, bool debug = false);
  // Saves the most certain choices for the current time-step.
  void SaveMostCertainChoices(const float *outputs, int num_outputs, const UNICHARSET *charset,
                              int xCoord);
  // Calculates more accurate character boundaries which can be used to
  // provide more accurate alternative symbol choices.
  static void calculateCharBoundaries(std::vector<int> *starts, std::vector<int> *ends,
                                      std::vector<int> *character_boundaries_, int maxWidth);
  // Adds to the appropriate beams the legal (according to recoder)
  // continuations of context prev, which is from the given index to beams_,
  // using the given network outputs to provide scores to the choices. Uses only
  // those choices for which top_n_flags[code] == top_n_flag.
  void ContinueContext(const RecodeNode *prev, int index, const float *outputs,
                       TopNState top_n_flag, const UNICHARSET *unicharset, double dict_ratio,
                       double cert_offset, double worst_dict_cert, RecodeBeam *step);
  // Continues for a new unichar, using dawg or non-dawg as per flag.
  void ContinueUnichar(int code, int unichar_id, float cert, float worst_dict_cert,
                       float dict_ratio, bool use_dawgs, NodeContinuation cont,
                       const RecodeNode *prev, RecodeBeam *step);
  // Adds a RecodeNode composed of the args to the correct heap in step if
  // unichar_id is a valid dictionary continuation of whatever is in prev.
  void ContinueDawg(int code, int unichar_id, float cert, NodeContinuation cont,
                    const RecodeNode *prev, RecodeBeam *step);
  // Sets the correct best_initial_dawgs_ with a RecodeNode composed of the args
  // if better than what is already there.
  void PushInitialDawgIfBetter(int code, int unichar_id, PermuterType permuter, bool start,
                               bool end, float cert, NodeContinuation cont, const RecodeNode *prev,
                               RecodeBeam *step);
  // Adds a RecodeNode composed of the args to the correct heap in step for
  // partial unichar or duplicate if there is room or if better than the
  // current worst element if already full.
  void PushDupOrNoDawgIfBetter(int length, bool dup, int code, int unichar_id, float cert,
                               float worst_dict_cert, float dict_ratio, bool use_dawgs,
                               NodeContinuation cont, const RecodeNode *prev, RecodeBeam *step);
  // Adds a RecodeNode composed of the args to the correct heap in step if there
  // is room or if better than the current worst element if already full.
  void PushHeapIfBetter(int max_size, int code, int unichar_id, PermuterType permuter,
                        bool dawg_start, bool word_start, bool end, bool dup, float cert,
                        const RecodeNode *prev, DawgPositionVector *d, RecodeHeap *heap);
  // Adds a RecodeNode to heap if there is room
  // or if better than the current worst element if already full.
  void PushHeapIfBetter(int max_size, RecodeNode *node, RecodeHeap *heap);
  // Searches the heap for an entry matching new_node, and updates the entry
  // with reshuffle if needed. Returns true if there was a match.
  bool UpdateHeapIfMatched(RecodeNode *new_node, RecodeHeap *heap);
  // Computes and returns the code-hash for the given code and prev.
  uint64_t ComputeCodeHash(int code, bool dup, const RecodeNode *prev) const;
  // Backtracks to extract the best path through the lattice that was built
  // during Decode. On return the best_nodes vector essentially contains the set
  // of code, score pairs that make the optimal path with the constraint that
  // the recoder can decode the code sequence back to a sequence of unichar-ids.
  void ExtractBestPaths(std::vector<const RecodeNode *> *best_nodes,
                        std::vector<const RecodeNode *> *second_nodes) const;
  // Helper backtracks through the lattice from the given node, storing the
  // path and reversing it.
  void ExtractPath(const RecodeNode *node, std::vector<const RecodeNode *> *path) const;
  // As above, but with a limit on how far back the path is followed.
  void ExtractPath(const RecodeNode *node, std::vector<const RecodeNode *> *path,
                   int limiter) const;
  // Helper prints debug information on the given lattice path.
  void DebugPath(const UNICHARSET *unicharset, const std::vector<const RecodeNode *> &path) const;
  // Helper prints debug information on the given unichar path.
  void DebugUnicharPath(const UNICHARSET *unicharset, const std::vector<const RecodeNode *> &path,
                        const std::vector<int> &unichar_ids, const std::vector<float> &certs,
                        const std::vector<float> &ratings, const std::vector<int> &xcoords) const;
  // Beam width used for each code length (indexed 0..kMaxCodeLen).
  static const int kBeamWidths[RecodedCharID::kMaxCodeLen + 1];
  // The encoder/decoder that we will be using.
  const UnicharCompress &recoder_;
  // The beam for each timestep in the output.
  std::vector<RecodeBeam *> beam_;
  // Secondary Beam for Results with less Probability
  std::vector<RecodeBeam *> secondary_beam_;
  // The number of timesteps valid in beam_;
  int beam_size_;
  // A flag to indicate which outputs are the top-n choices. Current timestep
  // only.
  std::vector<TopNState> top_n_flags_;
  // A record of the highest and second scoring codes.
  int top_code_;
  int second_code_;
  // Heap used to compute the top_n_flags_.
  GenericHeap<TopPair> top_heap_;
  // Borrowed pointer to the dictionary to use in the search.
  Dict *dict_;
  // True if the language is space-delimited, which is true for most languages
  // except chi*, jpn, tha.
  bool space_delimited_;
  // True if the input is simple text, ie adjacent equal chars are not to be
  // eliminated.
  bool is_simple_text_;
  // The encoded (class label) of the null/reject character.
  int null_char_;
};
} // namespace tesseract.
#endif // THIRD_PARTY_TESSERACT_LSTM_RECODEBEAM_H_
|
2301_81045437/tesseract
|
src/lstm/recodebeam.h
|
C++
|
apache-2.0
| 22,082
|
///////////////////////////////////////////////////////////////////////
// File: reconfig.cpp
// Description: Network layer that reconfigures the scaling vs feature
// depth.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#include "reconfig.h"
namespace tesseract {
// Builds a reconfiguring layer that packs x_scale * y_scale input steps of
// depth ni into one output step, so the output depth is ni * x_scale * y_scale.
Reconfig::Reconfig(const std::string &name, int ni, int x_scale, int y_scale)
    : Network(NT_RECONFIG, name, ni, ni * x_scale * y_scale) {
  x_scale_ = x_scale;
  y_scale_ = y_scale;
}
// Returns the shape output from the network given an input shape (which may
// be partially unknown ie zero).
StaticShape Reconfig::OutputShape(const StaticShape &input_shape) const {
  StaticShape shape(input_shape);
  // Each tile of x_scale_ x y_scale_ input steps becomes one output step.
  shape.set_height(input_shape.height() / y_scale_);
  shape.set_width(input_shape.width() / x_scale_);
  // Maxpool (a subclass sharing this code) keeps the depth; a plain reconfig
  // stacks the tile into the depth dimension.
  if (type_ != NT_MAXPOOL) {
    shape.set_depth(input_shape.depth() * y_scale_ * x_scale_);
  }
  return shape;
}
// Returns an integer reduction factor that the network applies to the
// time sequence. Assumes that any 2-d is already eliminated. Used for
// scaling bounding boxes of truth data.
// WARNING: if GlobalMinimax is used to vary the scale, this will return
// the last used scale factor. Call it before any forward, and it will return
// the minimum scale factor of the paths through the GlobalMinimax.
int Reconfig::XScaleFactor() const {
  // Each output timestep consumes x_scale_ input timesteps.
  return x_scale_;
}
// Writes to the given file. Returns false in case of error.
bool Reconfig::Serialize(TFile *fp) const {
  // Base-class data first, then this layer's two scale factors.
  if (!Network::Serialize(fp)) {
    return false;
  }
  if (!fp->Serialize(&x_scale_)) {
    return false;
  }
  return fp->Serialize(&y_scale_);
}
// Reads from the given file. Returns false in case of error.
bool Reconfig::DeSerialize(TFile *fp) {
  // Read the two scale factors; short-circuits on the first failure.
  if (!fp->DeSerialize(&x_scale_) || !fp->DeSerialize(&y_scale_)) {
    return false;
  }
  // The output depth is derived from the scales, not serialized.
  no_ = ni_ * x_scale_ * y_scale_;
  return true;
}
// Runs forward propagation of activations on the input line.
// See NetworkCpp for a detailed discussion of the arguments.
void Reconfig::Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
NetworkScratch *scratch, NetworkIO *output) {
output->ResizeScaled(input, x_scale_, y_scale_, no_);
back_map_ = input.stride_map();
StrideMap::Index dest_index(output->stride_map());
do {
int out_t = dest_index.t();
StrideMap::Index src_index(input.stride_map(), dest_index.index(FD_BATCH),
dest_index.index(FD_HEIGHT) * y_scale_,
dest_index.index(FD_WIDTH) * x_scale_);
// Stack x_scale_ groups of y_scale_ inputs together.
for (int x = 0; x < x_scale_; ++x) {
for (int y = 0; y < y_scale_; ++y) {
StrideMap::Index src_xy(src_index);
if (src_xy.AddOffset(x, FD_WIDTH) && src_xy.AddOffset(y, FD_HEIGHT)) {
output->CopyTimeStepGeneral(out_t, (x * y_scale_ + y) * ni_, ni_, input, src_xy.t(), 0);
}
}
}
} while (dest_index.Increment());
}
// Runs backward propagation of errors on the deltas line.
// See NetworkCpp for a detailed discussion of the arguments.
bool Reconfig::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
NetworkIO *back_deltas) {
back_deltas->ResizeToMap(fwd_deltas.int_mode(), back_map_, ni_);
StrideMap::Index src_index(fwd_deltas.stride_map());
do {
int in_t = src_index.t();
StrideMap::Index dest_index(back_deltas->stride_map(), src_index.index(FD_BATCH),
src_index.index(FD_HEIGHT) * y_scale_,
src_index.index(FD_WIDTH) * x_scale_);
// Unstack x_scale_ groups of y_scale_ inputs that are together.
for (int x = 0; x < x_scale_; ++x) {
for (int y = 0; y < y_scale_; ++y) {
StrideMap::Index dest_xy(dest_index);
if (dest_xy.AddOffset(x, FD_WIDTH) && dest_xy.AddOffset(y, FD_HEIGHT)) {
back_deltas->CopyTimeStepGeneral(dest_xy.t(), 0, ni_, fwd_deltas, in_t,
(x * y_scale_ + y) * ni_);
}
}
}
} while (src_index.Increment());
return needs_to_backprop_;
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/reconfig.cpp
|
C++
|
apache-2.0
| 4,794
|
///////////////////////////////////////////////////////////////////////
// File: reconfig.h
// Description: Network layer that reconfigures the scaling vs feature
// depth.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_RECONFIG_H_
#define TESSERACT_LSTM_RECONFIG_H_
#include "matrix.h"
#include "network.h"
namespace tesseract {
// Reconfigures (Shrinks) the inputs by concatenating an x_scale by y_scale tile
// of inputs together, producing a single, deeper output per tile.
// Note that fractional parts are truncated for efficiency, so make sure the
// input stride is a multiple of the y_scale factor!
class Reconfig : public Network {
public:
  TESS_API
  Reconfig(const std::string &name, int ni, int x_scale, int y_scale);
  ~Reconfig() override = default;
  // Returns the shape output from the network given an input shape (which may
  // be partially unknown ie zero).
  StaticShape OutputShape(const StaticShape &input_shape) const override;
  // Returns the network spec string, e.g. "S2,2" for a 2x2 reconfig.
  std::string spec() const override {
    return "S" + std::to_string(y_scale_) + "," + std::to_string(x_scale_);
  }
  // Returns an integer reduction factor that the network applies to the
  // time sequence. Assumes that any 2-d is already eliminated. Used for
  // scaling bounding boxes of truth data.
  // WARNING: if GlobalMinimax is used to vary the scale, this will return
  // the last used scale factor. Call it before any forward, and it will return
  // the minimum scale factor of the paths through the GlobalMinimax.
  int XScaleFactor() const override;
  // Writes to the given file. Returns false in case of error.
  bool Serialize(TFile *fp) const override;
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(TFile *fp) override;
  // Runs forward propagation of activations on the input line.
  // See Network for a detailed discussion of the arguments.
  void Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
               NetworkScratch *scratch, NetworkIO *output) override;
  // Runs backward propagation of errors on the deltas line.
  // See Network for a detailed discussion of the arguments.
  bool Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                NetworkIO *back_deltas) override;
private:
  // Stub: a Reconfig layer carries no weights of its own, so this just
  // reports the (unexpected) call.
  void DebugWeights() override {
    tprintf("Must override Network::DebugWeights for type %d\n", type_);
  }
protected:
  // Non-serialized data used to store parameters between forward and back.
  StrideMap back_map_;
  // Serialized data.
  int32_t x_scale_;
  int32_t y_scale_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_RECONFIG_H_
|
2301_81045437/tesseract
|
src/lstm/reconfig.h
|
C++
|
apache-2.0
| 3,323
|
///////////////////////////////////////////////////////////////////////
// File: reversed.cpp
// Description: Runs a single network on time-reversed input, reversing output.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#include "reversed.h"
#include <cstdio>
#include "networkscratch.h"
namespace tesseract {
// Constructs a wrapper of the given reversal type (NT_XREVERSED, NT_YREVERSED
// or NT_XYTRANSPOSE); the wrapped network is supplied later via SetNetwork.
Reversed::Reversed(const std::string &name, NetworkType type) : Plumbing(name) {
  type_ = type;
}
// Returns the shape output from the network given an input shape (which may
// be partially unknown ie zero).
StaticShape Reversed::OutputShape(const StaticShape &input_shape) const {
  // X/Y reversal leaves the shape unchanged, so defer to the wrapped network.
  if (type_ != NT_XYTRANSPOSE) {
    return stack_[0]->OutputShape(input_shape);
  }
  // Transpose: swap width and height, run the wrapped net, then swap back.
  StaticShape transposed(input_shape);
  transposed.set_width(input_shape.height());
  transposed.set_height(input_shape.width());
  StaticShape out_shape = stack_[0]->OutputShape(transposed);
  out_shape.SetShape(out_shape.batch(), out_shape.width(), out_shape.height(), out_shape.depth());
  return out_shape;
}
// Takes ownership of the given network to make it the reversed one.
// Any previously held network is released from the stack first.
void Reversed::SetNetwork(Network *network) {
  stack_.clear();
  AddToStack(network);
}
// Runs forward propagation of activations on the input line.
// See NetworkCpp for a detailed discussion of the arguments.
void Reversed::Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
NetworkScratch *scratch, NetworkIO *output) {
NetworkScratch::IO rev_input(input, scratch);
ReverseData(input, rev_input);
NetworkScratch::IO rev_output(input, scratch);
stack_[0]->Forward(debug, *rev_input, nullptr, scratch, rev_output);
ReverseData(*rev_output, output);
}
// Runs backward propagation of errors on the deltas line.
// See NetworkCpp for a detailed discussion of the arguments.
bool Reversed::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
NetworkIO *back_deltas) {
NetworkScratch::IO rev_input(fwd_deltas, scratch);
ReverseData(fwd_deltas, rev_input);
NetworkScratch::IO rev_output(fwd_deltas, scratch);
if (stack_[0]->Backward(debug, *rev_input, scratch, rev_output)) {
ReverseData(*rev_output, back_deltas);
return true;
}
return false;
}
// Copies src to *dest with the reversal according to type_.
void Reversed::ReverseData(const NetworkIO &src, NetworkIO *dest) const {
  switch (type_) {
    case NT_XREVERSED:
      dest->CopyWithXReversal(src);
      break;
    case NT_YREVERSED:
      dest->CopyWithYReversal(src);
      break;
    default: // NT_XYTRANSPOSE
      dest->CopyWithXYTranspose(src);
      break;
  }
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/reversed.cpp
|
C++
|
apache-2.0
| 3,215
|
///////////////////////////////////////////////////////////////////////
// File: reversed.h
// Description: Runs a single network on time-reversed input, reversing output.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_REVERSED_H_
#define TESSERACT_LSTM_REVERSED_H_
#include "matrix.h"
#include "plumbing.h"
namespace tesseract {
// C++ Implementation of the Reversed class from lstm.py.
class Reversed : public Plumbing {
public:
  TESS_API
  explicit Reversed(const std::string &name, NetworkType type);
  ~Reversed() override = default;
  // Returns the shape output from the network given an input shape (which may
  // be partially unknown ie zero).
  StaticShape OutputShape(const StaticShape &input_shape) const override;
  // Returns the spec string that a user would write to get this network.
  std::string spec() const override {
    std::string spec(type_ == NT_XREVERSED ? "Rx" : (type_ == NT_YREVERSED ? "Ry" : "Txy"));
    // For most simple cases, we will output Rx<net> or Ry<net> where <net> is
    // the network in stack_[0], but in the special case that <net> is an
    // LSTM, we will just output the LSTM's spec modified to take the reversal
    // into account. This is because when the user specified Lfy64, we actually
    // generated TxyLfx64, and if the user specified Lrx64 we actually
    // generated RxLfx64, and we want to display what the user asked for.
    std::string net_spec(stack_[0]->spec());
    if (net_spec[0] == 'L') {
      // Setup a from and to character according to the type of the reversal
      // such that the LSTM spec gets modified to the spec that the user
      // asked for
      char from = 'f';
      char to = 'r';
      if (type_ == NT_XYTRANSPOSE) {
        from = 'x';
        to = 'y';
      }
      // Change the from char to the to char.
      for (auto &it : net_spec) {
        if (it == from) {
          it = to;
        }
      }
      // BUGFIX: return just the rewritten LSTM spec. The reversal is already
      // encoded in its direction character, so prefixing Rx/Ry/Txy (as the
      // previous code did via spec += net_spec) would print e.g. "TxyLfy64"
      // instead of the "Lfy64" the user originally wrote.
      return net_spec;
    }
    spec += net_spec;
    return spec;
  }
  // Takes ownership of the given network to make it the reversed one.
  TESS_API
  void SetNetwork(Network *network);
  // Runs forward propagation of activations on the input line.
  // See Network for a detailed discussion of the arguments.
  void Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
               NetworkScratch *scratch, NetworkIO *output) override;
  // Runs backward propagation of errors on the deltas line.
  // See Network for a detailed discussion of the arguments.
  bool Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                NetworkIO *back_deltas) override;
private:
  // Copies src to *dest with the reversal according to type_.
  void ReverseData(const NetworkIO &src, NetworkIO *dest) const;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_REVERSED_H_
|
2301_81045437/tesseract
|
src/lstm/reversed.h
|
C++
|
apache-2.0
| 3,460
|
///////////////////////////////////////////////////////////////////////
// File: series.cpp
// Description: Runs networks in series on the same input.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#include "series.h"
#include "fullyconnected.h"
#include "networkscratch.h"
#include "scrollview.h"
#include "tprintf.h"
namespace tesseract {
// ni_ and no_ will be set by AddToStack.
// Constructs an empty series container; layers are appended later.
Series::Series(const std::string &name) : Plumbing(name) {
  type_ = NT_SERIES;
}
// Returns the shape output from the network given an input shape (which may
// be partially unknown ie zero).
StaticShape Series::OutputShape(const StaticShape &input_shape) const {
  // Thread the shape through every layer in order.
  StaticShape shape(input_shape);
  for (auto *layer : stack_) {
    shape = layer->OutputShape(shape);
  }
  return shape;
}
// Sets up the network for training. Initializes weights using weights of
// scale `range` picked according to the random number generator `randomizer`.
// Note that series has its own implementation just for debug purposes.
int Series::InitWeights(float range, TRand *randomizer) {
  num_weights_ = 0;
  tprintf("Num outputs,weights in Series:\n");
  // Initialize each layer in turn, accumulating the total weight count.
  for (auto &layer : stack_) {
    int layer_weights = layer->InitWeights(range, randomizer);
    tprintf(" %s:%d, %d\n", layer->spec().c_str(), layer->NumOutputs(), layer_weights);
    num_weights_ += layer_weights;
  }
  tprintf("Total weights = %d\n", num_weights_);
  return num_weights_;
}
// Recursively searches the network for softmaxes with old_no outputs,
// and remaps their outputs according to code_map. See network.h for details.
int Series::RemapOutputs(int old_no, const std::vector<int> &code_map) {
  num_weights_ = 0;
  tprintf("Num (Extended) outputs,weights in Series:\n");
  // Remap each layer, accumulating the new total weight count.
  for (auto &layer : stack_) {
    int layer_weights = layer->RemapOutputs(old_no, code_map);
    tprintf(" %s:%d, %d\n", layer->spec().c_str(), layer->NumOutputs(), layer_weights);
    num_weights_ += layer_weights;
  }
  tprintf("Total weights = %d\n", num_weights_);
  // The series' output count is whatever the final layer now produces.
  no_ = stack_.back()->NumOutputs();
  return num_weights_;
}
// Sets needs_to_backprop_ to needs_backprop and returns true if
// needs_backprop || any weights in this network so the next layer forward
// can be told to produce backprop for this layer if needed.
bool Series::SetupNeedsBackprop(bool needs_backprop) {
  needs_to_backprop_ = needs_backprop;
  // Propagate the flag forward through the stack; once any layer needs
  // backprop, all later layers do too.
  bool needed = needs_backprop;
  for (auto *layer : stack_) {
    needed = layer->SetupNeedsBackprop(needed);
  }
  return needed;
}
// Returns an integer reduction factor that the network applies to the
// time sequence. Assumes that any 2-d is already eliminated. Used for
// scaling bounding boxes of truth data.
// WARNING: if GlobalMinimax is used to vary the scale, this will return
// the last used scale factor. Call it before any forward, and it will return
// the minimum scale factor of the paths through the GlobalMinimax.
int Series::XScaleFactor() const {
  // The series' scale factor is the product of its layers' factors.
  int product = 1;
  for (auto *layer : stack_) {
    product *= layer->XScaleFactor();
  }
  return product;
}
// Provides the (minimum) x scale factor to the network (of interest only to
// input units) so they can determine how to scale bounding boxes.
// Only the first layer can be an input unit, so only it receives the factor.
void Series::CacheXScaleFactor(int factor) {
  stack_[0]->CacheXScaleFactor(factor);
}
// Runs forward propagation of activations on the input line.
// See NetworkCpp for a detailed discussion of the arguments.
void Series::Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
NetworkScratch *scratch, NetworkIO *output) {
int stack_size = stack_.size();
ASSERT_HOST(stack_size > 1);
// Revolving intermediate buffers.
NetworkScratch::IO buffer1(input, scratch);
NetworkScratch::IO buffer2(input, scratch);
// Run each network in turn, giving the output of n as the input to n + 1,
// with the final network providing the real output.
stack_[0]->Forward(debug, input, input_transpose, scratch, buffer1);
for (int i = 1; i < stack_size; i += 2) {
stack_[i]->Forward(debug, *buffer1, nullptr, scratch, i + 1 < stack_size ? buffer2 : output);
if (i + 1 == stack_size) {
return;
}
stack_[i + 1]->Forward(debug, *buffer2, nullptr, scratch,
i + 2 < stack_size ? buffer1 : output);
}
}
// Runs backward propagation of errors on the deltas line.
// See NetworkCpp for a detailed discussion of the arguments.
bool Series::Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                      NetworkIO *back_deltas) {
  if (!IsTraining()) {
    return false;
  }
  int stack_size = stack_.size();
  ASSERT_HOST(stack_size > 1);
  // Revolving intermediate buffers.
  // The loop below is unrolled by two so buffer1 and buffer2 can ping-pong:
  // each network reads deltas from one buffer and writes to the other.
  NetworkScratch::IO buffer1(fwd_deltas, scratch);
  NetworkScratch::IO buffer2(fwd_deltas, scratch);
  // Run each network in reverse order, giving the back_deltas output of n as
  // the fwd_deltas input to n-1, with the 0 network providing the real output.
  if (!stack_.back()->IsTraining() ||
      !stack_.back()->Backward(debug, fwd_deltas, scratch, buffer1)) {
    return false;
  }
  for (int i = stack_size - 2; i >= 0; i -= 2) {
    // Network 0 writes directly to back_deltas instead of a scratch buffer.
    if (!stack_[i]->IsTraining() ||
        !stack_[i]->Backward(debug, *buffer1, scratch, i > 0 ? buffer2 : back_deltas)) {
      return false;
    }
    if (i == 0) {
      return needs_to_backprop_;
    }
    if (!stack_[i - 1]->IsTraining() ||
        !stack_[i - 1]->Backward(debug, *buffer2, scratch, i > 1 ? buffer1 : back_deltas)) {
      return false;
    }
  }
  return needs_to_backprop_;
}
// Splits the series after the given index, returning the two parts and
// deletes itself. The first part, up to network with index last_start, goes
// into start, and the rest goes into end.
void Series::SplitAt(unsigned last_start, Series **start, Series **end) {
  // Both outputs are nulled first so callers can detect failure.
  *start = nullptr;
  *end = nullptr;
  if (last_start >= stack_.size()) {
    tprintf("Invalid split index %u must be in range [0,%zu]!\n", last_start, stack_.size() - 1);
    return;
  }
  auto *master_series = new Series("MasterSeries");
  auto *boosted_series = new Series("BoostedSeries");
  for (unsigned s = 0; s <= last_start; ++s) {
    if (s + 1 == stack_.size() && stack_[s]->type() == NT_SOFTMAX) {
      // Change the softmax to a tanh.
      auto *fc = static_cast<FullyConnected *>(stack_[s]);
      fc->ChangeType(NT_TANH);
    }
    // Transfer ownership of the component; null our slot so `delete this`
    // below does not also delete the moved network.
    master_series->AddToStack(stack_[s]);
    stack_[s] = nullptr;
  }
  for (unsigned s = last_start + 1; s < stack_.size(); ++s) {
    boosted_series->AddToStack(stack_[s]);
    stack_[s] = nullptr;
  }
  *start = master_series;
  *end = boosted_series;
  delete this;
}
// Appends the elements of the src series to this, removing from src and
// deleting it.
void Series::AppendSeries(Network *src) {
  ASSERT_HOST(src->type() == NT_SERIES);
  auto *other = static_cast<Series *>(src);
  // Move every component over, clearing each slot in the source so that
  // deleting src afterwards does not also delete the transferred networks.
  for (size_t i = 0; i < other->stack_.size(); ++i) {
    AddToStack(other->stack_[i]);
    other->stack_[i] = nullptr;
  }
  delete src;
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/series.cpp
|
C++
|
apache-2.0
| 7,560
|
///////////////////////////////////////////////////////////////////////
// File: series.h
// Description: Runs networks in series on the same input.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_SERIES_H_
#define TESSERACT_LSTM_SERIES_H_
#include "plumbing.h"
namespace tesseract {
// Runs two or more networks in series (layers) on the same input.
class Series : public Plumbing {
public:
  // ni_ and no_ will be set by AddToStack.
  TESS_API
  explicit Series(const std::string &name);
  ~Series() override = default;
  // Returns the shape output from the network given an input shape (which may
  // be partially unknown ie zero).
  StaticShape OutputShape(const StaticShape &input_shape) const override;
  // Returns the network spec: the concatenated specs of the stacked networks,
  // wrapped in [].
  std::string spec() const override {
    std::string spec("[");
    for (auto &it : stack_) {
      spec += it->spec();
    }
    spec += "]";
    return spec;
  }
  // Sets up the network for training. Initializes weights using weights of
  // scale `range` picked according to the random number generator `randomizer`.
  // Returns the number of weights initialized.
  int InitWeights(float range, TRand *randomizer) override;
  // Recursively searches the network for softmaxes with old_no outputs,
  // and remaps their outputs according to code_map. See network.h for details.
  int RemapOutputs(int old_no, const std::vector<int> &code_map) override;
  // Sets needs_to_backprop_ to needs_backprop and returns true if
  // needs_backprop || any weights in this network so the next layer forward
  // can be told to produce backprop for this layer if needed.
  bool SetupNeedsBackprop(bool needs_backprop) override;
  // Returns an integer reduction factor that the network applies to the
  // time sequence. Assumes that any 2-d is already eliminated. Used for
  // scaling bounding boxes of truth data.
  // WARNING: if GlobalMinimax is used to vary the scale, this will return
  // the last used scale factor. Call it before any forward, and it will return
  // the minimum scale factor of the paths through the GlobalMinimax.
  int XScaleFactor() const override;
  // Provides the (minimum) x scale factor to the network (of interest only to
  // input units) so they can determine how to scale bounding boxes.
  void CacheXScaleFactor(int factor) override;
  // Runs forward propagation of activations on the input line.
  // See Network for a detailed discussion of the arguments.
  void Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
               NetworkScratch *scratch, NetworkIO *output) override;
  // Runs backward propagation of errors on the deltas line.
  // See Network for a detailed discussion of the arguments.
  bool Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                NetworkIO *back_deltas) override;
  // Splits the series after the given index, returning the two parts and
  // deletes itself. The first part, up to network with index last_start, goes
  // into start, and the rest goes into end.
  TESS_API
  void SplitAt(unsigned last_start, Series **start, Series **end);
  // Appends the elements of the src series to this, removing from src and
  // deleting it.
  TESS_API
  void AppendSeries(Network *src);
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_SERIES_H_
|
2301_81045437/tesseract
|
src/lstm/series.h
|
C++
|
apache-2.0
| 3,977
|
///////////////////////////////////////////////////////////////////////
// File: static_shape.h
// Description: Defines the size of the 4-d tensor input/output from a network.
// Author: Ray Smith
// Created: Fri Oct 14 09:07:31 PST 2016
//
// (C) Copyright 2016, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_STATIC_SHAPE_H_
#define TESSERACT_LSTM_STATIC_SHAPE_H_
#include "serialis.h" // for TFile
#include "tprintf.h" // for tprintf
namespace tesseract {
// Enum describing the loss function to apply during training and/or the
// decoding method to apply at runtime.
enum LossType {
LT_NONE, // Undefined.
LT_CTC, // Softmax with standard CTC for training/decoding.
LT_SOFTMAX, // Outputs sum to 1 in fixed positions.
LT_LOGISTIC, // Logistic outputs with independent values.
};
// Simple class to hold the tensor shape that is known at network build time
// and the LossType of the loss function.
class StaticShape {
public:
  // All dimensions default to 0 (determined at runtime) and the loss type
  // defaults to LT_NONE.
  StaticShape() = default;

  // Trivial accessors and mutators.
  int batch() const { return batch_; }
  void set_batch(int value) { batch_ = value; }
  int height() const { return height_; }
  void set_height(int value) { height_ = value; }
  int width() const { return width_; }
  void set_width(int value) { width_ = value; }
  int depth() const { return depth_; }
  void set_depth(int value) { depth_ = value; }
  LossType loss_type() const { return loss_type_; }
  void set_loss_type(LossType value) { loss_type_ = value; }

  // Sets all four dimensions at once.
  void SetShape(int batch, int height, int width, int depth) {
    batch_ = batch;
    height_ = height;
    width_ = width;
    depth_ = depth;
  }

  // Prints the shape and loss type for debugging.
  void Print() const {
    tprintf("Batch=%d, Height=%d, Width=%d, Depth=%d, loss=%d\n", batch_, height_, width_, depth_,
            loss_type_);
  }

  // Reads the shape from fp. Returns false on error.
  bool DeSerialize(TFile *fp) {
    int32_t loss = LT_NONE;
    const bool ok = fp->DeSerialize(&batch_) && fp->DeSerialize(&height_) &&
                    fp->DeSerialize(&width_) && fp->DeSerialize(&depth_) &&
                    fp->DeSerialize(&loss);
    loss_type_ = static_cast<LossType>(loss);
    return ok;
  }
  // Writes the shape to fp. Returns false on error.
  bool Serialize(TFile *fp) const {
    int32_t loss = loss_type_;
    return fp->Serialize(&batch_) && fp->Serialize(&height_) && fp->Serialize(&width_) &&
           fp->Serialize(&depth_) && fp->Serialize(&loss);
  }

private:
  // Size of the 4-D tensor input/output to a network. A value of zero is
  // allowed for all except depth_ and means to be determined at runtime, and
  // regarded as variable.
  // Number of elements in a batch, or number of frames in a video stream.
  int32_t batch_ = 0;
  // Height of the image.
  int32_t height_ = 0;
  // Width of the image.
  int32_t width_ = 0;
  // Depth of the image. (Number of "nodes").
  int32_t depth_ = 0;
  // How to train/interpret the output.
  LossType loss_type_ = LT_NONE;
};
} // namespace tesseract
#endif // TESSERACT_LSTM_STATIC_SHAPE_H_
|
2301_81045437/tesseract
|
src/lstm/static_shape.h
|
C++
|
apache-2.0
| 3,575
|
///////////////////////////////////////////////////////////////////////
// File: stridemap.cpp
// Description: Indexing into a 4-d tensor held in a 2-d Array.
// Author: Ray Smith
//
// (C) Copyright 2016, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#include "stridemap.h"
#include <cassert> // for assert
namespace tesseract {
// Returns true if *this is a valid index.
// Returns true if *this is a valid index.
bool StrideMap::Index::IsValid() const {
  // Validate dimensions in order, batch first, so that MaxIndexOfDim can
  // safely use the already-validated batch index for height/width limits.
  for (int d = 0; d < FD_DIMSIZE; ++d) {
    const int index = indices_[d];
    if (index < 0 || index > MaxIndexOfDim(static_cast<FlexDimensions>(d))) {
      return false;
    }
  }
  return true;
}
// Returns true if the index of the given dimension is the last.
// Returns true if the index of the given dimension is the last valid one.
bool StrideMap::Index::IsLast(FlexDimensions dimension) const {
  return indices_[dimension] == MaxIndexOfDim(dimension);
}
// Given that the dimensions up to and including dim-1 are valid, returns the
// maximum index for dimension dim.
int StrideMap::Index::MaxIndexOfDim(FlexDimensions dim) const {
  // Default limit comes from the overall (maximum) shape.
  int max_index = stride_map_->shape_[dim] - 1;
  if (dim == FD_BATCH) {
    return max_index;
  }
  // Height/width limits depend on the per-image sizes of the current batch
  // element, which requires a valid (non-negative) batch index.
  assert(0 <= indices_[FD_BATCH]);
  const size_t batch = indices_[FD_BATCH];
  if (dim == FD_HEIGHT) {
    // Fall back to the shape limit if the per-image height is unavailable
    // or exceeds the overall shape.
    if (batch >= stride_map_->heights_.size() || stride_map_->heights_[batch] > max_index) {
      return max_index;
    }
    return stride_map_->heights_[batch] - 1;
  }
  if (batch >= stride_map_->widths_.size() || stride_map_->widths_[batch] > max_index) {
    return max_index;
  }
  return stride_map_->widths_[batch] - 1;
}
// Adds the given offset to the given dimension. Returns true if the result
// makes a valid index.
bool StrideMap::Index::AddOffset(int offset, FlexDimensions dimension) {
  indices_[dimension] += offset;
  // Recompute the flat index to keep t_ consistent with indices_.
  SetTFromIndices();
  return IsValid();
}
// Increments the index in some encapsulated way that guarantees to remain
// valid until it returns false, meaning that the iteration is complete.
bool StrideMap::Index::Increment() {
  // Odometer-style increment, least-significant (width) dimension first.
  for (int d = FD_DIMSIZE - 1; d >= 0; --d) {
    if (!IsLast(static_cast<FlexDimensions>(d))) {
      // t_ is maintained incrementally instead of being recomputed.
      t_ += stride_map_->t_increments_[d];
      ++indices_[d];
      return true;
    }
    // This dimension wraps to 0; undo its contribution to t_.
    t_ -= stride_map_->t_increments_[d] * indices_[d];
    indices_[d] = 0;
    // Now carry to the next dimension.
  }
  return false;
}
// Decrements the index in some encapsulated way that guarantees to remain
// valid until it returns false, meaning that the iteration (that started
// with InitToLast()) is complete.
bool StrideMap::Index::Decrement() {
  // Odometer-style decrement, least-significant (width) dimension first.
  for (int d = FD_DIMSIZE - 1; d >= 0; --d) {
    if (indices_[d] > 0) {
      --indices_[d];
      if (d == FD_BATCH) {
        // The upper limits of the other dimensions may have changed as a result
        // of a different batch index, so they have to be reset.
        InitToLastOfBatch(indices_[FD_BATCH]);
      } else {
        t_ -= stride_map_->t_increments_[d];
      }
      return true;
    }
    // This dimension wraps to its maximum; add its contribution back to t_.
    indices_[d] = MaxIndexOfDim(static_cast<FlexDimensions>(d));
    t_ += stride_map_->t_increments_[d] * indices_[d];
    // Now borrow from the next dimension.
  }
  return false;
}
// Initializes the indices to the last valid location in the given batch
// index.
void StrideMap::Index::InitToLastOfBatch(int batch) {
  // Set the batch first: the height/width maxima depend on it.
  indices_[FD_BATCH] = batch;
  for (int d = FD_BATCH + 1; d < FD_DIMSIZE; ++d) {
    indices_[d] = MaxIndexOfDim(static_cast<FlexDimensions>(d));
  }
  SetTFromIndices();
}
// Computes and sets t_ from the current indices_.
// Recomputes the flat array index t_ from the current indices_ as the dot
// product of the per-dimension increments with the indices.
void StrideMap::Index::SetTFromIndices() {
  int total = 0;
  for (int d = 0; d < FD_DIMSIZE; ++d) {
    total += stride_map_->t_increments_[d] * indices_[d];
  }
  t_ = total;
}
// Sets up the stride for the given array of height, width pairs.
// Sets up the stride for the given array of height, width pairs.
void StrideMap::SetStride(const std::vector<std::pair<int, int>> &h_w_pairs) {
  int max_height = 0;
  int max_width = 0;
  // Record each image size and track the maxima, which define the shape.
  for (const auto &[height, width] : h_w_pairs) {
    heights_.push_back(height);
    widths_.push_back(width);
    if (height > max_height) {
      max_height = height;
    }
    if (width > max_width) {
      max_width = width;
    }
  }
  shape_[FD_BATCH] = heights_.size();
  shape_[FD_HEIGHT] = max_height;
  shape_[FD_WIDTH] = max_width;
  ComputeTIncrements();
}
// Scales width and height dimensions by the given factors.
void StrideMap::ScaleXY(int x_factor, int y_factor) {
for (int &height : heights_) {
height /= y_factor;
}
for (int &width : widths_) {
width /= x_factor;
}
shape_[FD_HEIGHT] /= y_factor;
shape_[FD_WIDTH] /= x_factor;
ComputeTIncrements();
}
// Reduces width to 1, across the batch, whatever the input size.
void StrideMap::ReduceWidthTo1() {
widths_.assign(widths_.size(), 1);
shape_[FD_WIDTH] = 1;
ComputeTIncrements();
}
// Transposes the width and height dimensions.
// Transposes the width and height dimensions, swapping both the shape
// entries and the per-image size vectors.
void StrideMap::TransposeXY() {
  heights_.swap(widths_);
  std::swap(shape_[FD_HEIGHT], shape_[FD_WIDTH]);
  ComputeTIncrements();
}
// Computes t_increments_ from shape_.
void StrideMap::ComputeTIncrements() {
t_increments_[FD_DIMSIZE - 1] = 1;
for (int d = FD_DIMSIZE - 2; d >= 0; --d) {
t_increments_[d] = t_increments_[d + 1] * shape_[d + 1];
}
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/lstm/stridemap.cpp
|
C++
|
apache-2.0
| 5,845
|
///////////////////////////////////////////////////////////////////////
// File: stridemap.h
// Description: Indexing into a 4-d tensor held in a 2-d Array.
// Author: Ray Smith
//
// (C) Copyright 2016, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_STRIDEMAP_H_
#define TESSERACT_LSTM_STRIDEMAP_H_
#include <cstring>
#include <vector>
namespace tesseract {
// Enum describing the dimensions of the 'Tensor' in a NetworkIO.
// A NetworkIO is analogous to a TF Tensor, except that the number of dimensions
// is fixed (4), and they always have the same meaning. The underlying
// representation is a 2-D array, for which the product batch*height*width
// is always dim1 and depth is always dim2. FlexDimensions is used only for
// batch, height, width with the StrideMap, and therefore represents the runtime
// shape. The build-time shape is defined by StaticShape.
enum FlexDimensions {
FD_BATCH, // Index of multiple images.
FD_HEIGHT, // y-coordinate in image.
FD_WIDTH, // x-coordinate in image.
FD_DIMSIZE, // Number of flexible non-depth dimensions.
};
// Encapsulation of information relating to the mapping from [batch][y][x] to
// the first index into the 2-d array underlying a NetworkIO.
class StrideMap {
public:
  // Class holding the non-depth indices.
  class Index {
  public:
    explicit Index(const StrideMap &stride_map) : stride_map_(&stride_map) {
      InitToFirst();
    }
    // Note: the given indices are not range-checked here; call IsValid() to
    // verify them.
    Index(const StrideMap &stride_map, int batch, int y, int x) : stride_map_(&stride_map) {
      indices_[FD_BATCH] = batch;
      indices_[FD_HEIGHT] = y;
      indices_[FD_WIDTH] = x;
      SetTFromIndices();
    }
    // Accesses the index to the underlying array.
    int t() const {
      return t_;
    }
    int index(FlexDimensions dimension) const {
      return indices_[dimension];
    }
    // Initializes the indices to the first valid location.
    void InitToFirst() {
      memset(indices_, 0, sizeof(indices_));
      t_ = 0;
    }
    // Initializes the indices to the last valid location.
    void InitToLast() {
      InitToLastOfBatch(MaxIndexOfDim(FD_BATCH));
    }
    // Returns true if *this is a valid index.
    bool IsValid() const;
    // Returns true if the index of the given dimension is the last.
    bool IsLast(FlexDimensions dimension) const;
    // Given that the dimensions up to and including dim-1 are valid, returns
    // the maximum index for dimension dim.
    int MaxIndexOfDim(FlexDimensions dim) const;
    // Adds the given offset to the given dimension. Returns true if the result
    // makes a valid index.
    bool AddOffset(int offset, FlexDimensions dimension);
    // Increments the index in some encapsulated way that guarantees to remain
    // valid until it returns false, meaning that the iteration is complete.
    bool Increment();
    // Decrements the index in some encapsulated way that guarantees to remain
    // valid until it returns false, meaning that the iteration (that started
    // with InitToLast()) is complete.
    bool Decrement();
  private:
    // Initializes the indices to the last valid location in the given batch
    // index.
    void InitToLastOfBatch(int batch);
    // Computes and sets t_ from the current indices_.
    void SetTFromIndices();
    // Map into which *this is an index. Non-owning.
    const StrideMap *stride_map_;
    // Index to the first dimension of the underlying array.
    int t_;
    // Indices into the individual dimensions.
    int indices_[FD_DIMSIZE];
  };
  StrideMap() {
    memset(shape_, 0, sizeof(shape_));
    memset(t_increments_, 0, sizeof(t_increments_));
  }
  // Default copy constructor and operator= are OK to use here!
  // Sets up the stride for the given array of height, width pairs.
  void SetStride(const std::vector<std::pair<int, int>> &h_w_pairs);
  // Scales width and height dimensions by the given factors.
  void ScaleXY(int x_factor, int y_factor);
  // Reduces width to 1, across the batch, whatever the input size.
  void ReduceWidthTo1();
  // Transposes the width and height dimensions.
  void TransposeXY();
  // Returns the size of the given dimension.
  int Size(FlexDimensions dimension) const {
    return shape_[dimension];
  }
  // Returns the total width required, i.e. the number of t positions covering
  // the whole batch (batch increment * batch size).
  int Width() const {
    return t_increments_[FD_BATCH] * shape_[FD_BATCH];
  }
private:
  // Computes t_increments_ from shape_.
  void ComputeTIncrements();
  // The size of each non-depth dimension.
  int shape_[FD_DIMSIZE];
  // Precomputed 't' increments for each dimension. This is the value of
  // the given dimension in the packed 3-d array that the shape_ represents.
  int t_increments_[FD_DIMSIZE];
  // Vector of size shape_[FD_BATCH] holds the height of each image in a batch.
  std::vector<int> heights_;
  // Vector of size shape_[FD_BATCH] holds the width of each image in a batch.
  std::vector<int> widths_;
};
} // namespace tesseract
#endif // TESSERACT_LSTM_STRIDEMAP_H_
|
2301_81045437/tesseract
|
src/lstm/stridemap.h
|
C++
|
apache-2.0
| 5,549
|
///////////////////////////////////////////////////////////////////////
// File: tfnetwork.cpp
// Description: Encapsulation of an entire tensorflow graph as a
// Tesseract Network.
// Author: Ray Smith
//
// (C) Copyright 2016, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifdef INCLUDE_TENSORFLOW
# include "tfnetwork.h"
# include <allheaders.h>
# include "input.h"
# include "networkscratch.h"
using tensorflow::Status;
using tensorflow::Tensor;
using tensorflow::TensorShape;
namespace tesseract {
TFNetwork::TFNetwork(const std::string &name) : Network(NT_TENSORFLOW, name, 0, 0) {}
// Deserializes *this from a serialized TFNetwork proto string. Returns 0 on
// failure, otherwise the global step of the deserialized graph.
int TFNetwork::InitFromProtoStr(const std::string &proto_str) {
  if (model_proto_.ParseFromString(proto_str)) {
    return InitFromProto();
  }
  return 0;
}
// Writes to the given file. Returns false in case of error.
// Should be overridden by subclasses, but called by their Serialize.
bool TFNetwork::Serialize(TFile *fp) const {
  if (!Network::Serialize(fp)) {
    return false;
  }
  std::string proto_str;
  model_proto_.SerializeToString(&proto_str);
  // Copy the serialized proto into a char vector for TFile; the iterator-pair
  // constructor performs the copy without a separate memcpy.
  std::vector<char> data(proto_str.begin(), proto_str.end());
  return fp->Serialize(data);
}
// Reads from the given file. Returns false in case of error.
// Should be overridden by subclasses, but NOT called by their DeSerialize.
// Reads from the given file. Returns false in case of error.
// Should be overridden by subclasses, but NOT called by their DeSerialize.
bool TFNetwork::DeSerialize(TFile *fp) {
  std::vector<char> data;
  if (!fp->DeSerialize(data)) {
    return false;
  }
  // Use data.data() rather than &data[0]: the latter is undefined behavior
  // when the deserialized vector is empty.
  if (!model_proto_.ParseFromArray(data.data(), data.size())) {
    return false;
  }
  return InitFromProto();
}
// Runs forward propagation of activations on the input line.
// See Network for a detailed discussion of the arguments.
void TFNetwork::Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
                        NetworkScratch *scratch, NetworkIO *output) {
  std::vector<std::pair<std::string, Tensor>> tf_inputs;
  int depth = input_shape_.depth();
  ASSERT_HOST(depth == input.NumFeatures());
  // TODO(rays) Allow batching. For now batch_size = 1.
  const StrideMap &stride_map = input.stride_map();
  // TF requires a tensor of shape float[batch, height, width, depth].
  TensorShape shape{1, stride_map.Size(FD_HEIGHT), stride_map.Size(FD_WIDTH), depth};
  Tensor input_tensor(tensorflow::DT_FLOAT, shape);
  // The flat() member gives a 1d array, with a data() member to get the data.
  auto eigen_tensor = input_tensor.flat<float>();
  memcpy(eigen_tensor.data(), input.f(0), input.Width() * depth * sizeof(input.f(0)[0]));
  // Add the tensor to the vector of inputs.
  tf_inputs.emplace_back(model_proto_.image_input(), input_tensor);
  // Provide tensors giving the width and/or height of the image if they are
  // required. Some tf ops require a separate tensor with knowledge of the
  // size of the input as they cannot obtain it from the input tensor. This is
  // usually true in the case of ops that process a batch of variable-sized
  // objects.
  if (!model_proto_.image_widths().empty()) {
    TensorShape size_shape{1};
    Tensor width_tensor(tensorflow::DT_INT64, size_shape);
    auto eigen_wtensor = width_tensor.flat<tensorflow::int64>();
    *eigen_wtensor.data() = stride_map.Size(FD_WIDTH);
    tf_inputs.emplace_back(model_proto_.image_widths(), width_tensor);
  }
  if (!model_proto_.image_heights().empty()) {
    TensorShape size_shape{1};
    Tensor height_tensor(tensorflow::DT_INT64, size_shape);
    auto eigen_htensor = height_tensor.flat<tensorflow::int64>();
    *eigen_htensor.data() = stride_map.Size(FD_HEIGHT);
    tf_inputs.emplace_back(model_proto_.image_heights(), height_tensor);
  }
  // Run the graph, fetching only the configured output layer.
  std::vector<std::string> target_layers = {model_proto_.output_layer()};
  std::vector<Tensor> outputs;
  Status s = session_->Run(tf_inputs, target_layers, {}, &outputs);
  if (!s.ok())
    tprintf("session->Run failed:%s\n", s.error_message().c_str());
  ASSERT_HOST(s.ok());
  ASSERT_HOST(outputs.size() == 1);
  const Tensor &output_tensor = outputs[0];
  // Check the dimensions of the output. Expected [batch, steps, depth].
  ASSERT_HOST(output_tensor.shape().dims() == 3);
  int output_batch = output_tensor.shape().dim_size(0);
  int output_steps = output_tensor.shape().dim_size(1);
  int output_depth = output_tensor.shape().dim_size(2);
  ASSERT_HOST(output_batch == 1);
  ASSERT_HOST(output_depth == output_shape_.depth());
  // Copy the TF result back into the NetworkIO output.
  output->Resize2d(false, output_steps, output_depth);
  auto eigen_output = output_tensor.flat<float>();
  memcpy(output->f(0), eigen_output.data(), output_steps * output_depth * sizeof(output->f(0)[0]));
}
// Sets up shapes, spec and the TF session from model_proto_. Returns the
// proto's global step on success, 0 on failure.
int TFNetwork::InitFromProto() {
  spec_ = model_proto_.spec();
  // x/y sizes are clamped to >= 0; 0 means determined at runtime (variable).
  input_shape_.SetShape(model_proto_.batch_size(), std::max(0, model_proto_.y_size()),
                        std::max(0, model_proto_.x_size()), model_proto_.depth());
  output_shape_.SetShape(model_proto_.batch_size(), 1, 0, model_proto_.num_classes());
  output_shape_.set_loss_type(model_proto_.using_ctc() ? LT_CTC : LT_SOFTMAX);
  ni_ = input_shape_.height();
  no_ = output_shape_.depth();
  // Initialize the session_ with the graph. Since we can't get the graph
  // back from the session_, we have to keep the proto as well
  tensorflow::SessionOptions options;
  session_.reset(NewSession(options));
  Status s = session_->Create(model_proto_.graph());
  if (s.ok())
    return model_proto_.global_step();
  tprintf("Session_->Create returned '%s'\n", s.error_message().c_str());
  return 0;
}
} // namespace tesseract
#endif // ifdef INCLUDE_TENSORFLOW
|
2301_81045437/tesseract
|
src/lstm/tfnetwork.cpp
|
C++
|
apache-2.0
| 6,131
|
///////////////////////////////////////////////////////////////////////
// File: tfnetwork.h
// Description: Encapsulation of an entire tensorflow graph as a
// Tesseract Network.
// Author: Ray Smith
//
// (C) Copyright 2016, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_TFNETWORK_H_
#define TESSERACT_LSTM_TFNETWORK_H_
#ifdef INCLUDE_TENSORFLOW
# include <memory>
# include <string>
# include "network.h"
# include "static_shape.h"
# include "tensorflow/core/framework/graph.pb.h"
# include "tensorflow/core/public/session.h"
# include "tfnetwork.pb.h"
namespace tesseract {
class TFNetwork : public Network {
public:
  explicit TFNetwork(const std::string &name);
  virtual ~TFNetwork() = default;
  // Returns the required shape input to the network.
  StaticShape InputShape() const override {
    return input_shape_;
  }
  // Returns the shape output from the network given an input shape (which may
  // be partially unknown ie zero).
  StaticShape OutputShape(const StaticShape &input_shape) const override {
    return output_shape_;
  }
  std::string spec() const override {
    return spec_;
  }
  // Deserializes *this from a serialized TFNetwork proto. Returns 0 if failed,
  // otherwise the global step of the serialized graph.
  int InitFromProtoStr(const std::string &proto_str);
  // The number of classes in this network should be equal to those in the
  // recoder_ in LSTMRecognizer.
  int num_classes() const {
    return output_shape_.depth();
  }
  // Writes to the given file. Returns false in case of error.
  // Should be overridden by subclasses, but called by their Serialize.
  bool Serialize(TFile *fp) const override;
  // Reads from the given file. Returns false in case of error.
  // Should be overridden by subclasses, but NOT called by their DeSerialize.
  bool DeSerialize(TFile *fp) override;
  // Runs forward propagation of activations on the input line.
  // See Network for a detailed discussion of the arguments.
  void Forward(bool debug, const NetworkIO &input, const TransposedArray *input_transpose,
               NetworkScratch *scratch, NetworkIO *output) override;
private:
  // Training is not supported through this wrapper: Backward always fails
  // and DebugWeights only reports an error.
  // Runs backward propagation of errors on the deltas line.
  // See Network for a detailed discussion of the arguments.
  bool Backward(bool debug, const NetworkIO &fwd_deltas, NetworkScratch *scratch,
                NetworkIO *back_deltas) override {
    tprintf("Must override Network::Backward for type %d\n", type_);
    return false;
  }
  void DebugWeights() override {
    tprintf("Must override Network::DebugWeights for type %d\n", type_);
  }
  int InitFromProto();
  // The original network definition for reference.
  std::string spec_;
  // Input tensor parameters.
  StaticShape input_shape_;
  // Output tensor parameters.
  StaticShape output_shape_;
  // The tensor flow graph is contained in here.
  std::unique_ptr<tensorflow::Session> session_;
  // The serialized graph is also contained in here.
  TFNetworkModel model_proto_;
};
} // namespace tesseract.
#endif // ifdef INCLUDE_TENSORFLOW
#endif // TESSERACT_LSTM_TFNETWORK_H_
|
2301_81045437/tesseract
|
src/lstm/tfnetwork.h
|
C++
|
apache-2.0
| 3,744
|
///////////////////////////////////////////////////////////////////////
// File: weightmatrix.cpp
// Description: Hides distinction between float/int implementations.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#include "weightmatrix.h"
#include <cassert> // for assert
#include "intsimdmatrix.h"
#include "simddetect.h" // for DotProduct
#include "statistc.h"
#include "tprintf.h" // forTFloat
namespace tesseract {
#if defined(ANDROID)
// Android's math library lacks log2(); emulate it via natural logarithms.
static inline TFloat log2(TFloat n) {
  return log(n) / log(2.0);
}
#endif // ANDROID
// Number of iterations after which the Adam bias correction effectively
// becomes unity (and is no longer applied).
const int kAdamCorrectionIterations = 200000;
// Epsilon in Adam to prevent division by zero.
const TFloat kAdamEpsilon = 1e-8;
// Utility functions convert between double and float arrays.
#ifdef FAST_FLOAT
// Narrows a double 2-D array into dst as float, resizing dst to match.
static void DoubleToFloat(const GENERIC_2D_ARRAY<double> &src, GENERIC_2D_ARRAY<float> &dst) {
  const auto rows = src.dim1();
  const auto cols = src.dim2();
  dst.ResizeNoInit(rows, cols);
  for (int y = 0; y < rows; ++y) {
    const auto *src_row = src[y];
    auto *dst_row = dst[y];
    for (int x = 0; x < cols; ++x) {
      dst_row[x] = static_cast<float>(src_row[x]);
    }
  }
}
#endif
// Widens a float 2-D array into dst as double, resizing dst to match.
static void FloatToDouble(const GENERIC_2D_ARRAY<float> &src, GENERIC_2D_ARRAY<double> &dst) {
  const auto rows = src.dim1();
  const auto cols = src.dim2();
  dst.ResizeNoInit(rows, cols);
  for (int y = 0; y < rows; ++y) {
    const auto *src_row = src[y];
    auto *dst_row = dst[y];
    for (int x = 0; x < cols; ++x) {
      dst_row[x] = static_cast<double>(src_row[x]);
    }
  }
}
// Reads a 2-D array of TFloat from fp. The on-disk format is always double;
// when TFloat is float (FAST_FLOAT build) the data is read as double and
// narrowed, otherwise it is read directly. Returns false on read failure.
static bool DeSerialize(TFile *fp, GENERIC_2D_ARRAY<TFloat> &tfloat_array) {
#ifdef FAST_FLOAT
  GENERIC_2D_ARRAY<double> double_array;
  if (!double_array.DeSerialize(fp)) {
    return false;
  }
  DoubleToFloat(double_array, tfloat_array);
  return true;
#else
  return tfloat_array.DeSerialize(fp);
#endif
}
// Writes a 2-D array of TFloat to fp. The on-disk format is always double
// (for file-format compatibility); in a FAST_FLOAT build the float data is
// widened before writing. Returns false on write failure.
static bool Serialize(TFile *fp, const GENERIC_2D_ARRAY<TFloat> &tfloat_array) {
#ifdef FAST_FLOAT
  GENERIC_2D_ARRAY<double> double_array;
  FloatToDouble(tfloat_array, double_array);
  return double_array.Serialize(fp);
#else
  return tfloat_array.Serialize(fp);
#endif
}
// Computes matrix.vector v = Wu.
// u is of size W.dim2() - add_bias_fwd and the output v is of size
// W.dim1() - skip_bias_back.
// If add_bias_fwd, u is imagined to have an extra element at the end with value
// 1, to implement the bias, weight.
// If skip_bias_back, we are actually performing the backwards product on a
// transposed matrix, so we need to drop the v output corresponding to the last
// element in dim1.
static inline void MatrixDotVectorInternal(const GENERIC_2D_ARRAY<TFloat> &w, bool add_bias_fwd,
bool skip_bias_back, const TFloat *u, TFloat *v) {
int num_results = w.dim1() - skip_bias_back;
int extent = w.dim2() - add_bias_fwd;
for (int i = 0; i < num_results; ++i) {
const TFloat *wi = w[i];
TFloat total = DotProduct(wi, u, extent);
if (add_bias_fwd) {
total += wi[extent]; // The bias value.
}
v[i] = total;
}
}
// Copies the whole input transposed, converted to TFloat, into *this.
void TransposedArray::Transpose(const GENERIC_2D_ARRAY<TFloat> &input) {
  const int width = input.dim1();
  const int num_features = input.dim2();
  ResizeNoInit(num_features, width);
  // Each input row becomes a strided column of *this.
  int t = 0;
  while (t < width) {
    WriteStrided(t, input[t]);
    ++t;
  }
}
// Destructor.
// It is defined here (out of line), so the compiler can create a single
// vtable in this translation unit instead of weak vtables in every
// compilation unit that includes the header.
TransposedArray::~TransposedArray() = default;
// Sets up the network for training. Initializes weights using weights of
// scale `weight_range` picked according to the random number generator
// `randomizer` (left at zero if no randomizer is supplied).
// Returns the number of weights allocated (no * ni).
int WeightMatrix::InitWeightsFloat(int no, int ni, bool use_adam, float weight_range,
                                   TRand *randomizer) {
  int_mode_ = false;
  wf_.Resize(no, ni, 0.0);
  if (randomizer != nullptr) {
    for (int row = 0; row < no; ++row) {
      for (int col = 0; col < ni; ++col) {
        wf_[row][col] = randomizer->SignedRand(weight_range);
      }
    }
  }
  use_adam_ = use_adam;
  InitBackward();
  return ni * no;
}
// Changes the number of outputs to the size of the given code_map, copying
// the old weight matrix entries for each output from code_map[output] where
// non-negative, and uses the mean (over all outputs) of the existing weights
// for all outputs with negative code_map entries. Returns the new number of
// weights.
int WeightMatrix::RemapOutputs(const std::vector<int> &code_map) {
GENERIC_2D_ARRAY<TFloat> old_wf(wf_);
int old_no = wf_.dim1();
int new_no = code_map.size();
int ni = wf_.dim2();
std::vector<TFloat> means(ni, 0.0);
for (int c = 0; c < old_no; ++c) {
const TFloat *weights = wf_[c];
for (int i = 0; i < ni; ++i) {
means[i] += weights[i];
}
}
for (auto &mean : means) {
mean /= old_no;
}
wf_.Resize(new_no, ni, 0.0);
InitBackward();
for (int dest = 0; dest < new_no; ++dest) {
int src = code_map[dest];
const TFloat *src_data = src >= 0 ? old_wf[src] : means.data();
memcpy(wf_[dest], src_data, ni * sizeof(*src_data));
}
return ni * new_no;
}
// Converts a float network to an int network. Each set of input weights that
// corresponds to a single output weight is converted independently:
// Compute the max absolute value of the weight set.
// Scale so the max absolute value becomes INT8_MAX.
// Round to integer.
// Store a multiplicative scale factor (as a TFloat) that will reproduce
// the original value, subject to rounding errors.
void WeightMatrix::ConvertToInt() {
  wi_.ResizeNoInit(wf_.dim1(), wf_.dim2());
  scales_.reserve(wi_.dim1());
  int dim2 = wi_.dim2();
  for (int t = 0; t < wi_.dim1(); ++t) {
    TFloat *f_line = wf_[t];
    int8_t *i_line = wi_[t];
    // Max absolute weight of this row determines the quantization scale.
    TFloat max_abs = 0;
    for (int f = 0; f < dim2; ++f) {
      TFloat abs_val = fabs(f_line[f]);
      if (abs_val > max_abs) {
        max_abs = abs_val;
      }
    }
    TFloat scale = max_abs / INT8_MAX;
    // The stored scale carries an extra 1/INT8_MAX factor for faster use at
    // run time; Serialize() multiplies it back out before writing to disk.
    scales_.push_back(scale / INT8_MAX);
    if (scale == 0.0) {
      // All-zero row: any scale works; 1.0 avoids division by zero below.
      scale = 1.0;
    }
    for (int f = 0; f < dim2; ++f) {
      i_line[f] = IntCastRounded(f_line[f] / scale);
    }
  }
  // Release the float weights; only a minimal placeholder remains.
  wf_.Resize(1, 1, 0.0);
  int_mode_ = true;
  if (IntSimdMatrix::intSimdMatrix) {
    // Repack the int weights into the SIMD implementation's preferred layout.
    int32_t rounded_num_out;
    IntSimdMatrix::intSimdMatrix->Init(wi_, shaped_w_, rounded_num_out);
    scales_.resize(rounded_num_out);
  }
}
// Allocates any needed memory for running Backward, and zeroes the deltas,
// thus eliminating any existing momentum.
void WeightMatrix::InitBackward() {
  const int no = int_mode_ ? wi_.dim1() : wf_.dim1();
  const int ni = int_mode_ ? wi_.dim2() : wf_.dim2();
  dw_.Resize(no, ni, 0.0);
  updates_.Resize(no, ni, 0.0);
  // Backward uses the transposed weights for the vector.matrix product.
  wf_t_.Transpose(wf_);
  if (use_adam_) {
    dw_sq_sum_.Resize(no, ni, 0.0);
  }
}
// Bit flags OR-ed into the single mode byte written by Serialize() and read
// back by DeSerialize().
// Flag on mode to indicate that this weightmatrix uses int8_t.
const int kInt8Flag = 1;
// Flag on mode to indicate that this weightmatrix uses adam.
const int kAdamFlag = 4;
// Flag on mode to indicate that this weightmatrix uses double. Set
// independently of kInt8Flag as even in int mode the scales can
// be float or double.
const int kDoubleFlag = 128;
// Writes to the given file. Returns false in case of error.
// Layout: mode byte, then either (int mode) the int8 weights followed by a
// count and the double scales, or (float mode) the float weights plus, when
// training, the momentum updates and (iff adam) the squared-gradient sums.
bool WeightMatrix::Serialize(bool training, TFile *fp) const {
  // For backward compatibility, add kDoubleFlag to mode to indicate the doubles
  // format, without errs, so we can detect and read old format weight matrices.
  uint8_t mode = (int_mode_ ? kInt8Flag : 0) | (use_adam_ ? kAdamFlag : 0) | kDoubleFlag;
  if (!fp->Serialize(&mode)) {
    return false;
  }
  if (int_mode_) {
    if (!wi_.Serialize(fp)) {
      return false;
    }
    uint32_t size = scales_.size();
    if (!fp->Serialize(&size)) {
      return false;
    }
    for (auto scale : scales_) {
      // The scales stored in memory have an extra factor applied to them
      // to allow faster operation. We have to remove that factor here
      // before writing to disc.
      double value = scale * INT8_MAX;
      if (!fp->Serialize(&value)) {
        return false;
      }
    }
  } else {
    if (!tesseract::Serialize(fp, wf_)) {
      return false;
    }
    if (training) {
      if (!tesseract::Serialize(fp, updates_)) {
        return false;
      }
      if (use_adam_ && !tesseract::Serialize(fp, dw_sq_sum_)) {
        return false;
      }
    }
  }
  return true;
}
// Reads from the given file. Returns false in case of error.
// Mirrors Serialize(): reads the mode byte, then the int8 weights + scales
// (int mode) or the float weights (+ training state when `training`).
// Old-format (float) files, detected by a missing kDoubleFlag, are delegated
// to DeSerializeOld().
bool WeightMatrix::DeSerialize(bool training, TFile *fp) {
  uint8_t mode;
  if (!fp->DeSerialize(&mode)) {
    return false;
  }
  int_mode_ = (mode & kInt8Flag) != 0;
  use_adam_ = (mode & kAdamFlag) != 0;
  if ((mode & kDoubleFlag) == 0) {
    return DeSerializeOld(training, fp);
  }
  if (int_mode_) {
    if (!wi_.DeSerialize(fp)) {
      return false;
    }
    uint32_t size;
    if (!fp->DeSerialize(&size)) {
      return false;
    }
#ifdef FAST_FLOAT
    scales_.reserve(size);
    for (auto n = size; n > 0; n--) {
      double val;
      if (!fp->DeSerialize(&val)) {
        return false;
      }
      // Re-apply the 1/INT8_MAX runtime factor removed by Serialize().
      scales_.push_back(val / INT8_MAX);
    }
#else
    scales_.resize(size);
    // Use data() and guard against size == 0: &scales_[0] on an empty
    // vector is undefined behavior.
    if (size > 0 && !fp->DeSerialize(scales_.data(), size)) {
      return false;
    }
    // Re-apply the 1/INT8_MAX runtime factor removed by Serialize().
    for (auto &scale : scales_) {
      scale /= INT8_MAX;
    }
#endif
    if (IntSimdMatrix::intSimdMatrix) {
      // Repack the weights into the SIMD implementation's preferred layout.
      int32_t rounded_num_out;
      IntSimdMatrix::intSimdMatrix->Init(wi_, shaped_w_, rounded_num_out);
      scales_.resize(rounded_num_out);
    }
  } else {
    if (!tesseract::DeSerialize(fp, wf_)) {
      return false;
    }
    if (training) {
      InitBackward();
      if (!tesseract::DeSerialize(fp, updates_)) {
        return false;
      }
      if (use_adam_) {
        if (!tesseract::DeSerialize(fp, dw_sq_sum_)) {
          return false;
        }
      }
    }
  }
  return true;
}
// As DeSerialize, but reads an old (float) format WeightMatrix for
// backward compatibility. Not available in FAST_FLOAT builds.
bool WeightMatrix::DeSerializeOld(bool training, TFile *fp) {
#ifdef FAST_FLOAT
  // Not implemented.
  ASSERT_HOST(!"not implemented");
  return false;
#else
  if (int_mode_) {
    if (!wi_.DeSerialize(fp)) {
      return false;
    }
    // Old files store float scales; widen them into scales_.
    std::vector<float> old_scales;
    if (!fp->DeSerialize(old_scales)) {
      return false;
    }
    scales_.reserve(old_scales.size());
    for (float old_scale : old_scales) {
      scales_.push_back(old_scale);
    }
  } else {
    // Old files store float weights; widen them into wf_.
    GENERIC_2D_ARRAY<float> float_array;
    if (!float_array.DeSerialize(fp)) {
      return false;
    }
    FloatToDouble(float_array, wf_);
  }
  if (training) {
    InitBackward();
    GENERIC_2D_ARRAY<float> float_array;
    if (!float_array.DeSerialize(fp)) {
      return false;
    }
    FloatToDouble(float_array, updates_);
    // Errs was only used in int training, which is now dead.
    // Read and discard it to keep the stream position correct.
    if (!float_array.DeSerialize(fp)) {
      return false;
    }
  }
  return true;
#endif
}
// Computes matrix.vector v = Wu.
// u is of size W.dim2() - 1 and the output v is of size W.dim1().
// u is imagined to have an extra element at the end with value 1, to
// implement the bias, but it doesn't actually have it.
// Asserts that the call matches what we have (float mode only).
void WeightMatrix::MatrixDotVector(const TFloat *u, TFloat *v) const {
  assert(!int_mode_);
  MatrixDotVectorInternal(wf_, true, false, u, v);
}
// Int-mode version of MatrixDotVector: same contract, but multiplies the
// quantized int8 weights and rescales the result with scales_.
void WeightMatrix::MatrixDotVector(const int8_t *u, TFloat *v) const {
  assert(int_mode_);
  if (IntSimdMatrix::intSimdMatrix) {
    // Uses the SIMD-shaped weights prepared by ConvertToInt()/DeSerialize().
    // NOTE(review): assumes shaped_w_ and scales_ are non-empty here --
    // &shaped_w_[0] on an empty vector would be UB; confirm Init() guarantees
    // that whenever intSimdMatrix is set.
    IntSimdMatrix::intSimdMatrix->matrixDotVectorFunction(wi_.dim1(), wi_.dim2(), &shaped_w_[0],
                                                          &scales_[0], u, v);
  } else {
    // Portable fallback implementation.
    IntSimdMatrix::MatrixDotVector(wi_, scales_, u, v);
  }
}
// MatrixDotVector for peep weights, MultiplyAccumulate adds the
// component-wise products of *this[0] and v to inout.
void WeightMatrix::MultiplyAccumulate(const TFloat *v, TFloat *inout) {
assert(!int_mode_);
assert(wf_.dim1() == 1);
int n = wf_.dim2();
const TFloat *u = wf_[0];
for (int i = 0; i < n; ++i) {
inout[i] += u[i] * v[i];
}
}
// Computes vector.matrix v = uW.
// u is of size W.dim1() and the output v is of size W.dim2() - 1.
// The last result is discarded, as v is assumed to have an imaginary
// last value of 1, as with MatrixDotVector.
// Uses the transposed copy wf_t_ kept in sync by Update()/InitBackward().
void WeightMatrix::VectorDotMatrix(const TFloat *u, TFloat *v) const {
  assert(!int_mode_);
  MatrixDotVectorInternal(wf_t_, false, true, u, v);
}
// Fills dw_[i][j] with the dot product u[i][] . v[j][], using elements from
// u and v. In terms of the neural network, u is the gradients and v is the
// inputs.
// Note that (matching MatrixDotVector) v[last][] is missing, presumed 1.0.
// Runs parallel if requested. Note that u and v must be transposed.
void WeightMatrix::SumOuterTransposed(const TransposedArray &u, const TransposedArray &v,
                                      bool in_parallel) {
  assert(!int_mode_);
  int num_outputs = dw_.dim1();
  assert(u.dim1() == num_outputs);
  assert(u.dim2() == v.dim2());
  int num_inputs = dw_.dim2() - 1;
  int num_samples = u.dim2();
  // v is missing the last element in dim1.
  assert(v.dim1() == num_inputs);
#ifdef _OPENMP
# pragma omp parallel for num_threads(4) if (in_parallel)
#endif
  // Each iteration writes only dw_[i], so rows are independent and safe to
  // compute in parallel.
  for (int i = 0; i < num_outputs; ++i) {
    TFloat *dwi = dw_[i];
    const TFloat *ui = u[i];
    for (int j = 0; j < num_inputs; ++j) {
      dwi[j] = DotProduct(ui, v[j], num_samples);
    }
    // The last element of v is missing, presumed 1.0f, so the bias delta is
    // just the sum of the gradients over all samples.
    TFloat total = 0;
    for (int k = 0; k < num_samples; ++k) {
      total += ui[k];
    }
    dwi[num_inputs] = total;
  }
}
// Updates the weights using the given learning rate and momentum.
// num_samples is the quotient to be used in the adam computation iff
// use_adam_ is true.
void WeightMatrix::Update(float learning_rate, float momentum, float adam_beta, int num_samples) {
  assert(!int_mode_);
  // Adam bias-correction of the learning rate, applied only while the
  // iteration count is small enough for the correction to matter.
  if (use_adam_ && momentum > 0.0f && num_samples > 0 && num_samples < kAdamCorrectionIterations) {
    learning_rate *= sqrt(1.0f - pow(adam_beta, num_samples));
    learning_rate /= 1.0f - pow(momentum, num_samples);
  }
  if (use_adam_ && num_samples > 0 && momentum > 0.0f) {
    // Adam path: momentum-decayed update normalized by the running sum of
    // squared gradients.
    dw_sq_sum_.SumSquares(dw_, adam_beta);
    dw_ *= learning_rate * (1.0f - momentum);
    updates_ *= momentum;
    updates_ += dw_;
    wf_.AdamUpdate(updates_, dw_sq_sum_, learning_rate * kAdamEpsilon);
  } else {
    // Plain SGD with momentum.
    dw_ *= learning_rate;
    updates_ += dw_;
    // NOTE(review): with momentum == 0 the updates accumulate in updates_
    // but are never applied to wf_ here -- confirm this is intentional.
    if (momentum > 0.0f) {
      wf_ += updates_;
    }
    if (momentum >= 0.0f) {
      updates_ *= momentum;
    }
  }
  // Keep the transposed copy in sync for VectorDotMatrix/Backward.
  wf_t_.Transpose(wf_);
}
// Adds the dw_ in other to the dw_ in *this. Both matrices must have the
// same dimensions (asserted).
void WeightMatrix::AddDeltas(const WeightMatrix &other) {
  assert(dw_.dim1() == other.dw_.dim1());
  assert(dw_.dim2() == other.dw_.dim2());
  dw_ += other.dw_;
}
// Sums the products of weight updates in *this and other, splitting into
// positive (same direction) in *same and negative (different direction) in
// *changed. Accumulates into *same/*changed without resetting them.
void WeightMatrix::CountAlternators(const WeightMatrix &other, TFloat *same,
                                    TFloat *changed) const {
  const int num_outputs = updates_.dim1();
  const int num_inputs = updates_.dim2();
  assert(num_outputs == other.updates_.dim1());
  assert(num_inputs == other.updates_.dim2());
  for (int out = 0; out < num_outputs; ++out) {
    const TFloat *mine = updates_[out];
    const TFloat *theirs = other.updates_[out];
    for (int in = 0; in < num_inputs; ++in) {
      const TFloat product = mine[in] * theirs[in];
      if (product < 0.0) {
        *changed -= product; // Opposite signs: accumulate magnitude.
      } else {
        *same += product;
      }
    }
  }
}
// Helper computes an integer histogram bucket for a weight and adds it
// to the histogram. Bucket index grows with -log2(|weight|), clipped to
// [0, kHistogramBuckets - 1]; zero weights land in the last bucket.
const int kHistogramBuckets = 16;
static void HistogramWeight(TFloat weight, STATS *histogram) {
  const int bucket =
      weight == 0.0 ? kHistogramBuckets - 1
                    : ClipToRange(IntCastRounded(-log2(fabs(weight))), 0, kHistogramBuckets - 1);
  histogram->add(bucket, 1);
}
// Prints msg followed by a histogram of the magnitudes of all weights,
// using the dequantized values in int mode.
void WeightMatrix::Debug2D(const char *msg) {
  STATS histogram(0, kHistogramBuckets - 1);
  if (int_mode_) {
    for (int row = 0; row < wi_.dim1(); ++row) {
      for (int col = 0; col < wi_.dim2(); ++col) {
        // Rescale the quantized weight back to its float value.
        HistogramWeight(wi_[row][col] * scales_[row], &histogram);
      }
    }
  } else {
    for (int row = 0; row < wf_.dim1(); ++row) {
      for (int col = 0; col < wf_.dim2(); ++col) {
        HistogramWeight(wf_[row][col], &histogram);
      }
    }
  }
  tprintf("%s\n", msg);
  histogram.print();
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/lstm/weightmatrix.cpp
|
C++
|
apache-2.0
| 17,214
|
///////////////////////////////////////////////////////////////////////
// File: weightmatrix.h
// Description: Hides distinction between float/int implementations.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_WEIGHTMATRIX_H_
#define TESSERACT_LSTM_WEIGHTMATRIX_H_
#include <memory>
#include <vector>
#include "intsimdmatrix.h"
#include "matrix.h"
#include "tesstypes.h"
#include "tprintf.h"
namespace tesseract {
// Convenience instantiation of GENERIC_2D_ARRAY<TFloat> with additional
// operations to write a strided vector, so the transposed form of the input
// is memory-contiguous.
class TransposedArray : public GENERIC_2D_ARRAY<TFloat> {
public:
  // Copies the whole input transposed, converted to TFloat, into *this.
  void Transpose(const GENERIC_2D_ARRAY<TFloat> &input);
  // Writes a vector of data representing a timestep (gradients or sources).
  // The data is assumed to be of size1 in size (the strided dimension).
  ~TransposedArray() override;
  // Writes data as column t of *this, one element per row.
  void WriteStrided(int t, const float *data) {
    int size1 = dim1();
    for (int i = 0; i < size1; ++i) {
      put(i, t, data[i]);
    }
  }
  // Double overload of the above; both exist so callers can pass either
  // precision regardless of what TFloat is.
  void WriteStrided(int t, const double *data) {
    int size1 = dim1();
    for (int i = 0; i < size1; ++i) {
      put(i, t, data[i]);
    }
  }
  // Prints the first and last num elements of the un-transposed array
  // (all elements if num == 0).
  void PrintUnTransposed(int num) {
    int num_features = dim1();
    int width = dim2();
    for (int y = 0; y < num_features; ++y) {
      for (int t = 0; t < width; ++t) {
        if (num == 0 || t < num || t + num >= width) {
          tprintf(" %g", static_cast<double>((*this)(y, t)));
        }
      }
      tprintf("\n");
    }
  }
}; // class TransposedArray
// Generic weight matrix for network layers. Can store the matrix as either
// an array of floats or int8_t. Provides functions to compute the forward and
// backward steps with the matrix and updates to the weights.
class WeightMatrix {
public:
  WeightMatrix() : int_mode_(false), use_adam_(false) {}
  // Sets up the network for training. Initializes weights using weights of
  // scale `range` picked according to the random number generator `randomizer`.
  // Note the order is outputs, inputs, as this is the order of indices to
  // the matrix, so the adjacent elements are multiplied by the input during
  // a forward operation.
  int InitWeightsFloat(int no, int ni, bool use_adam, float weight_range, TRand *randomizer);
  // Changes the number of outputs to the size of the given code_map, copying
  // the old weight matrix entries for each output from code_map[output] where
  // non-negative, and uses the mean (over all outputs) of the existing weights
  // for all outputs with negative code_map entries. Returns the new number of
  // weights.
  int RemapOutputs(const std::vector<int> &code_map);
  // Converts a float network to an int network. Each set of input weights that
  // corresponds to a single output weight is converted independently:
  // Compute the max absolute value of the weight set.
  // Scale so the max absolute value becomes INT8_MAX.
  // Round to integer.
  // Store a multiplicative scale factor (as a TFloat) that will reproduce
  // the original value, subject to rounding errors.
  void ConvertToInt();
  // Returns the size rounded up to an internal factor used by the SIMD
  // implementation for its input.
  int RoundInputs(int size) const {
    if (!int_mode_ || !IntSimdMatrix::intSimdMatrix) {
      return size;
    }
    return IntSimdMatrix::intSimdMatrix->RoundInputs(size);
  }
  // Accessors.
  bool is_int_mode() const {
    return int_mode_;
  }
  int NumOutputs() const {
    return int_mode_ ? wi_.dim1() : wf_.dim1();
  }
  // Provides one set of weights. Only used by peep weight maxpool.
  const TFloat *GetWeights(int index) const {
    return wf_[index];
  }
  // Provides access to the deltas (dw_).
  TFloat GetDW(int i, int j) const {
    return dw_(i, j);
  }
  // Allocates any needed memory for running Backward, and zeroes the deltas,
  // thus eliminating any existing momentum.
  void InitBackward();
  // Writes to the given file. Returns false in case of error.
  bool Serialize(bool training, TFile *fp) const;
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(bool training, TFile *fp);
  // As DeSerialize, but reads an old (float) format WeightMatrix for
  // backward compatibility.
  bool DeSerializeOld(bool training, TFile *fp);
  // Computes matrix.vector v = Wu.
  // u is of size W.dim2() - 1 and the output v is of size W.dim1().
  // u is imagined to have an extra element at the end with value 1, to
  // implement the bias, but it doesn't actually have it.
  // Asserts that the call matches what we have.
  void MatrixDotVector(const TFloat *u, TFloat *v) const;
  // Int-mode equivalent of the above, using the quantized weights.
  void MatrixDotVector(const int8_t *u, TFloat *v) const;
  // MatrixDotVector for peep weights, MultiplyAccumulate adds the
  // component-wise products of *this[0] and v to inout.
  void MultiplyAccumulate(const TFloat *v, TFloat *inout);
  // Computes vector.matrix v = uW.
  // u is of size W.dim1() and the output v is of size W.dim2() - 1.
  // The last result is discarded, as v is assumed to have an imaginary
  // last value of 1, as with MatrixDotVector.
  void VectorDotMatrix(const TFloat *u, TFloat *v) const;
  // Fills dw_[i][j] with the dot product u[i][] . v[j][], using elements
  // from u and v, starting with u[i][offset] and v[j][offset].
  // Note that (matching MatrixDotVector) v[last][] is missing, presumed 1.0.
  // Runs parallel if requested. Note that inputs must be transposed.
  void SumOuterTransposed(const TransposedArray &u, const TransposedArray &v, bool parallel);
  // Updates the weights using the given learning rate, momentum and adam_beta.
  // num_samples is used in the Adam correction factor.
  void Update(float learning_rate, float momentum, float adam_beta, int num_samples);
  // Adds the dw_ in other to the dw_ is *this.
  void AddDeltas(const WeightMatrix &other);
  // Sums the products of weight updates in *this and other, splitting into
  // positive (same direction) in *same and negative (different direction) in
  // *changed.
  void CountAlternators(const WeightMatrix &other, TFloat *same, TFloat *changed) const;
  // Prints a histogram of the weight magnitudes, preceded by msg.
  void Debug2D(const char *msg);
private:
  // Choice between float and 8 bit int implementations.
  GENERIC_2D_ARRAY<TFloat> wf_;
  GENERIC_2D_ARRAY<int8_t> wi_;
  // Transposed copy of wf_, used only for Backward, and set with each Update.
  TransposedArray wf_t_;
  // Which of wf_ and wi_ are we actually using.
  bool int_mode_;
  // True if we are running adam in this weight matrix.
  bool use_adam_;
  // If we are using wi_, then scales_ is a factor to restore the row product
  // with a vector to the correct range.
  std::vector<TFloat> scales_;
  // Weight deltas. dw_ is the new delta, and updates_ the momentum-decaying
  // amount to be added to wf_/wi_.
  GENERIC_2D_ARRAY<TFloat> dw_;
  GENERIC_2D_ARRAY<TFloat> updates_;
  // Iff use_adam_, the sum of squares of dw_. The number of samples is
  // given to Update(). Serialized iff use_adam_.
  GENERIC_2D_ARRAY<TFloat> dw_sq_sum_;
  // The weights matrix reorganized in whatever way suits this instance.
  std::vector<int8_t> shaped_w_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_WEIGHTMATRIX_H_
|
2301_81045437/tesseract
|
src/lstm/weightmatrix.h
|
C++
|
apache-2.0
| 8,026
|
// Copyright 2007 Google Inc. All Rights Reserved.
//
// Author: Joern Wanke
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Simple drawing program to illustrate ScrollView capabilities.
//
// Functionality:
// - The menubar is used to select from different sample styles of input.
// - With the RMB it is possible to change the RGB values in different
// popup menus.
// - A LMB click either draws point-to-point, point or text.
// - A LMB dragging either draws a line, a rectangle or ellipse.
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#ifndef GRAPHICS_DISABLED
# include "scrollview.h"
# include "svmnode.h"
# include <cstdlib>
# include <iostream>
namespace tesseract {
// The current color values we use, initially white (== ScrollView::WHITE).
static int rgb[3] = {255, 255, 255};
// Example ScrollView client: a tiny paint program demonstrating menu bars,
// popup menus, and click/drag (selection) event handling.
class SVPaint : public SVEventHandler {
public:
  explicit SVPaint(const char *server_name);
  // This is the main event handling function that we need to overwrite, defined
  // in SVEventHandler.
  void Notify(const SVEvent *sv_event) override;
private:
  // These handlers take care of the SVET_POPUP, SVET_MENU, SVET_CLICK and
  // SVET_SELECTION events respectively.
  void PopupHandler(const SVEvent *sv_event);
  void MenuBarHandler(const SVEvent *sv_event);
  void ClickHandler(const SVEvent *sv_event);
  void SelectionHandler(const SVEvent *sv_event);
  // Convenience functions to build little menus.
  SVMenuNode *BuildPopupMenu();
  SVMenuNode *BuildMenuBar();
  // Our window.
  ScrollView *window_;
  // The mode we are in when an SVET_CLICK or an SVET_SELECTION event occurs.
  int click_mode_;
  int drag_mode_;
  // In the point-to-point drawing mode, we need to set a start-point the first
  // time we call it (e.g. call SetCursor).
  bool has_start_point_;
};
// Build a sample popup menu with one entry per RGB channel.
// Caller takes ownership of the returned tree.
SVMenuNode *SVPaint::BuildPopupMenu() {
  auto *root = new SVMenuNode(); // Empty root node.
  // The initial color is white, so every channel starts at "255".
  // Args: caption, command_id, initial value, description.
  root->AddChild("R", 1, "255", "Red Color Value?");
  root->AddChild("G", 2, "255", "Green Color Value?");
  root->AddChild("B", 3, "255", "Blue Color Value?");
  return root;
}
// Build a sample menu bar: a "Clicking" submenu (modes 1-3) and a
// "Dragging" submenu (modes 4-6). Caller takes ownership of the result.
SVMenuNode *SVPaint::BuildMenuBar() {
  auto *root = new SVMenuNode(); // Empty root node.
  SVMenuNode *click_menu = root->AddChild("Clicking");
  // Args: caption, command_id.
  click_menu->AddChild("Point to Point Drawing", 1);
  click_menu->AddChild("Point Drawing", 2);
  click_menu->AddChild("Text Drawing", 3);
  SVMenuNode *drag_menu = root->AddChild("Dragging");
  drag_menu->AddChild("Line Drawing", 4);
  drag_menu->AddChild("Rectangle Drawing", 5);
  drag_menu->AddChild("Ellipse Drawing", 6);
  return root;
}
// Takes care of the SVET_POPUP events.
// In our case, SVET_POPUP is used to set RGB values.
void SVPaint::PopupHandler(const SVEvent *sv_event) {
  // Since we only have the RGB values as popup items,
  // we take a shortcut to not bloat up code:
  // NOTE(review): command_id is assumed to be 1..3 (the ids used in
  // BuildPopupMenu); any other id would write out of bounds.
  rgb[sv_event->command_id - 1] = atoi(sv_event->parameter);
  window_->Pen(rgb[0], rgb[1], rgb[2]);
}
// Takes care of the SVET_MENU events.
// Commands 1-3 select a click mode; anything else selects a drag mode.
void SVPaint::MenuBarHandler(const SVEvent *sv_event) {
  const int id = sv_event->command_id;
  if (id > 0 && id < 4) {
    click_mode_ = id;
    // A new click mode invalidates any pending point-to-point start point.
    has_start_point_ = false;
  } else {
    drag_mode_ = id;
  }
}
// Takes care of the SVET_CLICK events.
// Depending on the click_mode_ we are in, either do point-to-point drawing,
// point drawing, or draw text at the clicked position.
void SVPaint::ClickHandler(const SVEvent *sv_event) {
  switch (click_mode_) {
    case 1: // Point-to-point drawing.
      if (!has_start_point_) {
        // First click only establishes the start point.
        has_start_point_ = true;
        window_->SetCursor(sv_event->x, sv_event->y);
      } else {
        window_->DrawTo(sv_event->x, sv_event->y);
      }
      break;
    case 2: // Point drawing, simulated by a 1-pixel line.
      window_->Line(sv_event->x, sv_event->y, sv_event->x, sv_event->y);
      break;
    case 3: { // Text drawing.
      // Show a modal input dialog, draw the entered text, then free it.
      char *input = window_->ShowInputDialog("Text:");
      window_->Text(sv_event->x, sv_event->y, input);
      delete[] input;
      break;
    }
  }
}
// Takes care of the SVET_SELECTION events.
// Depending on the drag_mode_ we are in, either draw a line, a rectangle or
// an ellipse spanning the dragged region.
void SVPaint::SelectionHandler(const SVEvent *sv_event) {
  switch (drag_mode_) {
    // FIXME inversed x_size, y_size
    case 4: // Line from the drag origin to the release point.
      window_->Line(sv_event->x, sv_event->y, sv_event->x - sv_event->x_size,
                    sv_event->y - sv_event->y_size);
      break;
    case 5: // Rectangle with opposite corners at origin and release point.
      window_->Rectangle(sv_event->x, sv_event->y, sv_event->x - sv_event->x_size,
                         sv_event->y - sv_event->y_size);
      break;
    case 6: // Ellipse centered on the drag origin with the dragged radii.
      window_->Ellipse(sv_event->x - sv_event->x_size, sv_event->y - sv_event->y_size,
                       sv_event->x_size, sv_event->y_size);
      break;
  }
}
// The event handling function from ScrollView which we have to overwrite.
// Dispatches CLICK, SELECTION, MENU and POPUP to their handlers and
// silently drops every other event type.
void SVPaint::Notify(const SVEvent *sv_event) {
  switch (sv_event->type) {
    case SVET_CLICK:
      ClickHandler(sv_event);
      break;
    case SVET_SELECTION:
      SelectionHandler(sv_event);
      break;
    case SVET_MENU:
      MenuBarHandler(sv_event);
      break;
    case SVET_POPUP:
      PopupHandler(sv_event);
      break;
    default:
      // Throw other events away.
      break;
  }
}
// Builds a new window, initializes the variables and event handler and builds
// the menu. Blocks until the window is destroyed.
SVPaint::SVPaint(const char *server_name) {
  window_ = new ScrollView("ScrollView Paint Example", // window caption
                           0, 0,                      // x,y window position
                           500, 500,                  // window size
                           500, 500,                  // canvas size
                           false,                     // whether the Y axis is inversed.
                                                      // this is included due to legacy
                                                      // reasons for tesseract and enables
                                                      // us to have (0,0) as the LOWER left
                                                      // of the coordinate system.
                           server_name);              // the server address.
  // Set the start modes to point-to-point and line drawing.
  click_mode_ = 1;
  drag_mode_ = 4;
  has_start_point_ = false;
  // Build our menus and add them to the window. The flag illustrates whether
  // this is a menu bar.
  SVMenuNode *popup_menu = BuildPopupMenu();
  popup_menu->BuildMenu(window_, false);
  SVMenuNode *bar_menu = BuildMenuBar();
  bar_menu->BuildMenu(window_, true);
  // Set the initial color values to White (could also be done by
  // passing (rgb[0], rgb[1], rgb[2]).
  window_->Pen(ScrollView::WHITE);
  window_->Brush(ScrollView::WHITE);
  // Adds the event handler to the window. This actually ensures that Notify
  // gets called when events occur.
  window_->AddEventHandler(this);
  // Set the window visible (calling this is important to actually render
  // everything. Without this call, the window would also be drawn, but the
  // menu bars would be missing.
  window_->SetVisible(true);
  // Block this thread until its window is destroyed.
  // Note that a special eventhandling thread was created when constructing
  // the window. Due to this, the application will not deadlock here.
  window_->AwaitEvent(SVET_DESTROY);
  // We now have 3 Threads running:
  // (1) The MessageReceiver thread which fetches messages and distributes them
  // (2) The EventHandler thread which handles all events for window_
  // (3) The main thread which waits on window_ for a DESTROY event (blocked)
}
} // namespace tesseract
// If a parameter is given, we try to connect to the given server.
// This enables us to test the remote capabilities of ScrollView.
// With no argument, the local ScrollView server is used.
int main(int argc, char **argv) {
  const char *server_name = (argc > 1) ? argv[1] : "localhost";
  // The constructor blocks until the window is destroyed.
  tesseract::SVPaint svp(server_name);
}
#endif // !GRAPHICS_DISABLED
|
2301_81045437/tesseract
|
src/svpaint.cpp
|
C++
|
apache-2.0
| 9,267
|
/**********************************************************************
* File: tesseract.cpp
* Description: Main program for merge of tess and editor.
* Author: Ray Smith
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include <cerrno> // for errno
#if defined(__USE_GNU)
# include <cfenv> // for feenableexcept
#endif
#include <climits> // for INT_MIN, INT_MAX
#include <cstdlib> // for std::getenv
#include <iostream>
#include <map> // for std::map
#include <memory> // std::unique_ptr
#include <allheaders.h>
#include <tesseract/baseapi.h>
#include "dict.h"
#include <tesseract/renderer.h>
#include "simddetect.h"
#include "tesseractclass.h" // for AnyTessLang
#include "tprintf.h" // for tprintf
#ifdef _OPENMP
# include <omp.h>
#endif
#if defined(HAVE_LIBARCHIVE)
# include <archive.h>
#endif
#if defined(HAVE_LIBCURL)
# include <curl/curl.h>
#endif
#if defined(_WIN32)
# include <fcntl.h>
# include <io.h>
# if defined(HAVE_TIFFIO_H)
# include <tiffio.h>
// libtiff error handler for Windows builds: prints "module: message." to
// stderr instead of libtiff's default message box.
static void Win32ErrorHandler(const char *module, const char *fmt, va_list ap) {
  if (module != nullptr) {
    fprintf(stderr, "%s: ", module);
  }
  vfprintf(stderr, fmt, ap);
  fprintf(stderr, ".\n");
}
// libtiff warning handler for Windows builds: like Win32ErrorHandler but
// prefixes the message with "Warning, ".
static void Win32WarningHandler(const char *module, const char *fmt, va_list ap) {
  if (module != nullptr) {
    fprintf(stderr, "%s: ", module);
  }
  fprintf(stderr, "Warning, ");
  vfprintf(stderr, fmt, ap);
  fprintf(stderr, ".\n");
}
# endif /* HAVE_TIFFIO_H */
// RAII guard that switches the Windows console output code page for the
// lifetime of the object and restores the previous code page on destruction.
class AutoWin32ConsoleOutputCP {
public:
  explicit AutoWin32ConsoleOutputCP(UINT codeCP) {
    oldCP_ = GetConsoleOutputCP();
    SetConsoleOutputCP(codeCP);
  }
  ~AutoWin32ConsoleOutputCP() {
    SetConsoleOutputCP(oldCP_);
  }
private:
  UINT oldCP_; // Code page in effect before construction.
};
// Static instance: the console prints UTF-8 for the whole program run.
static AutoWin32ConsoleOutputCP autoWin32ConsoleOutputCP(CP_UTF8);
#endif // _WIN32
using namespace tesseract;
// Prints the tesseract version, the leptonica/image library versions, the
// SIMD capabilities detected at runtime, and the versions of optional
// dependencies (OpenMP, libarchive, libcurl) to stdout.
static void PrintVersionInfo() {
  char *versionStrP;
  printf("tesseract %s\n", tesseract::TessBaseAPI::Version());
  // Leptonica returns heap-allocated strings that must be lept_free'd.
  versionStrP = getLeptonicaVersion();
  printf(" %s\n", versionStrP);
  lept_free(versionStrP);
  versionStrP = getImagelibVersions();
  printf(" %s\n", versionStrP);
  lept_free(versionStrP);
#if defined(HAVE_NEON) || defined(__aarch64__)
  // Braces added for consistency with every other branch in this function.
  if (tesseract::SIMDDetect::IsNEONAvailable()) {
    printf(" Found NEON\n");
  }
#else
  if (tesseract::SIMDDetect::IsAVX512BWAvailable()) {
    printf(" Found AVX512BW\n");
  }
  if (tesseract::SIMDDetect::IsAVX512FAvailable()) {
    printf(" Found AVX512F\n");
  }
  if (tesseract::SIMDDetect::IsAVX512VNNIAvailable()) {
    printf(" Found AVX512VNNI\n");
  }
  if (tesseract::SIMDDetect::IsAVX2Available()) {
    printf(" Found AVX2\n");
  }
  if (tesseract::SIMDDetect::IsAVXAvailable()) {
    printf(" Found AVX\n");
  }
  if (tesseract::SIMDDetect::IsFMAAvailable()) {
    printf(" Found FMA\n");
  }
  if (tesseract::SIMDDetect::IsSSEAvailable()) {
    printf(" Found SSE4.1\n");
  }
#endif
#ifdef _OPENMP
  printf(" Found OpenMP %d\n", _OPENMP);
#endif
#if defined(HAVE_LIBARCHIVE)
# if ARCHIVE_VERSION_NUMBER >= 3002000
  printf(" Found %s\n", archive_version_details());
# else
  printf(" Found %s\n", archive_version_string());
# endif // ARCHIVE_VERSION_NUMBER
#endif  // HAVE_LIBARCHIVE
#if defined(HAVE_LIBCURL)
  printf(" Found %s\n", curl_version());
#endif
}
// Prints the list of page segmentation modes (--psm values).
// Keep this text in sync with tesseract::PageSegMode.
static void PrintHelpForPSM() {
  const char *msg =
      "Page segmentation modes:\n"
      "  0    Orientation and script detection (OSD) only.\n"
      "  1    Automatic page segmentation with OSD.\n"
      "  2    Automatic page segmentation, but no OSD, or OCR. (not "
      "implemented)\n"
      "  3    Fully automatic page segmentation, but no OSD. (Default)\n"
      "  4    Assume a single column of text of variable sizes.\n"
      "  5    Assume a single uniform block of vertically aligned text.\n"
      "  6    Assume a single uniform block of text.\n"
      "  7    Treat the image as a single text line.\n"
      "  8    Treat the image as a single word.\n"
      "  9    Treat the image as a single word in a circle.\n"
      " 10    Treat the image as a single character.\n"
      " 11    Sparse text. Find as much text as possible in no"
      " particular order.\n"
      " 12    Sparse text with OSD.\n"
      " 13    Raw line. Treat the image as a single text line,\n"
      "       bypassing hacks that are Tesseract-specific.\n";
  fputs(msg, stdout);
#ifdef DISABLED_LEGACY_ENGINE
  fputs("\nNOTE: The OSD modes are currently disabled.\n", stdout);
#endif
}
#ifndef DISABLED_LEGACY_ENGINE
// Prints the list of OCR engine modes (--oem values).
// Keep this text in sync with tesseract::OcrEngineMode.
static void PrintHelpForOEM() {
  fputs(
      "OCR Engine modes:\n"
      "  0    Legacy engine only.\n"
      "  1    Neural nets LSTM engine only.\n"
      "  2    Legacy + LSTM engines.\n"
      "  3    Default, based on what is available.\n",
      stdout);
}
#endif // ndef DISABLED_LEGACY_ENGINE
// Prints the full (advanced) usage text. NOTE: the number of "%s"
// conversions in the usage format — and therefore the number of
// `program` arguments passed — depends on DISABLED_LEGACY_ENGINE;
// the #ifndef blocks below must stay in sync.
static void PrintHelpExtra(const char *program) {
  printf(
      "Usage:\n"
      "  %s --help | --help-extra | --help-psm | "
#ifndef DISABLED_LEGACY_ENGINE
      "--help-oem | "
#endif
      "--version\n"
      "  %s --list-langs [--tessdata-dir PATH]\n"
#ifndef DISABLED_LEGACY_ENGINE
      "  %s --print-fonts-table [options...] [configfile...]\n"
#endif // ndef DISABLED_LEGACY_ENGINE
      "  %s --print-parameters [options...] [configfile...]\n"
      "  %s imagename|imagelist|stdin outputbase|stdout [options...] "
      "[configfile...]\n"
      "\n"
      "OCR options:\n"
      "  --tessdata-dir PATH   Specify the location of tessdata path.\n"
      "  --user-words PATH     Specify the location of user words file.\n"
      "  --user-patterns PATH  Specify the location of user patterns file.\n"
      "  --dpi VALUE           Specify DPI for input image.\n"
      "  --loglevel LEVEL      Specify logging level. LEVEL can be\n"
      "                        ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL or OFF.\n"
      "  -l LANG[+LANG]        Specify language(s) used for OCR.\n"
      "  -c VAR=VALUE          Set value for config variables.\n"
      "                        Multiple -c arguments are allowed.\n"
      "  --psm NUM             Specify page segmentation mode.\n"
#ifndef DISABLED_LEGACY_ENGINE
      "  --oem NUM             Specify OCR Engine mode.\n"
#endif
      "NOTE: These options must occur before any configfile.\n"
      "\n",
      program, program, program, program
#ifndef DISABLED_LEGACY_ENGINE
      , program
#endif // ndef DISABLED_LEGACY_ENGINE
  );
  PrintHelpForPSM();
#ifndef DISABLED_LEGACY_ENGINE
  printf("\n");
  PrintHelpForOEM();
#endif
  printf(
      "\n"
      "Single options:\n"
      "  -h, --help            Show minimal help message.\n"
      "  --help-extra          Show extra help for advanced users.\n"
      "  --help-psm            Show page segmentation modes.\n"
#ifndef DISABLED_LEGACY_ENGINE
      "  --help-oem            Show OCR Engine modes.\n"
#endif
      "  -v, --version         Show version information.\n"
      "  --list-langs          List available languages for tesseract engine.\n"
#ifndef DISABLED_LEGACY_ENGINE
      "  --print-fonts-table   Print tesseract fonts table.\n"
#endif // ndef DISABLED_LEGACY_ENGINE
      "  --print-parameters    Print tesseract parameters.\n");
}
// Prints the short usage text shown for -h/--help and when the
// positional arguments are missing.
static void PrintHelpMessage(const char *program) {
  static const char *const kUsageFormat =
      "Usage:\n"
      "  %s --help | --help-extra | --version\n"
      "  %s --list-langs\n"
      "  %s imagename outputbase [options...] [configfile...]\n"
      "\n"
      "OCR options:\n"
      "  -l LANG[+LANG]        Specify language(s) used for OCR.\n"
      "NOTE: These options must occur before any configfile.\n"
      "\n"
      "Single options:\n"
      "  --help                Show this help message.\n"
      "  --help-extra          Show extra help for advanced users.\n"
      "  --version             Show version information.\n"
      "  --list-langs          List available languages for tesseract "
      "engine.\n";
  printf(kUsageFormat, program, program, program);
}
// Applies every "-c VAR=VALUE" argument from the command line to the API.
// Returns false (and stops) on a malformed assignment (missing '=').
// A value rejected by the API is reported to stderr but does not abort,
// matching the historical behavior.
// Fix: the previous implementation copied into fixed 256/255-byte
// buffers, silently truncating long variable names or values; std::string
// handles assignments of any length.
static bool SetVariablesFromCLArgs(tesseract::TessBaseAPI &api, int argc, char **argv) {
  bool success = true;
  for (int i = 0; i < argc; i++) {
    if (strcmp(argv[i], "-c") == 0 && i + 1 < argc) {
      const std::string assignment = argv[i + 1];
      const std::string::size_type eq_pos = assignment.find('=');
      if (eq_pos == std::string::npos) {
        fprintf(stderr, "Missing = in configvar assignment\n");
        success = false;
        break;
      }
      // Split on the first '=' so values may themselves contain '='.
      const std::string name = assignment.substr(0, eq_pos);
      const std::string value = assignment.substr(eq_pos + 1);
      ++i;
      if (!api.SetVariable(name.c_str(), value.c_str())) {
        fprintf(stderr, "Could not set option: %s=%s\n", name.c_str(), value.c_str());
      }
    }
  }
  return success;
}
// Prints the datapath and every language pack the API can find there,
// one per line, then shuts the API down.
static void PrintLangsList(tesseract::TessBaseAPI &api) {
  std::vector<std::string> languages;
  api.GetAvailableLanguagesAsVector(&languages);
  printf("List of available languages in \"%s\" (%zu):\n",
         api.GetDatapath(), languages.size());
  for (size_t idx = 0; idx < languages.size(); ++idx) {
    printf("%s\n", languages[idx].c_str());
  }
  api.End();
}
/**
* We have 2 possible sources of pagesegmode: a config file and
* the command line. For backwards compatibility reasons, the
* default in tesseract is tesseract::PSM_SINGLE_BLOCK, but the
* default for this program is tesseract::PSM_AUTO. We will let
* the config file take priority, so the command-line default
* can take priority over the tesseract default, so we use the
* value from the command line only if the retrieved mode
* is still tesseract::PSM_SINGLE_BLOCK, indicating no change
* in any config file. Therefore the only way to force
* tesseract::PSM_SINGLE_BLOCK is from the command line.
* It would be simpler if we could set the value before Init,
* but that doesn't work.
*/
// Applies the command-line page segmentation mode only when no config
// file changed the mode away from the library default (PSM_SINGLE_BLOCK);
// see the long comment above for the priority rules.
static void FixPageSegMode(tesseract::TessBaseAPI &api, tesseract::PageSegMode pagesegmode) {
  if (api.GetPageSegMode() != tesseract::PSM_SINGLE_BLOCK) {
    return; // a config file already chose a mode; leave it alone.
  }
  api.SetPageSegMode(pagesegmode);
}
// Returns true iff arg lies in [0, count). Otherwise prints a diagnostic
// naming the option (mode, e.g. "PSM"/"OEM") and its valid range, and
// returns false.
static bool checkArgValues(int arg, const char *mode, int count) {
  const bool in_range = (arg >= 0) && (arg < count);
  if (!in_range) {
    printf("Invalid %s value, please enter a number between 0-%d\n", mode, count - 1);
  }
  return in_range;
}
// NOTE: arg_i is used here to avoid ugly *i so many times in this function
// Parses the command line into the OCR configuration outputs and the two
// positional arguments (image, outputbase). Informational options
// (--help*, --version, --list-langs, --print-*) set noocr so a missing
// outputbase is not an error for them. Returns false on any malformed or
// unknown argument. On success *arg_i indexes the first unconsumed
// argument, i.e. the start of the optional config-file list.
static bool ParseArgs(int argc, char **argv, const char **lang, const char **image,
                      const char **outputbase, const char **datapath, l_int32 *dpi,
                      bool *list_langs, bool *print_parameters, bool *print_fonts_table,
                      std::vector<std::string> *vars_vec, std::vector<std::string> *vars_values,
                      l_int32 *arg_i, tesseract::PageSegMode *pagesegmode,
                      tesseract::OcrEngineMode *enginemode) {
  bool noocr = false;
  int i;
  // Options must precede the positional arguments; the loop stops at the
  // first non-option argument once outputbase has been captured.
  for (i = 1; i < argc && (*outputbase == nullptr || argv[i][0] == '-'); i++) {
    if (*image != nullptr && *outputbase == nullptr) {
      // outputbase follows image, don't allow options at that position.
      *outputbase = argv[i];
    } else if ((strcmp(argv[i], "-h") == 0) || (strcmp(argv[i], "--help") == 0)) {
      PrintHelpMessage(argv[0]);
      noocr = true;
    } else if (strcmp(argv[i], "--help-extra") == 0) {
      PrintHelpExtra(argv[0]);
      noocr = true;
    } else if ((strcmp(argv[i], "--help-psm") == 0)) {
      PrintHelpForPSM();
      noocr = true;
#ifndef DISABLED_LEGACY_ENGINE
    } else if ((strcmp(argv[i], "--help-oem") == 0)) {
      PrintHelpForOEM();
      noocr = true;
#endif
    } else if ((strcmp(argv[i], "-v") == 0) || (strcmp(argv[i], "--version") == 0)) {
      PrintVersionInfo();
      noocr = true;
    } else if (strcmp(argv[i], "-l") == 0 && i + 1 < argc) {
      *lang = argv[i + 1];
      ++i;
    } else if (strcmp(argv[i], "--tessdata-dir") == 0 && i + 1 < argc) {
      *datapath = argv[i + 1];
      ++i;
    } else if (strcmp(argv[i], "--dpi") == 0 && i + 1 < argc) {
      // NOTE(review): atoi yields 0 for non-numeric input; the caller
      // treats dpi == 0 as "not set".
      *dpi = atoi(argv[i + 1]);
      ++i;
    } else if (strcmp(argv[i], "--loglevel") == 0 && i + 1 < argc) {
      // Allow the log levels which are used by log4cxx.
      const std::string loglevel_string = argv[++i];
      static const std::map<const std::string, int> loglevels {
        {"ALL", INT_MIN},
        {"TRACE", 5000},
        {"DEBUG", 10000},
        {"INFO", 20000},
        {"WARN", 30000},
        {"ERROR", 40000},
        {"FATAL", 50000},
        {"OFF", INT_MAX},
      };
      try {
        auto loglevel = loglevels.at(loglevel_string);
        log_level = loglevel;
      } catch (const std::out_of_range &e) {
        // TODO: Allow numeric argument?
        tprintf("Error, unsupported --loglevel %s\n", loglevel_string.c_str());
        return false;
      }
    } else if (strcmp(argv[i], "--user-words") == 0 && i + 1 < argc) {
      vars_vec->push_back("user_words_file");
      vars_values->push_back(argv[i + 1]);
      ++i;
    } else if (strcmp(argv[i], "--user-patterns") == 0 && i + 1 < argc) {
      vars_vec->push_back("user_patterns_file");
      vars_values->push_back(argv[i + 1]);
      ++i;
    } else if (strcmp(argv[i], "--list-langs") == 0) {
      noocr = true;
      *list_langs = true;
    } else if (strcmp(argv[i], "--psm") == 0 && i + 1 < argc) {
      if (!checkArgValues(atoi(argv[i + 1]), "PSM", tesseract::PSM_COUNT)) {
        return false;
      }
      *pagesegmode = static_cast<tesseract::PageSegMode>(atoi(argv[i + 1]));
      ++i;
    } else if (strcmp(argv[i], "--oem") == 0 && i + 1 < argc) {
      // With the legacy engine disabled the value is accepted but ignored.
#ifndef DISABLED_LEGACY_ENGINE
      int oem = atoi(argv[i + 1]);
      if (!checkArgValues(oem, "OEM", tesseract::OEM_COUNT)) {
        return false;
      }
      *enginemode = static_cast<tesseract::OcrEngineMode>(oem);
#endif
      ++i;
    } else if (strcmp(argv[i], "--print-parameters") == 0) {
      noocr = true;
      *print_parameters = true;
#ifndef DISABLED_LEGACY_ENGINE
    } else if (strcmp(argv[i], "--print-fonts-table") == 0) {
      noocr = true;
      *print_fonts_table = true;
#endif // ndef DISABLED_LEGACY_ENGINE
    } else if (strcmp(argv[i], "-c") == 0 && i + 1 < argc) {
      // handled properly after api init
      ++i;
    } else if (*image == nullptr) {
      *image = argv[i];
    } else {
      // Unexpected argument.
      fprintf(stderr, "Error, unknown command line argument '%s'\n", argv[i]);
      return false;
    }
  }
  *arg_i = i;
  if (*pagesegmode == tesseract::PSM_OSD_ONLY) {
    // OSD = orientation and script detection.
    if (*lang != nullptr && strcmp(*lang, "osd")) {
      // If the user explicitly specifies a language (other than osd)
      // or a script, only orientation can be detected.
      fprintf(stderr, "Warning, detects only orientation with -l %s\n", *lang);
    } else {
      // That mode requires osd.traineddata to detect orientation and script.
      *lang = "osd";
    }
  }
  // An actual OCR run needs an outputbase; informational runs do not.
  if (*outputbase == nullptr && noocr == false) {
    PrintHelpMessage(argv[0]);
    return false;
  }
  return true;
}
// Creates one renderer per requested output format (tessedit_create_*
// variables), in a fixed order, and chains all of them onto the first.
// Falls back to plain text output if nothing was requested and no
// renderer creation failed. Extracted the ten identical
// "happy? push : report error" stanzas into a single helper.
static void PreloadRenderers(tesseract::TessBaseAPI &api,
                             std::vector<std::unique_ptr<TessResultRenderer>> &renderers,
                             tesseract::PageSegMode pagesegmode, const char *outputbase) {
  if (pagesegmode == tesseract::PSM_OSD_ONLY) {
#ifndef DISABLED_LEGACY_ENGINE
    renderers.push_back(std::make_unique<tesseract::TessOsdRenderer>(outputbase));
#endif // ndef DISABLED_LEGACY_ENGINE
  } else {
    bool error = false;
    // Appends |renderer| if its output file was opened successfully;
    // otherwise reports a creation failure for output |type| and records
    // the error so the TXT fallback below is suppressed.
    auto add_renderer = [&renderers, &error](std::unique_ptr<TessResultRenderer> renderer,
                                             const char *type) {
      if (renderer->happy()) {
        renderers.push_back(std::move(renderer));
      } else {
        tprintf("Error, could not create %s output file: %s\n", type, strerror(errno));
        error = true;
      }
    };
    bool b;
    api.GetBoolVariable("tessedit_create_hocr", &b);
    if (b) {
      bool font_info;
      api.GetBoolVariable("hocr_font_info", &font_info);
      add_renderer(std::make_unique<tesseract::TessHOcrRenderer>(outputbase, font_info), "hOCR");
    }
    api.GetBoolVariable("tessedit_create_alto", &b);
    if (b) {
      add_renderer(std::make_unique<tesseract::TessAltoRenderer>(outputbase), "ALTO");
    }
    api.GetBoolVariable("tessedit_create_page_xml", &b);
    if (b) {
      add_renderer(std::make_unique<tesseract::TessPAGERenderer>(outputbase), "PAGE");
    }
    api.GetBoolVariable("tessedit_create_tsv", &b);
    if (b) {
      bool font_info;
      api.GetBoolVariable("hocr_font_info", &font_info);
      add_renderer(std::make_unique<tesseract::TessTsvRenderer>(outputbase, font_info), "TSV");
    }
    api.GetBoolVariable("tessedit_create_pdf", &b);
    if (b) {
#ifdef WIN32
      // PDF is binary data; stdout must not translate line endings.
      if (_setmode(_fileno(stdout), _O_BINARY) == -1)
        tprintf("ERROR: cin to binary: %s", strerror(errno));
#endif // WIN32
      bool textonly;
      api.GetBoolVariable("textonly_pdf", &textonly);
      add_renderer(
          std::make_unique<tesseract::TessPDFRenderer>(outputbase, api.GetDatapath(), textonly),
          "PDF");
    }
    api.GetBoolVariable("tessedit_write_unlv", &b);
    if (b) {
      api.SetVariable("unlv_tilde_crunching", "true");
      add_renderer(std::make_unique<tesseract::TessUnlvRenderer>(outputbase), "UNLV");
    }
    api.GetBoolVariable("tessedit_create_lstmbox", &b);
    if (b) {
      add_renderer(std::make_unique<tesseract::TessLSTMBoxRenderer>(outputbase), "LSTM BOX");
    }
    api.GetBoolVariable("tessedit_create_boxfile", &b);
    if (b) {
      add_renderer(std::make_unique<tesseract::TessBoxTextRenderer>(outputbase), "BOX");
    }
    api.GetBoolVariable("tessedit_create_wordstrbox", &b);
    if (b) {
      add_renderer(std::make_unique<tesseract::TessWordStrBoxRenderer>(outputbase), "WordStr BOX");
    }
    api.GetBoolVariable("tessedit_create_txt", &b);
    if (b || (!error && renderers.empty())) {
      // Create text output if no other output was requested
      // even if text output was not explicitly requested unless
      // there was an error. (Setting |error| on a TXT failure is
      // unobservable: |error| is not read after this point.)
      add_renderer(std::make_unique<tesseract::TessTextRenderer>(outputbase), "TXT");
    }
  }
  // Chain the extra renderers onto the first one. Ownership moves into the
  // chain, so the vector slots after [0] are released, not destroyed.
  for (size_t r = 1; r < renderers.size(); ++r) {
    renderers[0]->insert(renderers[r].get());
    renderers[r].release(); // at the moment insert() is owning
  }
}
/**********************************************************************
* main()
*
**********************************************************************/
// Program entry point: parses arguments, initializes the API, then either
// prints the requested information (--list-langs, --print-parameters,
// --print-fonts-table), runs layout analysis only (PSM 2), or performs a
// full OCR run through the configured renderers.
int main(int argc, char **argv) {
#if defined(__USE_GNU) && defined(HAVE_FEENABLEEXCEPT)
  // Raise SIGFPE.
#  if defined(__clang__)
  // clang creates code which causes some FP exceptions, so don't enable those.
  feenableexcept(FE_DIVBYZERO);
#  else
  feenableexcept(FE_DIVBYZERO | FE_OVERFLOW | FE_INVALID);
#  endif
#endif
  const char *lang = nullptr;
  const char *image = nullptr;
  const char *outputbase = nullptr;
  const char *datapath = nullptr;
  bool list_langs = false;
  bool print_parameters = false;
  bool print_fonts_table = false;
  l_int32 dpi = 0;
  int arg_i = 1;
  // Program default is full automatic page segmentation; see the comment
  // above FixPageSegMode for how this interacts with config files.
  tesseract::PageSegMode pagesegmode = tesseract::PSM_AUTO;
#ifdef DISABLED_LEGACY_ENGINE
  auto enginemode = tesseract::OEM_LSTM_ONLY;
#else
  tesseract::OcrEngineMode enginemode = tesseract::OEM_DEFAULT;
#endif
  std::vector<std::string> vars_vec;
  std::vector<std::string> vars_values;

  if (std::getenv("LEPT_MSG_SEVERITY")) {
    // Get Leptonica message level from environment variable.
    setMsgSeverity(L_SEVERITY_EXTERNAL);
  } else {
    // Disable debugging and informational messages from Leptonica.
    setMsgSeverity(L_SEVERITY_ERROR);
  }

#if defined(HAVE_TIFFIO_H) && defined(_WIN32)
  /* Show libtiff errors and warnings on console (not in GUI). */
  TIFFSetErrorHandler(Win32ErrorHandler);
  TIFFSetWarningHandler(Win32WarningHandler);
#endif // HAVE_TIFFIO_H && _WIN32

  if (!ParseArgs(argc, argv, &lang, &image, &outputbase, &datapath, &dpi, &list_langs,
                 &print_parameters, &print_fonts_table, &vars_vec, &vars_values, &arg_i,
                 &pagesegmode, &enginemode)) {
    return EXIT_FAILURE;
  }

  bool in_recognition_mode = !list_langs && !print_parameters && !print_fonts_table;

  if (lang == nullptr && in_recognition_mode) {
    // Set default language model if none was given and a model file is needed.
    lang = "eng";
  }

  if (image == nullptr && in_recognition_mode) {
    return EXIT_SUCCESS;
  }

  // Call GlobalDawgCache here to create the global DawgCache object before
  // the TessBaseAPI object. This fixes the order of destructor calls:
  // first TessBaseAPI must be destructed, DawgCache must be the last object.
  tesseract::Dict::GlobalDawgCache();

  TessBaseAPI api;

  api.SetOutputName(outputbase);

  // Note: init failure is deliberately checked only after the --list-langs
  // branch below, which can still succeed without a loaded language.
  const int init_failed = api.Init(datapath, lang, enginemode, &(argv[arg_i]), argc - arg_i,
                                   &vars_vec, &vars_values, false);

  if (!SetVariablesFromCLArgs(api, argc, argv)) {
    return EXIT_FAILURE;
  }

  // SIMD settings might be overridden by config variable.
  tesseract::SIMDDetect::Update();

  if (list_langs) {
    PrintLangsList(api);
    return EXIT_SUCCESS;
  }

  if (init_failed) {
    fprintf(stderr, "Could not initialize tesseract.\n");
    return EXIT_FAILURE;
  }

  if (print_parameters) {
    FILE *fout = stdout;
    fprintf(stdout, "Tesseract parameters:\n");
    api.PrintVariables(fout);
    api.End();
    return EXIT_SUCCESS;
  }

#ifndef DISABLED_LEGACY_ENGINE
  if (print_fonts_table) {
    FILE *fout = stdout;
    fprintf(stdout, "Tesseract fonts table:\n");
    api.PrintFontsTable(fout);
    api.End();
    return EXIT_SUCCESS;
  }
#endif // ndef DISABLED_LEGACY_ENGINE

  FixPageSegMode(api, pagesegmode);

  if (dpi) {
    auto dpi_string = std::to_string(dpi);
    api.SetVariable("user_defined_dpi", dpi_string.c_str());
  }

  int ret_val = EXIT_SUCCESS;

  // PSM 2: layout analysis only — report orientation info, no OCR.
  if (pagesegmode == tesseract::PSM_AUTO_ONLY) {
    Pix *pixs = pixRead(image);
    if (!pixs) {
      fprintf(stderr, "Leptonica can't process input file: %s\n", image);
      return 2; // distinct exit code for an unreadable input image.
    }

    api.SetImage(pixs);

    tesseract::Orientation orientation;
    tesseract::WritingDirection direction;
    tesseract::TextlineOrder order;
    float deskew_angle;

    const std::unique_ptr<const tesseract::PageIterator> it(api.AnalyseLayout());
    if (it) {
      // TODO: Implement output of page segmentation, see documentation
      // ("Automatic page segmentation, but no OSD, or OCR").
      it->Orientation(&orientation, &direction, &order, &deskew_angle);
      tprintf(
          "Orientation: %d\nWritingDirection: %d\nTextlineOrder: %d\n"
          "Deskew angle: %.4f\n",
          orientation, direction, order, deskew_angle);
    } else {
      ret_val = EXIT_FAILURE;
    }

    pixDestroy(&pixs);
    return ret_val;
  }

  // Set in_training_mode to true when using one of these configs:
  // ambigs.train, box.train, box.train.stderr, linebox, rebox, lstm.train.
  // In this mode no other OCR result files are written.
  bool b = false;
  bool in_training_mode = (api.GetBoolVariable("tessedit_ambigs_training", &b) && b) ||
                          (api.GetBoolVariable("tessedit_resegment_from_boxes", &b) && b) ||
                          (api.GetBoolVariable("tessedit_make_boxes_from_boxes", &b) && b) ||
                          (api.GetBoolVariable("tessedit_train_line_recognizer", &b) && b);

  if (api.GetPageSegMode() == tesseract::PSM_OSD_ONLY) {
    if (!api.tesseract()->AnyTessLang()) {
      fprintf(stderr, "Error, OSD requires a model for the legacy engine\n");
      return EXIT_FAILURE;
    }
  }
#ifdef DISABLED_LEGACY_ENGINE
  // Without the legacy engine the OSD page segmentation modes are
  // unavailable: PSM 0 is a hard error, PSM 1 and 12 degrade with a warning.
  auto cur_psm = api.GetPageSegMode();
  auto osd_warning = std::string("");
  if (cur_psm == tesseract::PSM_OSD_ONLY) {
    const char *disabled_osd_msg =
        "\nERROR: The page segmentation mode 0 (OSD Only) is currently "
        "disabled.\n\n";
    fprintf(stderr, "%s", disabled_osd_msg);
    return EXIT_FAILURE;
  } else if (cur_psm == tesseract::PSM_AUTO_OSD) {
    api.SetPageSegMode(tesseract::PSM_AUTO);
    osd_warning +=
        "\nWarning: The page segmentation mode 1 (Auto+OSD) is currently "
        "disabled. "
        "Using PSM 3 (Auto) instead.\n\n";
  } else if (cur_psm == tesseract::PSM_SPARSE_TEXT_OSD) {
    api.SetPageSegMode(tesseract::PSM_SPARSE_TEXT);
    osd_warning +=
        "\nWarning: The page segmentation mode 12 (Sparse text + OSD) is "
        "currently disabled. "
        "Using PSM 11 (Sparse text) instead.\n\n";
  }
#endif // def DISABLED_LEGACY_ENGINE

  std::vector<std::unique_ptr<TessResultRenderer>> renderers;

  if (in_training_mode) {
    renderers.push_back(nullptr);
  } else if (outputbase != nullptr) {
    PreloadRenderers(api, renderers, pagesegmode, outputbase);
  }

  if (!renderers.empty()) {
#ifdef DISABLED_LEGACY_ENGINE
    if (!osd_warning.empty()) {
      fprintf(stderr, "%s", osd_warning.c_str());
    }
#endif

    bool succeed = api.ProcessPages(image, nullptr, 0, renderers[0].get());
    if (!succeed) {
      fprintf(stderr, "Error during processing.\n");
      ret_val = EXIT_FAILURE;
    }
  }

  return ret_val;
}
// --- Dataset-extraction artifact removed: a metadata row
// ("2301_81045437/tesseract" | "src/tesseract.cpp" | C++ | apache-2.0 |
// 27650 bytes) separated the two concatenated source files here. The
// content below belongs to a different file, src/textord/alignedblob.cpp. ---
///////////////////////////////////////////////////////////////////////
// File: alignedblob.cpp
// Description: Subclass of BBGrid to find vertically aligned blobs.
// Author: Ray Smith
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "alignedblob.h"
#include <algorithm>
namespace tesseract {
// Debug controls for tab finding; the testregion_* rectangle limits
// where WithinTestRegion allows verbose reporting.
INT_VAR(textord_debug_tabfind, 0, "Debug tab finding");
INT_VAR(textord_debug_bugs, 0, "Turn on output related to bugs in tab finding");
// Defaults (-1 / INT32_MAX) make the rectangle cover everything, so only
// the debug level gates reporting unless a region is configured.
static INT_VAR(textord_testregion_left, -1,
               "Left edge of debug reporting rectangle in Leptonica coords "
               "(bottom=0/top=height), with horizontal lines x/y-flipped");
static INT_VAR(textord_testregion_top, INT32_MAX,
               "Top edge of debug reporting rectangle in Leptonica coords "
               "(bottom=0/top=height), with horizontal lines x/y-flipped");
static INT_VAR(textord_testregion_right, INT32_MAX,
               "Right edge of debug rectangle in Leptonica coords "
               "(bottom=0/top=height), with horizontal lines x/y-flipped");
static INT_VAR(textord_testregion_bottom, -1,
               "Bottom edge of debug rectangle in Leptonica coords "
               "(bottom=0/top=height), with horizontal lines x/y-flipped");
BOOL_VAR(textord_debug_printable, false, "Make debug windows printable");

// Tuning constants for tab-stop and vertical-line detection.
// Fraction of resolution used as alignment tolerance for aligned tabs.
const double kAlignedFraction = 0.03125;
// Fraction of resolution used as alignment tolerance for ragged tabs.
const double kRaggedFraction = 2.5;
// Fraction of height used as a minimum gutter gap for aligned blobs.
const double kAlignedGapFraction = 0.75;
// Fraction of height used as a minimum gutter gap for ragged tabs.
const double kRaggedGapFraction = 1.0;
// Constant number of pixels used as alignment tolerance for line finding.
const int kVLineAlignment = 3;
// Constant number of pixels used as gutter gap tolerance for line finding.
const int kVLineGutter = 1;
// Constant number of pixels used as the search size for line finding.
const int kVLineSearchSize = 150;
// Min number of points to accept for a ragged tab stop.
const int kMinRaggedTabs = 5;
// Min number of points to accept for an aligned tab stop.
const int kMinAlignedTabs = 4;
// Constant number of pixels minimum height of a vertical line.
const int kVLineMinLength = 300;
// Minimum gradient for a vertical tab vector. Used to prune away junk
// tab vectors with what would be a ridiculously large skew angle.
// Value corresponds to tan(90 - max allowed skew angle)
const double kMinTabGradient = 4.0;
// Tolerance to skew on top of current estimate of skew. Divide x or y length
// by kMaxSkewFactor to get the y or x skew distance.
// If the angle is small, the angle in degrees is roughly 60/kMaxSkewFactor.
const int kMaxSkewFactor = 15;
// Constructor to set the parameters for finding aligned and ragged tabs.
// Vertical_x and vertical_y are the current estimates of the true vertical
// direction (up) in the image. Height is the height of the starter blob.
// v_gap_multiple is the multiple of height that will be used as a limit
// on vertical gap before giving up and calling the line ended.
// resolution is the original image resolution, and align0 indicates the
// type of tab stop to be found.
// Sets up the parameters for finding aligned or ragged tab stops.
// vertical_x/vertical_y are the current estimate of the true vertical
// direction (up); height is the height of the starter blob;
// v_gap_multiple limits the vertical gap before a line is declared
// finished; resolution is the image resolution; align0 selects the tab
// type sought.
AlignedBlobParams::AlignedBlobParams(int vertical_x, int vertical_y, int height, int v_gap_multiple,
                                     int min_gutter_width, int resolution, TabAlignment align0)
    : right_tab(align0 == TA_RIGHT_RAGGED || align0 == TA_RIGHT_ALIGNED)
    , ragged(align0 == TA_LEFT_RAGGED || align0 == TA_RIGHT_RAGGED)
    , alignment(align0)
    , confirmed_type(TT_CONFIRMED)
    , min_length(0) {
  // The vertical search limit scales with the height of the starter blob;
  // the alignment tolerances scale with the image resolution.
  max_v_gap = height * v_gap_multiple;
  // Converts a fraction of the resolution into a rounded pixel count.
  auto tolerance = [resolution](double fraction) {
    return static_cast<int>(resolution * fraction + 0.5);
  };
  if (ragged) {
    // A ragged edge gets a generous tolerance on its ragged side, but in
    // exchange must show a much wider gutter.
    gutter_fraction = kRaggedGapFraction;
    const bool ragged_on_right = (alignment == TA_RIGHT_RAGGED);
    l_align_tolerance = tolerance(ragged_on_right ? kRaggedFraction : kAlignedFraction);
    r_align_tolerance = tolerance(ragged_on_right ? kAlignedFraction : kRaggedFraction);
    min_points = kMinRaggedTabs;
  } else {
    gutter_fraction = kAlignedGapFraction;
    l_align_tolerance = tolerance(kAlignedFraction);
    r_align_tolerance = tolerance(kAlignedFraction);
    min_points = kMinAlignedTabs;
  }
  // The gutter must be at least the configured minimum width.
  min_gutter = std::max(static_cast<int>(height * gutter_fraction + 0.5), min_gutter_width);
  // Fit the vertical vector into an ICOORD, which is 16 bit.
  set_vertical(vertical_x, vertical_y);
}
// Constructor to set the parameters for finding vertical lines.
// Vertical_x and vertical_y are the current estimates of the true vertical
// direction (up) in the image. Width is the width of the starter blob.
// Sets up the parameters for finding vertical lines. vertical_x and
// vertical_y are the current estimate of the true vertical direction
// (up); width is the width of the starter blob.
AlignedBlobParams::AlignedBlobParams(int vertical_x, int vertical_y, int width)
    : gutter_fraction(0.0)
    , right_tab(false)
    , ragged(false)
    , alignment(TA_SEPARATOR)
    , confirmed_type(TT_VLINE)
    , max_v_gap(kVLineSearchSize)
    , min_gutter(kVLineGutter)
    , min_points(1)
    , min_length(kVLineMinLength) {
  // Both edges must align within the starter blob's width, but never
  // tighter than the constant line-finding tolerance.
  const int edge_tolerance = std::max(kVLineAlignment, width);
  l_align_tolerance = edge_tolerance;
  r_align_tolerance = edge_tolerance;
  // Fit the vertical vector into an ICOORD, which is 16 bit.
  set_vertical(vertical_x, vertical_y);
}
// Fit the vertical vector into an ICOORD, which is 16 bit.
// Scales both components down by a common integer factor so each fits in
// 16 bits while the direction of the vector is preserved.
// Fix: the previous code derived the factor from vertical_y alone and
// only for positive overflow, so a large |vertical_x| or a large negative
// component could still overflow the 16-bit ICOORD.
void AlignedBlobParams::set_vertical(int vertical_x, int vertical_y) {
  int factor = 1;
  const int max_component = std::max(abs(vertical_x), abs(vertical_y));
  if (max_component > INT16_MAX) {
    factor = max_component / INT16_MAX + 1;
  }
  vertical.set_x(vertical_x / factor);
  vertical.set_y(vertical_y / factor);
}
// Constructs the search grid covering the page region [bleft, tright]
// with the given cell size; all grid behavior is inherited from BlobGrid.
AlignedBlob::AlignedBlob(int gridsize, const ICOORD &bleft, const ICOORD &tright)
    : BlobGrid(gridsize, bleft, tright) {}
// Return true if the given coordinates are within the test rectangle
// and the debug level is at least the given detail level.
// Returns true if (x, y) falls inside the configured debug rectangle
// (textord_testregion_*) and textord_debug_tabfind is at least
// detail_level. With the default rectangle this reduces to the level
// check alone.
bool AlignedBlob::WithinTestRegion(int detail_level, int x, int y) {
  if (textord_debug_tabfind < detail_level) {
    return false;
  }
  const bool x_in_range = x >= textord_testregion_left && x <= textord_testregion_right;
  const bool y_in_range = y >= textord_testregion_bottom && y <= textord_testregion_top;
  return x_in_range && y_in_range;
}
#ifndef GRAPHICS_DISABLED
// Display the tab codes of the BLOBNBOXes in this grid.
// Draws every tab edge in the grid into tab_win (creating the window if
// needed), colour-coded by tab type, and returns the window.
ScrollView *AlignedBlob::DisplayTabs(const char *window_name, ScrollView *tab_win) {
  if (tab_win == nullptr) {
    tab_win = MakeWindow(0, 50, window_name);
  }
  // Maps a tab type to its display colour; the caller supplies a distinct
  // palette for left and right edges. Unknown types render GREY.
  auto pen_for = [](TabType type, auto maybe_aligned, auto maybe_ragged, auto confirmed) {
    if (type == TT_MAYBE_ALIGNED) {
      return maybe_aligned;
    }
    if (type == TT_MAYBE_RAGGED) {
      return maybe_ragged;
    }
    if (type == TT_CONFIRMED) {
      return confirmed;
    }
    return ScrollView::GREY;
  };
  // Walk every blob in the grid and draw its tabbed edges.
  BlobGridSearch search(this);
  search.StartFullSearch();
  BLOBNBOX *blob;
  while ((blob = search.NextFullSearch()) != nullptr) {
    const TBOX &box = blob->bounding_box();
    const TabType left_type = blob->left_tab_type();
    if (left_type != TT_NONE) {
      tab_win->Pen(pen_for(left_type, ScrollView::BLUE, ScrollView::YELLOW, ScrollView::GREEN));
      tab_win->Line(box.left(), box.top(), box.left(), box.bottom());
    }
    const TabType right_type = blob->right_tab_type();
    if (right_type != TT_NONE) {
      tab_win->Pen(pen_for(right_type, ScrollView::MAGENTA, ScrollView::ORANGE, ScrollView::RED));
      tab_win->Line(box.right(), box.top(), box.right(), box.bottom());
    }
  }
  tab_win->Update();
  return tab_win;
}
#endif // !GRAPHICS_DISABLED
// Helper returns true if the total number of line_crossings of all the blobs
// in the list is at least 2.
// Returns true if the line_crossings() of all blobs in the list sum to
// at least 2.
static bool AtLeast2LineCrossings(BLOBNBOX_CLIST *blobs) {
  int crossings = 0;
  BLOBNBOX_C_IT blob_it(blobs);
  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    crossings += blob_it.data()->line_crossings();
  }
  return crossings >= 2;
}
// Destructor.
// It is defined here, so the compiler can create a single vtable
// instead of weak vtables in every compilation unit. (The out-of-line
// defaulted definition anchors the vtable in this translation unit.)
AlignedBlob::~AlignedBlob() = default;
// Finds a vector corresponding to a set of vertically aligned blob edges
// running through the given box. The type of vector returned and the
// search parameters are determined by the AlignedBlobParams.
// vertical_x and y are updated with an estimate of the real
// vertical direction. (skew finding.)
// Returns nullptr if no decent vector can be found.
TabVector *AlignedBlob::FindVerticalAlignment(AlignedBlobParams align_params, BLOBNBOX *bbox,
                                              int *vertical_x, int *vertical_y) {
  int ext_start_y, ext_end_y;
  BLOBNBOX_CLIST good_points;
  // Search up and then down from the starting bbox.
  TBOX box = bbox->bounding_box();
  bool debug = WithinTestRegion(2, box.left(), box.bottom());
  // AlignTabs prepends when searching downwards and appends when searching
  // upwards, so after both calls good_points is ordered bottom-to-top.
  // ext_end_y/ext_start_y receive the vertical extent of each search.
  int pt_count = AlignTabs(align_params, false, bbox, &good_points, &ext_end_y);
  pt_count += AlignTabs(align_params, true, bbox, &good_points, &ext_start_y);
  BLOBNBOX_C_IT it(&good_points);
  // Last element of the list is the topmost blob; first is the bottom-most.
  it.move_to_last();
  box = it.data()->bounding_box();
  int end_y = box.top();
  int end_x = align_params.right_tab ? box.right() : box.left();
  it.move_to_first();
  box = it.data()->bounding_box();
  int start_x = align_params.right_tab ? box.right() : box.left();
  int start_y = box.bottom();
  // Acceptable tab vectors must have a minimum number of points,
  // have a minimum acceptable length, and have a minimum gradient.
  // The gradient corresponds to the skew angle.
  // Ragged tabs don't need to satisfy the gradient condition, as they
  // will always end up parallel to the vertical direction.
  bool at_least_2_crossings = AtLeast2LineCrossings(&good_points);
  // A candidate that crosses 2+ ruled lines is accepted regardless of the
  // basic point-count/length/gradient tests.
  if ((pt_count >= align_params.min_points && end_y - start_y >= align_params.min_length &&
       (align_params.ragged || end_y - start_y >= abs(end_x - start_x) * kMinTabGradient)) ||
      at_least_2_crossings) {
    int confirmed_points = 0;
    // Count existing confirmed points to see if vector is acceptable.
    for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
      bbox = it.data();
      if (align_params.right_tab) {
        if (bbox->right_tab_type() == align_params.confirmed_type) {
          ++confirmed_points;
        }
      } else {
        if (bbox->left_tab_type() == align_params.confirmed_type) {
          ++confirmed_points;
        }
      }
    }
    // Ragged vectors are not allowed to use too many already used points:
    // fewer than half the points may be previously confirmed.
    if (!align_params.ragged || confirmed_points + confirmed_points < pt_count) {
      // NOTE(review): bbox here is the last element visited by the counting
      // loop above, and this box deliberately shadows the outer box.
      const TBOX &box = bbox->bounding_box();
      if (debug) {
        tprintf("Confirming tab vector of %d pts starting at %d,%d\n", pt_count, box.left(),
                box.bottom());
      }
      // Flag all the aligned neighbours as confirmed .
      for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
        bbox = it.data();
        if (align_params.right_tab) {
          bbox->set_right_tab_type(align_params.confirmed_type);
        } else {
          bbox->set_left_tab_type(align_params.confirmed_type);
        }
        if (debug) {
          bbox->bounding_box().print();
        }
      }
      // Now make the vector and return it.
      TabVector *result =
          TabVector::FitVector(align_params.alignment, align_params.vertical, ext_start_y,
                               ext_end_y, &good_points, vertical_x, vertical_y);
      result->set_intersects_other_lines(at_least_2_crossings);
      if (debug) {
        tprintf("Box was %d, %d\n", box.left(), box.bottom());
        result->Print("After fitting");
      }
      return result;
    } else if (debug) {
      tprintf("Ragged tab used too many used points: %d out of %d\n", confirmed_points, pt_count);
    }
  } else if (debug) {
    tprintf(
        "Tab vector failed basic tests: pt count %d vs min %d, "
        "length %d vs min %d, min grad %g\n",
        pt_count, align_params.min_points, end_y - start_y, align_params.min_length,
        abs(end_x - start_x) * kMinTabGradient);
  }
  return nullptr;
}
// Find a set of blobs that are aligned in the given vertical
// direction with the given blob. Returns a list of aligned
// blobs and the number in the list.
// For other parameters see FindAlignedBlob below.
int AlignedBlob::AlignTabs(const AlignedBlobParams &params, bool top_to_bottom, BLOBNBOX *bbox,
                           BLOBNBOX_CLIST *good_points, int *end_y) {
  int ptcount = 0;
  BLOBNBOX_C_IT it(good_points);
  TBOX box = bbox->bounding_box();
  bool debug = WithinTestRegion(2, box.left(), box.bottom());
  if (debug) {
    tprintf("Starting alignment run at blob:");
    box.print();
  }
  int x_start = params.right_tab ? box.right() : box.left();
  while (bbox != nullptr) {
    // Add the blob to the list if the appropriate side is a tab candidate,
    // or if we are working on a ragged tab.
    TabType type = params.right_tab ? bbox->right_tab_type() : bbox->left_tab_type();
    if (((type != TT_NONE && type != TT_MAYBE_RAGGED) || params.ragged) &&
        (it.empty() || it.data() != bbox)) {
      // Keep good_points ordered bottom-to-top: prepend when searching
      // downwards (top_to_bottom) and append when searching upwards.
      if (top_to_bottom) {
        it.add_before_then_move(bbox);
      } else {
        it.add_after_then_move(bbox);
      }
      ++ptcount;
    }
    // Find the next blob that is aligned with the current one.
    // FindAlignedBlob guarantees that forward progress will be made in the
    // top_to_bottom direction, and therefore eventually it will return nullptr,
    // making this while (bbox != nullptr) loop safe.
    bbox = FindAlignedBlob(params, top_to_bottom, bbox, x_start, end_y);
    if (bbox != nullptr) {
      box = bbox->bounding_box();
      if (!params.ragged) {
        // Aligned tabs track the tab edge of the latest blob; ragged tabs
        // keep the original x_start for the whole run.
        x_start = params.right_tab ? box.right() : box.left();
      }
    }
  }
  if (debug) {
    tprintf("Alignment run ended with %d pts at blob:", ptcount);
    box.print();
  }
  return ptcount;
}
// Search vertically for a blob that is aligned with the input bbox.
// The search parameters are determined by AlignedBlobParams.
// top_to_bottom tells whether to search down or up.
// The return value is nullptr if nothing was found in the search box
// or if a blob was found in the gutter. On a nullptr return, end_y
// is set to the edge of the search box or the leading edge of the
// gutter blob if one was found.
BLOBNBOX *AlignedBlob::FindAlignedBlob(const AlignedBlobParams &p, bool top_to_bottom,
                                       BLOBNBOX *bbox, int x_start, int *end_y) {
  TBOX box = bbox->bounding_box();
  // If there are separator lines, get the column edges.
  int left_column_edge = bbox->left_rule();
  int right_column_edge = bbox->right_rule();
  // start_y is used to guarantee that forward progress is made and the
  // search does not go into an infinite loop. New blobs must extend the
  // line beyond start_y.
  int start_y = top_to_bottom ? box.bottom() : box.top();
  if (WithinTestRegion(2, x_start, start_y)) {
    tprintf("Column edges for blob at (%d,%d)->(%d,%d) are [%d, %d]\n", box.left(), box.top(),
            box.right(), box.bottom(), left_column_edge, right_column_edge);
  }
  // Compute skew tolerance.
  int skew_tolerance = p.max_v_gap / kMaxSkewFactor;
  // Calculate xmin and xmax of the search box so that it contains
  // all possibly relevant boxes up to p.max_v_gap above or below according
  // to top_to_bottom.
  // Start with a notion of vertical with the current estimate.
  // x2 is the horizontal displacement implied by following the current
  // vertical direction for max_v_gap pixels (rounded to nearest).
  int x2 = (p.max_v_gap * p.vertical.x() + p.vertical.y() / 2) / p.vertical.y();
  if (top_to_bottom) {
    x2 = x_start - x2;
    *end_y = start_y - p.max_v_gap;
  } else {
    x2 = x_start + x2;
    *end_y = start_y + p.max_v_gap;
  }
  // Expand the box by an additional skew tolerance
  int xmin = std::min(x_start, x2) - skew_tolerance;
  int xmax = std::max(x_start, x2) + skew_tolerance;
  // Now add direction-specific tolerances.
  if (p.right_tab) {
    xmax += p.min_gutter;
    xmin -= p.l_align_tolerance;
  } else {
    xmax += p.r_align_tolerance;
    xmin -= p.min_gutter;
  }
  // Setup a vertical search for an aligned blob.
  BlobGridSearch vsearch(this);
  if (WithinTestRegion(2, x_start, start_y)) {
    tprintf("Starting %s %s search at %d-%d,%d, search_size=%d, gutter=%d\n",
            p.ragged ? "Ragged" : "Aligned", p.right_tab ? "Right" : "Left", xmin, xmax, start_y,
            p.max_v_gap, p.min_gutter);
  }
  vsearch.StartVerticalSearch(xmin, xmax, start_y);
  // result stores the best real return value.
  BLOBNBOX *result = nullptr;
  // The backup_result is not a tab candidate and can be used if no
  // real tab candidate result is found.
  BLOBNBOX *backup_result = nullptr;
  // neighbour is the blob that is currently being investigated.
  BLOBNBOX *neighbour = nullptr;
  while ((neighbour = vsearch.NextVerticalSearch(top_to_bottom)) != nullptr) {
    if (neighbour == bbox) {
      continue;
    }
    TBOX nbox = neighbour->bounding_box();
    // n_y is the vertical centre of the candidate neighbour.
    int n_y = (nbox.top() + nbox.bottom()) / 2;
    if ((!top_to_bottom && n_y > start_y + p.max_v_gap) ||
        (top_to_bottom && n_y < start_y - p.max_v_gap)) {
      if (WithinTestRegion(2, x_start, start_y)) {
        tprintf("Neighbour too far at (%d,%d)->(%d,%d)\n", nbox.left(), nbox.bottom(), nbox.right(),
                nbox.top());
      }
      break; // Gone far enough.
    }
    // It is CRITICAL to ensure that forward progress is made, (strictly
    // in/decreasing n_y) or the caller could loop infinitely, while
    // waiting for a sequence of blobs in a line to end.
    // NextVerticalSearch alone does not guarantee this, as there may be
    // more than one blob in a grid cell. See comment in AlignTabs.
    if ((n_y < start_y) != top_to_bottom || nbox.y_overlap(box)) {
      continue; // Only look in the required direction.
    }
    // If a good result is already in hand and this neighbour is more than a
    // grid cell further away vertically, nothing better can follow.
    if (result != nullptr && result->bounding_box().y_gap(nbox) > gridsize()) {
      return result; // This result is clear.
    }
    if (backup_result != nullptr && p.ragged && result == nullptr &&
        backup_result->bounding_box().y_gap(nbox) > gridsize()) {
      return backup_result; // This result is clear.
    }
    // If the neighbouring blob is the wrong side of a separator line, then it
    // "doesn't exist" as far as we are concerned.
    // x_at_n_y is the expected tab x position at height n_y, following the
    // current vertical direction from (x_start, start_y).
    int x_at_n_y = x_start + (n_y - start_y) * p.vertical.x() / p.vertical.y();
    if (x_at_n_y < neighbour->left_crossing_rule() || x_at_n_y > neighbour->right_crossing_rule()) {
      continue; // Separator line in the way.
    }
    int n_left = nbox.left();
    int n_right = nbox.right();
    int n_x = p.right_tab ? n_right : n_left;
    if (WithinTestRegion(2, x_start, start_y)) {
      tprintf("neighbour at (%d,%d)->(%d,%d), n_x=%d, n_y=%d, xatn=%d\n", nbox.left(),
              nbox.bottom(), nbox.right(), nbox.top(), n_x, n_y, x_at_n_y);
    }
    // A blob straddling the expected gutter region kills the line.
    if (p.right_tab && n_left < x_at_n_y + p.min_gutter &&
        n_right > x_at_n_y + p.r_align_tolerance &&
        (p.ragged || n_left < x_at_n_y + p.gutter_fraction * nbox.height())) {
      // In the gutter so end of line.
      if (bbox->right_tab_type() >= TT_MAYBE_ALIGNED) {
        bbox->set_right_tab_type(TT_DELETED);
      }
      *end_y = top_to_bottom ? nbox.top() : nbox.bottom();
      if (WithinTestRegion(2, x_start, start_y)) {
        tprintf("gutter\n");
      }
      return nullptr;
    }
    if (!p.right_tab && n_left < x_at_n_y - p.l_align_tolerance &&
        n_right > x_at_n_y - p.min_gutter &&
        (p.ragged || n_right > x_at_n_y - p.gutter_fraction * nbox.height())) {
      // In the gutter so end of line.
      if (bbox->left_tab_type() >= TT_MAYBE_ALIGNED) {
        bbox->set_left_tab_type(TT_DELETED);
      }
      *end_y = top_to_bottom ? nbox.top() : nbox.bottom();
      if (WithinTestRegion(2, x_start, start_y)) {
        tprintf("gutter\n");
      }
      return nullptr;
    }
    if ((p.right_tab && neighbour->leader_on_right()) ||
        (!p.right_tab && neighbour->leader_on_left())) {
      continue; // Neighbours of leaders are not allowed to be used.
    }
    if (n_x <= x_at_n_y + p.r_align_tolerance && n_x >= x_at_n_y - p.l_align_tolerance) {
      // Aligned so keep it. If it is a marked tab save it as result,
      // otherwise keep it as backup_result to return in case of later failure.
      if (WithinTestRegion(2, x_start, start_y)) {
        tprintf("aligned, seeking%d, l=%d, r=%d\n", p.right_tab, neighbour->left_tab_type(),
                neighbour->right_tab_type());
      }
      TabType n_type = p.right_tab ? neighbour->right_tab_type() : neighbour->left_tab_type();
      if (n_type != TT_NONE && (p.ragged || n_type != TT_MAYBE_RAGGED)) {
        if (result == nullptr) {
          result = neighbour;
        } else {
          // Keep the closest neighbour by Euclidean distance.
          // This prevents it from picking a tab blob in another column.
          const TBOX &old_box = result->bounding_box();
          int x_diff = p.right_tab ? old_box.right() : old_box.left();
          x_diff -= x_at_n_y;
          int y_diff = (old_box.top() + old_box.bottom()) / 2 - start_y;
          int old_dist = x_diff * x_diff + y_diff * y_diff;
          x_diff = n_x - x_at_n_y;
          y_diff = n_y - start_y;
          int new_dist = x_diff * x_diff + y_diff * y_diff;
          if (new_dist < old_dist) {
            result = neighbour;
          }
        }
      } else if (backup_result == nullptr) {
        if (WithinTestRegion(2, x_start, start_y)) {
          tprintf("Backup\n");
        }
        backup_result = neighbour;
      } else {
        // Prefer the backup that extends furthest towards the tab side.
        TBOX backup_box = backup_result->bounding_box();
        if ((p.right_tab && backup_box.right() < nbox.right()) ||
            (!p.right_tab && backup_box.left() > nbox.left())) {
          if (WithinTestRegion(2, x_start, start_y)) {
            tprintf("Better backup\n");
          }
          backup_result = neighbour;
        }
      }
    }
  }
  return result != nullptr ? result : backup_result;
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/textord/alignedblob.cpp
|
C++
|
apache-2.0
| 23,380
|
///////////////////////////////////////////////////////////////////////
// File: alignedblob.h
// Description: A class to find vertically aligned blobs in a BBGrid,
// and a struct to hold control parameters.
// Author: Ray Smith
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TEXTORD_ALIGNEDBLOB_H_
#define TESSERACT_TEXTORD_ALIGNEDBLOB_H_
#include "bbgrid.h"
#include "blobbox.h"
#include "tabvector.h"
namespace tesseract {
extern INT_VAR_H(textord_debug_bugs);
extern INT_VAR_H(textord_debug_tabfind);
extern BOOL_VAR_H(textord_debug_printable);
// Simple structure to hold the search parameters for AlignedBlob.
// The members are mostly derived from constants, which are
// conditioned on the alignment parameter.
// For finding vertical lines, a different set of constants are
// used, conditioned on the different constructor.
struct AlignedBlobParams {
  // Constructor to set the parameters for finding aligned and ragged tabs.
  // Vertical_x and vertical_y are the current estimates of the true vertical
  // direction (up) in the image. Height is the height of the starter blob.
  // v_gap_multiple is the multiple of height that will be used as a limit
  // on vertical gap before giving up and calling the line ended.
  // resolution is the original image resolution, and align0 indicates the
  // type of tab stop to be found.
  AlignedBlobParams(int vertical_x, int vertical_y, int height, int v_gap_multiple,
                    int min_gutter_width, int resolution, TabAlignment alignment0);
  // Constructor to set the parameters for finding vertical lines.
  // Vertical_x and vertical_y are the current estimates of the true vertical
  // direction (up) in the image. Width is the width of the starter blob.
  AlignedBlobParams(int vertical_x, int vertical_y, int width);

  // Fit the vertical vector into an ICOORD, which is 16 bit.
  void set_vertical(int vertical_x, int vertical_y);

  double gutter_fraction; // Multiple of height used for min_gutter.
  bool right_tab;         // We are looking at right edges.
  bool ragged;            // We are looking for a ragged (vs aligned) edge.
  TabAlignment alignment; // The type we are trying to produce.
  TabType confirmed_type; // Type to flag blobs if accepted.
  int max_v_gap;          // Max vertical gap to be tolerated (in pixels).
  int min_gutter;         // Minimum gutter between columns (in pixels).
  // Tolerances allowed on horizontal alignment of aligned edges.
  int l_align_tolerance; // Left edges.
  int r_align_tolerance; // Right edges.
  // Conditions for accepting a line.
  int min_points; // Minimum number of points to be OK.
  int min_length; // Min length of completed line.

  ICOORD vertical; // Current estimate of logical vertical.
};
// The AlignedBlob class contains code to find vertically aligned blobs.
// This is factored out into a separate class, so it can be used by both
// vertical line finding (LineFind) and tabstop finding (TabFind).
class TESS_API AlignedBlob : public BlobGrid {
public:
  AlignedBlob(int gridsize, const ICOORD &bleft, const ICOORD &tright);
  // Out-of-line destructor so the vtable is emitted in one place.
  ~AlignedBlob() override;

  // Return true if the given coordinates are within the test rectangle
  // and the debug level is at least the given detail level.
  static bool WithinTestRegion(int detail_level, int x, int y);

  // Display the tab codes of the BLOBNBOXes in this grid.
  ScrollView *DisplayTabs(const char *window_name, ScrollView *tab_win);

  // Finds a vector corresponding to a set of vertically aligned blob edges
  // running through the given box. The type of vector returned and the
  // search parameters are determined by the AlignedBlobParams.
  // vertical_x and y are updated with an estimate of the real
  // vertical direction. (skew finding.)
  // Returns nullptr if no decent vector can be found.
  TabVector *FindVerticalAlignment(AlignedBlobParams align_params, BLOBNBOX *bbox, int *vertical_x,
                                   int *vertical_y);

private:
  // Find a set of blobs that are aligned in the given vertical
  // direction with the given blob. Returns a list of aligned
  // blobs and the number in the list.
  // For other parameters see FindAlignedBlob below.
  int AlignTabs(const AlignedBlobParams &params, bool top_to_bottom, BLOBNBOX *bbox,
                BLOBNBOX_CLIST *good_points, int *end_y);

  // Search vertically for a blob that is aligned with the input bbox.
  // The search parameters are determined by AlignedBlobParams.
  // top_to_bottom tells whether to search down or up.
  // The return value is nullptr if nothing was found in the search box
  // or if a blob was found in the gutter. On a nullptr return, end_y
  // is set to the edge of the search box or the leading edge of the
  // gutter blob if one was found.
  BLOBNBOX *FindAlignedBlob(const AlignedBlobParams &p, bool top_to_bottom, BLOBNBOX *bbox,
                            int x_start, int *end_y);
};
} // namespace tesseract.
#endif // TESSERACT_TEXTORD_ALIGNEDBLOB_H_
|
2301_81045437/tesseract
|
src/textord/alignedblob.h
|
C++
|
apache-2.0
| 5,632
|
///////////////////////////////////////////////////////////////////////
// File: baselinedetect.cpp
// Description: Initial Baseline Determination.
// Copyright 2012 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#define _USE_MATH_DEFINES // for M_PI
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "baselinedetect.h"
#include <allheaders.h>
#include <algorithm>
#include <cfloat> // for FLT_MAX
#include <cmath> // for M_PI
#include "blobbox.h"
#include "detlinefit.h"
#include "drawtord.h"
#include "helpers.h"
#include "linlsq.h"
#include "makerow.h"
#include "textord.h"
#include "tprintf.h"
#include "underlin.h"
// Number of displacement modes kept in displacement_modes_.
const int kMaxDisplacementsModes = 3;
// Number of points to skip (at each end) when retrying the initial fit.
const int kNumSkipPoints = 3;
// Max angle deviation (in radians) allowed to keep the independent baseline.
const double kMaxSkewDeviation = 1.0 / 64;
// Fraction of line spacing estimate for quantization of blob displacements.
const double kOffsetQuantizationFactor = 3.0 / 64;
// Fraction of line spacing estimate for computing blob fit error.
const double kFitHalfrangeFactor = 6.0 / 64;
// Max fraction of line spacing allowed before a baseline counts as badly
// fitting.
const double kMaxBaselineError = 3.0 / 64;
// Multiple of linespacing that sets max_blob_size in TO_BLOCK.
// Copied from textord_excess_blobsize.
const double kMaxBlobSizeMultiple = 1.3;
// Min fraction of linespacing gaps that should be close to the model before
// we will force the linespacing model on all the lines.
const double kMinFittingLinespacings = 0.25;
// A y-coordinate within a textline that is to be debugged.
//#define kDebugYCoord 1525
namespace tesseract {
// Constructs a BaselineRow over the blobs of the given TO_ROW.
// line_spacing is the block's line spacing estimate (in pixels), used to
// scale the quantization factor and error tolerances used during fitting.
BaselineRow::BaselineRow(double line_spacing, TO_ROW *to_row)
    : blobs_(to_row->blob_list()),
      baseline_pt1_(0.0f, 0.0f),
      baseline_pt2_(0.0f, 0.0f),
      baseline_error_(0.0),
      good_baseline_(false) {
  ComputeBoundingBox();
  // Compute a scale factor for rounding to ints.
  disp_quant_factor_ = kOffsetQuantizationFactor * line_spacing;
  fit_halfrange_ = kFitHalfrangeFactor * line_spacing;
  max_baseline_error_ = kMaxBaselineError * line_spacing;
}
// Copies the fitted straight line into the legacy slope/intercept (m/c)
// representation on the given TO_ROW.
// TODO(rays) get rid of this when m and c are no longer used.
void BaselineRow::SetupOldLineParameters(TO_ROW *row) const {
  const double slope = tan(BaselineAngle());
  // The intercept is the y value of the fitted baseline at x = 0.
  const float intercept = StraightYAtX(0.0);
  row->set_line(slope, intercept, baseline_error_);
  row->set_parallel_line(slope, intercept, baseline_error_);
}
// Outputs diagnostic information: the fitted baseline endpoints, its angle
// and y-intercept, the fit tolerances and error, and the row bounding box.
void BaselineRow::Print() const {
  tprintf("Baseline (%g,%g)->(%g,%g), angle=%g, intercept=%g\n",
          baseline_pt1_.x(), baseline_pt1_.y(), baseline_pt2_.x(),
          baseline_pt2_.y(), BaselineAngle(), StraightYAtX(0.0));
  tprintf("Quant factor=%g, error=%g, good=%d, box:", disp_quant_factor_,
          baseline_error_, good_baseline_);
  bounding_box_.print();
}
// Returns the skew angle (in radians) of the current baseline in [-pi,pi].
double BaselineRow::BaselineAngle() const {
FCOORD baseline_dir(baseline_pt2_ - baseline_pt1_);
double angle = baseline_dir.angle();
// Baseline directions are only unique in a range of pi so constrain to
// [-pi/2, pi/2].
return fmod(angle + M_PI * 1.5, M_PI) - M_PI * 0.5;
}
// Computes and returns the linespacing at the middle of the overlap
// between this and other.
double BaselineRow::SpaceBetween(const BaselineRow &other) const {
// Find the x-centre of overlap of the lines.
float x = (std::max(bounding_box_.left(), other.bounding_box_.left()) +
std::min(bounding_box_.right(), other.bounding_box_.right())) /
2.0f;
// Find the vertical centre between them.
float y = (StraightYAtX(x) + other.StraightYAtX(x)) / 2.0f;
// Find the perpendicular distance of (x,y) from each line.
FCOORD pt(x, y);
return PerpDistanceFromBaseline(pt) + other.PerpDistanceFromBaseline(pt);
}
// Computes and returns the displacement of the center of the line
// perpendicular to the given direction.
double BaselineRow::PerpDisp(const FCOORD &direction) const {
float middle_x = (bounding_box_.left() + bounding_box_.right()) / 2.0f;
FCOORD middle_pos(middle_x, StraightYAtX(middle_x));
return direction * middle_pos / direction.length();
}
// Computes the y coordinate at the given x using the straight baseline
// defined by baseline_pt1_ and baseline_pt2_.
double BaselineRow::StraightYAtX(double x) const {
  const double dx = baseline_pt2_.x() - baseline_pt1_.x();
  // Degenerate (vertical or zero-length) segment: no unique y at x, so
  // fall back to the mean of the endpoint y values.
  if (dx == 0.0) {
    return (baseline_pt1_.y() + baseline_pt2_.y()) / 2.0;
  }
  const double dy = baseline_pt2_.y() - baseline_pt1_.y();
  return baseline_pt1_.y() + (x - baseline_pt1_.x()) * dy / dx;
}
// Fits a straight baseline to the points. Returns true if it had enough
// points to be reasonably sure of the fitted baseline.
// If use_box_bottoms is false, baselines positions are formed by
// considering the outlines of the blobs.
bool BaselineRow::FitBaseline(bool use_box_bottoms) {
  // Deterministic fitting is used wherever possible.
  fitter_.Clear();
  // Linear least squares is a backup if the DetLineFit produces a bad line.
  LLSQ llsq;
  BLOBNBOX_IT blob_it(blobs_);

  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    BLOBNBOX *blob = blob_it.data();
    if (!use_box_bottoms) {
      blob->EstimateBaselinePosition();
    }
    const TBOX &box = blob->bounding_box();
    // Each blob contributes one point at its horizontal centre.
    int x_middle = (box.left() + box.right()) / 2;
#ifdef kDebugYCoord
    if (box.bottom() < kDebugYCoord && box.top() > kDebugYCoord) {
      tprintf("Box bottom = %d, baseline pos=%d for box at:", box.bottom(),
              blob->baseline_position());
      box.print();
    }
#endif
    fitter_.Add(ICOORD(x_middle, blob->baseline_position()), box.width() / 2);
    llsq.add(x_middle, blob->baseline_position());
  }
  // Fit the line.
  ICOORD pt1, pt2;
  baseline_error_ = fitter_.Fit(&pt1, &pt2);
  baseline_pt1_ = pt1;
  baseline_pt2_ = pt2;
  if (baseline_error_ > max_baseline_error_ &&
      fitter_.SufficientPointsForIndependentFit()) {
    // The fit was bad but there were plenty of points, so try skipping
    // the first and last few, and use the new line if it dramatically improves
    // the error of fit.
    double error = fitter_.Fit(kNumSkipPoints, kNumSkipPoints, &pt1, &pt2);
    if (error < baseline_error_ / 2.0) {
      baseline_error_ = error;
      baseline_pt1_ = pt1;
      baseline_pt2_ = pt2;
    }
  }
  int debug = 0;
#ifdef kDebugYCoord
  Print();
  debug = bounding_box_.bottom() < kDebugYCoord &&
                  bounding_box_.top() > kDebugYCoord
              ? 3
              : 2;
#endif
  // Now we obtained a direction from that fit, see if we can improve the
  // fit using the same direction and some other start point.
  FCOORD direction(pt2 - pt1);
  double target_offset = direction * pt1;
  good_baseline_ = false;
  // FitConstrainedIfBetter sets good_baseline_ on success.
  FitConstrainedIfBetter(debug, direction, 0.0, target_offset);
  // Wild lines can be produced because DetLineFit allows vertical lines, but
  // vertical text has been rotated so angles over pi/4 should be disallowed.
  // Near vertical lines can still be produced by vertically aligned components
  // on very short lines.
  double angle = BaselineAngle();
  if (fabs(angle) > M_PI * 0.25) {
    // Use the llsq fit as a backup.
    baseline_pt1_ = llsq.mean_point();
    baseline_pt2_ = baseline_pt1_ + FCOORD(1.0f, llsq.m());
    // TODO(rays) get rid of this when m and c are no longer used.
    double m = llsq.m();
    double c = llsq.c(m);
    baseline_error_ = llsq.rms(m, c);
    good_baseline_ = false;
  }
  return good_baseline_;
}
// Modifies an existing result of FitBaseline to be parallel to the given
// direction vector if that produces a better result.
void BaselineRow::AdjustBaselineToParallel(int debug, const FCOORD &direction) {
  SetupBlobDisplacements(direction);
  // No displacement modes means there was nothing to fit against, so keep
  // the existing baseline.
  if (displacement_modes_.empty()) {
    return;
  }
#ifdef kDebugYCoord
  if (bounding_box_.bottom() < kDebugYCoord &&
      bounding_box_.top() > kDebugYCoord && debug < 3)
    debug = 3;
#endif
  // Refit constrained to the given direction, targeting the strongest mode.
  FitConstrainedIfBetter(debug, direction, 0.0, displacement_modes_[0]);
}
// Modifies the baseline to snap to the textline grid if the existing
// result is not good enough. Returns the row's perpendicular displacement
// modulo line_spacing (or line_offset unchanged for an empty row).
double BaselineRow::AdjustBaselineToGrid(int debug, const FCOORD &direction,
                                         double line_spacing,
                                         double line_offset) {
  if (blobs_->empty()) {
    if (debug > 1) {
      tprintf("Row empty at:");
      bounding_box_.print();
    }
    return line_offset;
  }
  // Find the displacement_modes_ entry nearest to the grid.
  double best_error = 0.0;
  int best_index = -1;
  for (unsigned i = 0; i < displacement_modes_.size(); ++i) {
    double blob_y = displacement_modes_[i];
    double error =
        BaselineBlock::SpacingModelError(blob_y, line_spacing, line_offset);
    if (debug > 1) {
      tprintf("Mode at %g has error %g from model \n", blob_y, error);
    }
    if (best_index < 0 || error < best_error) {
      best_error = error;
      best_index = i;
    }
  }
  // We will move the baseline only if the chosen mode is close enough to the
  // model.
  double model_margin = max_baseline_error_ - best_error;
  if (best_index >= 0 && model_margin > 0.0) {
    // But if the current baseline is already close to the mode there is no
    // point, and only the potential to damage accuracy by changing its angle.
    double perp_disp = PerpDisp(direction);
    double shift = displacement_modes_[best_index] - perp_disp;
    if (fabs(shift) > max_baseline_error_) {
      if (debug > 1) {
        tprintf("Attempting linespacing model fit with mode %g to row at:",
                displacement_modes_[best_index]);
        bounding_box_.print();
      }
      // model_margin is passed as the cheat allowance: being close to the
      // model buys the new fit some slack over the old one.
      FitConstrainedIfBetter(debug, direction, model_margin,
                             displacement_modes_[best_index]);
    } else if (debug > 1) {
      tprintf("Linespacing model only moves current line by %g for row at:",
              shift);
      bounding_box_.print();
    }
  } else if (debug > 1) {
    tprintf("Linespacing model not close enough to any mode for row at:");
    bounding_box_.print();
  }
  return fmod(PerpDisp(direction), line_spacing);
}
// Sets up displacement_modes_ with the top few modes of the perpendicular
// distance of each blob from the given direction vector, after rounding.
// The displacements are quantized into buckets of disp_quant_factor_ and
// histogrammed; the kMaxDisplacementsModes strongest buckets are kept
// (strongest first), converted back to un-quantized displacements.
void BaselineRow::SetupBlobDisplacements(const FCOORD &direction) {
  // Set of perpendicular displacements of the blob bottoms from the required
  // baseline direction.
  std::vector<double> perp_blob_dists;
  displacement_modes_.clear();
  // Gather the skew-corrected position of every blob.
  // Fix: these accumulators are doubles, so initialize with double limits
  // (DBL_MAX) rather than the float limits (FLT_MAX) used previously.
  double min_dist = DBL_MAX;
  double max_dist = -DBL_MAX;
  BLOBNBOX_IT blob_it(blobs_);
#ifdef kDebugYCoord
  bool debug = false;
#endif
  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    BLOBNBOX *blob = blob_it.data();
    const TBOX &box = blob->bounding_box();
#ifdef kDebugYCoord
    if (box.bottom() < kDebugYCoord && box.top() > kDebugYCoord)
      debug = true;
#endif
    FCOORD blob_pos((box.left() + box.right()) / 2.0f,
                    blob->baseline_position());
    double offset = direction * blob_pos;
    perp_blob_dists.push_back(offset);
#ifdef kDebugYCoord
    if (debug) {
      tprintf("Displacement %g for blob at:", offset);
      box.print();
    }
#endif
    UpdateRange(offset, &min_dist, &max_dist);
  }
  // Set up a histogram using disp_quant_factor_ as the bucket size.
  STATS dist_stats(IntCastRounded(min_dist / disp_quant_factor_),
                   IntCastRounded(max_dist / disp_quant_factor_));
  for (double perp_blob_dist : perp_blob_dists) {
    dist_stats.add(IntCastRounded(perp_blob_dist / disp_quant_factor_), 1);
  }
  std::vector<KDPairInc<float, int>> scaled_modes;
  dist_stats.top_n_modes(kMaxDisplacementsModes, scaled_modes);
#ifdef kDebugYCoord
  if (debug) {
    // Fix: use the key() accessor (as in the live code below) and an
    // unsigned index so this debug path compiles when kDebugYCoord is set.
    for (size_t i = 0; i < scaled_modes.size(); ++i) {
      tprintf("Top mode = %g * %d\n", scaled_modes[i].key() * disp_quant_factor_,
              scaled_modes[i].data());
    }
  }
#endif
  for (auto &scaled_mode : scaled_modes) {
    // Convert the quantized bucket index back to a displacement.
    displacement_modes_.push_back(disp_quant_factor_ * scaled_mode.key());
  }
}
// Fits a line in the given direction to blobs that are close to the given
// target_offset perpendicular displacement from the direction. The fit
// error is allowed to be cheat_allowance worse than the existing fit, and
// will still be used.
// If cheat_allowance > 0, the new fit will be good and replace the current
// fit if it has better fit (with cheat) OR its error is below
// max_baseline_error_ and the old fit is marked bad.
// Otherwise the new fit will only replace the old if it is really better,
// or the old fit is marked bad and the new fit has sufficient points, as
// well as being within the max_baseline_error_.
void BaselineRow::FitConstrainedIfBetter(int debug, const FCOORD &direction,
                                         double cheat_allowance,
                                         double target_offset) {
  // Only points within +/- fit_halfrange_ (scaled by the direction length)
  // of target_offset take part in the constrained fit.
  double halfrange = fit_halfrange_ * direction.length();
  double min_dist = target_offset - halfrange;
  double max_dist = target_offset + halfrange;
  ICOORD line_pt;
  double new_error = fitter_.ConstrainedFit(direction, min_dist, max_dist,
                                            debug > 2, &line_pt);
  // Allow cheat_allowance off the new error
  new_error -= cheat_allowance;
  double old_angle = BaselineAngle();
  double new_angle = direction.angle();
  if (debug > 1) {
    tprintf("Constrained error = %g, original = %g", new_error,
            baseline_error_);
    tprintf(" angles = %g, %g, delta=%g vs threshold %g\n", old_angle,
            new_angle, new_angle - old_angle, kMaxSkewDeviation);
  }
  bool new_good_baseline =
      new_error <= max_baseline_error_ &&
      (cheat_allowance > 0.0 || fitter_.SufficientPointsForIndependentFit());
  // The new will replace the old if any are true:
  // 1. the new error is better
  // 2. the old is NOT good, but the new is
  // 3. there is a wild angular difference between them (assuming that the new
  //    is a better guess at the angle.)
  if (new_error <= baseline_error_ || (!good_baseline_ && new_good_baseline) ||
      fabs(new_angle - old_angle) > kMaxSkewDeviation) {
    baseline_error_ = new_error;
    baseline_pt1_ = line_pt;
    baseline_pt2_ = baseline_pt1_ + direction;
    good_baseline_ = new_good_baseline;
    if (debug > 1) {
      tprintf("Replacing with constrained baseline, good = %d\n",
              good_baseline_);
    }
  } else if (debug > 1) {
    tprintf("Keeping old baseline\n");
  }
}
// Returns the perpendicular distance of the point from the straight
// baseline.
float BaselineRow::PerpDistanceFromBaseline(const FCOORD &pt) const {
FCOORD baseline_vector(baseline_pt2_ - baseline_pt1_);
FCOORD offset_vector(pt - baseline_pt1_);
float distance = baseline_vector * offset_vector;
float sqlength = baseline_vector.sqlength();
if (sqlength == 0.0f) {
tprintf("unexpected baseline vector (0,0)\n");
return 0.0f;
}
return std::sqrt(distance * distance / sqlength);
}
// Computes the bounding box of the row.
void BaselineRow::ComputeBoundingBox() {
BLOBNBOX_IT it(blobs_);
TBOX box;
for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
box += it.data()->bounding_box();
}
bounding_box_ = box;
}
// Constructs a BaselineBlock over the given TO_BLOCK, recording the debug
// level and non-text flag, and building one BaselineRow per row of the
// block (with the row's blobs sorted first).
// NOTE(review): rows_ appears to own the new BaselineRow objects — confirm
// they are deleted by the destructor.
BaselineBlock::BaselineBlock(int debug_level, bool non_text, TO_BLOCK *block)
    : block_(block),
      debug_level_(debug_level),
      non_text_block_(non_text),
      good_skew_angle_(false),
      skew_angle_(0.0),
      line_spacing_(block->line_spacing),
      line_offset_(0.0),
      model_error_(0.0) {
  TO_ROW_IT row_it(block_->get_rows());
  for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) {
    // Sort the blobs on the rows.
    row_it.data()->blob_list()->sort(blob_x_order);
    rows_.push_back(new BaselineRow(block->line_spacing, row_it.data()));
  }
}
// Computes and returns the absolute error of the given perp_disp from the
// linespacing model, i.e. its distance from the nearest model line
// line_spacing * k + line_offset for integer k.
double BaselineBlock::SpacingModelError(double perp_disp, double line_spacing,
                                        double line_offset) {
  // Round to the nearest multiple of line_spacing + line offset.
  const int nearest_multiple =
      IntCastRounded((perp_disp - line_offset) / line_spacing);
  const double nearest_line = line_spacing * nearest_multiple + line_offset;
  return fabs(perp_disp - nearest_line);
}
// Fits straight line baselines and computes the skew angle from the
// median angle. Returns true if a good angle is found.
// If use_box_bottoms is false, baseline positions are formed by
// considering the outlines of the blobs.
bool BaselineBlock::FitBaselinesAndFindSkew(bool use_box_bottoms) {
  // Non-text (graphic) blocks have no meaningful skew.
  if (non_text_block_) {
    return false;
  }
  std::vector<double> angles;
  for (auto row : rows_) {
    // Only rows with a reliable baseline fit contribute to the estimate.
    if (row->FitBaseline(use_box_bottoms)) {
      double angle = row->BaselineAngle();
      angles.push_back(angle);
    }
    if (debug_level_ > 1) {
      row->Print();
    }
  }
  if (!angles.empty()) {
    // The circular median is robust to occasional badly-fitted rows.
    skew_angle_ = MedianOfCircularValues(M_PI, angles);
    good_skew_angle_ = true;
  } else {
    skew_angle_ = 0.0f;
    good_skew_angle_ = false;
  }
  if (debug_level_ > 0) {
    tprintf("Initial block skew angle = %g, good = %d\n", skew_angle_,
            good_skew_angle_);
  }
  return good_skew_angle_;
}
// Refits the baseline to a constrained angle, using the stored block
// skew if good enough, otherwise the supplied default skew.
// If there are enough rows and a plausible linespacing model is found,
// also snaps rows without a good baseline onto the model grid.
void BaselineBlock::ParallelizeBaselines(double default_block_skew) {
  if (non_text_block_) {
    return;
  }
  if (!good_skew_angle_) {
    skew_angle_ = default_block_skew;
  }
  if (debug_level_ > 0) {
    tprintf("Adjusting block to skew angle %g\n", skew_angle_);
  }
  // Unit vector along the (possibly defaulted) block skew direction.
  FCOORD direction(cos(skew_angle_), sin(skew_angle_));
  for (auto row : rows_) {
    row->AdjustBaselineToParallel(debug_level_, direction);
    if (debug_level_ > 1) {
      row->Print();
    }
  }
  // A linespacing model needs at least 3 rows to be worth fitting.
  if (rows_.size() < 3 || !ComputeLineSpacing()) {
    return;
  }
  // Enforce the line spacing model on all lines that don't yet have a good
  // baseline.
  // Start by finding the row that is best fitted to the model.
  unsigned best_row = 0;
  double best_error = SpacingModelError(rows_[0]->PerpDisp(direction),
                                        line_spacing_, line_offset_);
  for (unsigned r = 1; r < rows_.size(); ++r) {
    double error = SpacingModelError(rows_[r]->PerpDisp(direction),
                                     line_spacing_, line_offset_);
    if (error < best_error) {
      best_error = error;
      best_row = r;
    }
  }
  // Starting at the best fitting row, work outwards, syncing the offset.
  double offset = line_offset_;
  for (auto r = best_row + 1; r < rows_.size(); ++r) {
    offset = rows_[r]->AdjustBaselineToGrid(debug_level_, direction,
                                            line_spacing_, offset);
  }
  offset = line_offset_;
  // Cast before subtracting: best_row is unsigned, so best_row - 1 would
  // wrap to UINT_MAX when best_row == 0, and converting that to int is
  // implementation-defined before C++20.
  for (int r = static_cast<int>(best_row) - 1; r >= 0; --r) {
    offset = rows_[r]->AdjustBaselineToGrid(debug_level_, direction,
                                            line_spacing_, offset);
  }
}
// Sets the parameters in TO_BLOCK that are needed by subsequent processes.
void BaselineBlock::SetupBlockParameters() const {
  if (line_spacing_ > 0.0) {
    // Where was block_line_spacing set before?
    // Don't let the block's line_size exceed the computed spacing.
    float min_spacing =
        std::min(block_->line_spacing, static_cast<float>(line_spacing_));
    if (min_spacing < block_->line_size) {
      block_->line_size = min_spacing;
    }
    block_->line_spacing = line_spacing_;
    block_->baseline_offset = line_offset_;
    block_->max_blob_size = line_spacing_ * kMaxBlobSizeMultiple;
  }
  // Setup the parameters on all the rows.
  // rows_ was built one-to-one from block_->get_rows() in the constructor,
  // so the two sequences are iterated in lockstep here.
  TO_ROW_IT row_it(block_->get_rows());
  for (unsigned r = 0; r < rows_.size(); ++r, row_it.forward()) {
    BaselineRow *row = rows_[r];
    TO_ROW *to_row = row_it.data();
    row->SetupOldLineParameters(to_row);
  }
}
// Processing that is required before fitting baseline splines, but requires
// linear baselines in order to be successful:
// Removes noise if required
// Separates out underlines
// Pre-associates blob fragments.
// TODO(rays/joeliu) This entire section of code is inherited from the past
// and could be improved/eliminated.
// page_tr is used to size a debug window.
void BaselineBlock::PrepareForSplineFitting(ICOORD page_tr, bool remove_noise) {
  if (non_text_block_) {
    return;
  }
  if (remove_noise) {
    vigorous_noise_removal(block_);
  }
  // Identity rotation: the skew is communicated via the gradient instead.
  FCOORD rotation(1.0f, 0.0f);
  double gradient = tan(skew_angle_);
  separate_underlines(block_, gradient, rotation, true);
  pre_associate_blobs(page_tr, block_, rotation, true);
}
// Fits splines to the textlines, or creates fake QSPLINES from the straight
// baselines that are already on the TO_ROWs.
// As a side-effect, computes the xheights of the rows and the block.
// Although x-height estimation is conceptually separate, it is part of
// detecting perspective distortion and therefore baseline fitting.
void BaselineBlock::FitBaselineSplines(bool enable_splines,
                                       bool show_final_rows, Textord *textord) {
  double gradient = tan(skew_angle_);
  FCOORD rotation(1.0f, 0.0f);
  if (enable_splines) {
    textord->make_spline_rows(block_, gradient, show_final_rows);
  } else {
    // Make a fake spline from the existing line.
    TBOX block_box = block_->block->pdblk.bounding_box();
    TO_ROW_IT row_it = block_->get_rows();
    for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) {
      TO_ROW *row = row_it.data();
      // One segment spanning the block encodes the straight line
      // y = line_m * x + line_c (quadratic coefficient zero).
      int32_t xstarts[2] = {block_box.left(), block_box.right()};
      double coeffs[3] = {0.0, row->line_m(), row->line_c()};
      row->baseline = QSPLINE(1, xstarts, coeffs);
      textord->compute_row_xheight(row, block_->block->classify_rotation(),
                                   row->line_m(), block_->line_size);
    }
  }
  textord->compute_block_xheight(block_, gradient);
  block_->block->set_xheight(block_->xheight);
  if (textord_restore_underlines) { // fix underlines
    restore_underlined_blobs(block_);
  }
}
#ifndef GRAPHICS_DISABLED
// Draws the (straight) baselines and final blobs colored according to
// what was discarded as noise and what is associated with each row.
void BaselineBlock::DrawFinalRows(const ICOORD &page_tr) {
  if (non_text_block_) {
    return;
  }
  double gradient = tan(skew_angle_);
  FCOORD rotation(1.0f, 0.0f);
  int left_edge = block_->block->pdblk.bounding_box().left();
  ScrollView *win = create_to_win(page_tr);
  // Cycle through colors so adjacent rows are visually distinct.
  ScrollView::Color colour = ScrollView::RED;
  TO_ROW_IT row_it = block_->get_rows();
  for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) {
    plot_parallel_row(row_it.data(), gradient, left_edge, colour, rotation);
    colour = static_cast<ScrollView::Color>(colour + 1);
    if (colour > ScrollView::MAGENTA) {
      colour = ScrollView::RED;
    }
  }
  plot_blob_list(win, &block_->blobs, ScrollView::MAGENTA, ScrollView::WHITE);
  // Show discarded blobs.
  plot_blob_list(win, &block_->underlines, ScrollView::YELLOW,
                 ScrollView::CORAL);
  // Blobs still in block_->blobs at this stage were not claimed by any row.
  if (block_->blobs.length() > 0) {
    tprintf("%d blobs discarded as noise\n", block_->blobs.length());
  }
  draw_meanlines(block_, gradient, left_edge, ScrollView::WHITE, rotation);
}
#endif // !GRAPHICS_DISABLED
// Renders the spline baseline of every row of this block onto pix_in.
// No-op for non-text (graphic) blocks.
void BaselineBlock::DrawPixSpline(Image pix_in) {
  if (non_text_block_) {
    return;
  }
  TO_ROW_IT row_it = block_->get_rows();
  for (row_it.mark_cycle_pt(); !row_it.cycled_list(); row_it.forward()) {
    row_it.data()->baseline.plot(pix_in);
  }
}
// Top-level line-spacing calculation. Computes an estimate of the line-
// spacing, using the current baselines in the TO_ROWS of the block, and
// then refines it by fitting a regression line to the baseline positions
// as a function of their integer index.
// Returns true if it seems that the model is a reasonable fit to the
// observations.
bool BaselineBlock::ComputeLineSpacing() {
  FCOORD direction(cos(skew_angle_), sin(skew_angle_));
  std::vector<double> row_positions;
  ComputeBaselinePositions(direction, &row_positions);
  // A spacing model is meaningless with fewer than 2 rows.
  if (row_positions.size() < 2) {
    return false;
  }
  EstimateLineSpacing();
  RefineLineSpacing(row_positions);
  // Verify that the model is reasonable: count adjacent-row gaps that are
  // non-trivial (bigger than the allowed baseline error) and check how many
  // of those are within tolerance of the fitted spacing.
  double max_baseline_error = kMaxBaselineError * line_spacing_;
  int non_trivial_gaps = 0;
  int fitting_gaps = 0;
  for (unsigned i = 1; i < row_positions.size(); ++i) {
    double row_gap = fabs(row_positions[i - 1] - row_positions[i]);
    if (row_gap > max_baseline_error) {
      ++non_trivial_gaps;
      if (fabs(row_gap - line_spacing_) <= max_baseline_error) {
        ++fitting_gaps;
      }
    }
  }
  if (debug_level_ > 0) {
    tprintf("Spacing %g, in %zu rows, %d gaps fitted out of %d non-trivial\n",
            line_spacing_, row_positions.size(), fitting_gaps,
            non_trivial_gaps);
  }
  return fitting_gaps > non_trivial_gaps * kMinFittingLinespacings;
}
// Computes the deskewed vertical position of each baseline in the block and
// stores them in the given vector.
// This is calculated as the perpendicular distance of the middle of each
// baseline (in case it has a different skew angle) from the line passing
// through the origin parallel to the block baseline angle.
// NOTE that "distance" above is a signed quantity so we can tell which side
// of the block baseline a line sits, hence the function and argument name
// positions not distances.
void BaselineBlock::ComputeBaselinePositions(const FCOORD &direction,
                                             std::vector<double> *positions) {
  positions->clear();
  for (auto row : rows_) {
    // Sample each row's straight baseline at the horizontal middle of its
    // bounding box.
    const TBOX &row_box = row->bounding_box();
    float x_middle = (row_box.left() + row_box.right()) / 2.0f;
    FCOORD row_pos(x_middle, static_cast<float>(row->StraightYAtX(x_middle)));
    // NOTE(review): relies on FCOORD::operator* being the 2D cross product,
    // giving the signed perpendicular component — confirm in points.h.
    float offset = direction * row_pos;
    positions->push_back(offset);
  }
}
// Computes an estimate of the line spacing of the block from the median
// of the spacings between adjacent overlapping textlines.
// Leaves line_spacing_ unchanged if no overlapping pair is found.
void BaselineBlock::EstimateLineSpacing() {
  std::vector<float> spacings;
  for (unsigned r = 0; r < rows_.size(); ++r) {
    BaselineRow *row = rows_[r];
    // Exclude silly lines.
    if (fabs(row->BaselineAngle()) > M_PI * 0.25) {
      continue;
    }
    // Find the first row after row that overlaps it significantly.
    const TBOX &row_box = row->bounding_box();
    unsigned r2;
    for (r2 = r + 1; r2 < rows_.size() &&
                     !row_box.major_x_overlap(rows_[r2]->bounding_box());
         ++r2) {
      ;
    }
    if (r2 < rows_.size()) {
      BaselineRow *row2 = rows_[r2];
      // Exclude silly lines.
      if (fabs(row2->BaselineAngle()) > M_PI * 0.25) {
        continue;
      }
      float spacing = row->SpaceBetween(*row2);
      spacings.push_back(spacing);
    }
  }
  // If we have at least one value, use it, otherwise leave the previous
  // value unchanged.
  if (!spacings.empty()) {
    // nth_element is enough to extract the median; full sort not needed.
    std::nth_element(spacings.begin(), spacings.begin() + spacings.size() / 2,
                     spacings.end());
    line_spacing_ = spacings[spacings.size() / 2];
    if (debug_level_ > 1) {
      tprintf("Estimate of linespacing = %g\n", line_spacing_);
    }
  }
}
// Refines the line spacing of the block by fitting a regression
// line to the deskewed y-position of each baseline as a function of its
// estimated line index, allowing for a small error in the initial linespacing
// and choosing the best available model.
void BaselineBlock::RefineLineSpacing(const std::vector<double> &positions) {
  double spacings[3], offsets[3], errors[3];
  int index_range;
  // Hypothesis 0: the current linespacing estimate is about right.
  errors[0] = FitLineSpacingModel(positions, line_spacing_, &spacings[0],
                                  &offsets[0], &index_range);
  if (index_range > 1) {
    // Hypothesis 1: one MORE line gap in the same span => smaller spacing.
    double spacing_plus = line_spacing_ / (1.0 + 1.0 / index_range);
    // Try the hypotheses that there might be index_range +/- 1 line spaces.
    errors[1] = FitLineSpacingModel(positions, spacing_plus, &spacings[1],
                                    &offsets[1], nullptr);
    // Hypothesis 2: one FEWER line gap => larger spacing.
    double spacing_minus = line_spacing_ / (1.0 - 1.0 / index_range);
    errors[2] = FitLineSpacingModel(positions, spacing_minus, &spacings[2],
                                    &offsets[2], nullptr);
    // Keep whichever hypothesis fits best.
    for (int i = 1; i <= 2; ++i) {
      if (errors[i] < errors[0]) {
        spacings[0] = spacings[i];
        offsets[0] = offsets[i];
        errors[0] = errors[i];
      }
    }
  }
  // Only commit a positive (valid) spacing.
  if (spacings[0] > 0.0) {
    line_spacing_ = spacings[0];
    line_offset_ = offsets[0];
    model_error_ = errors[0];
    if (debug_level_ > 0) {
      tprintf("Final linespacing model = %g + offset %g, error %g\n",
              line_spacing_, line_offset_, model_error_);
    }
  }
}
// Given an initial estimate of line spacing (m_in) and the positions of each
// baseline, computes the line spacing of the block more accurately in m_out,
// and the corresponding intercept in c_out, and the number of spacings seen
// in index_delta. Returns the error of fit to the line spacing model.
// Uses a simple linear regression, but optimized the offset using the median.
double BaselineBlock::FitLineSpacingModel(const std::vector<double> &positions,
                                          double m_in, double *m_out,
                                          double *c_out, int *index_delta) {
  // Degenerate input: nothing to fit; echo m_in back with zero error.
  if (m_in == 0.0f || positions.size() < 2) {
    *m_out = m_in;
    *c_out = 0.0;
    if (index_delta != nullptr) {
      *index_delta = 0;
    }
    return 0.0;
  }
  std::vector<double> offsets;
  // Get the offset (remainder) linespacing for each line and choose the median.
  // NOTE(review): fmod keeps the sign of the dividend, so offsets can be
  // negative for negative positions; MedianOfCircularValues is presumably
  // designed to cope with that — confirm.
  offsets.reserve(positions.size());
  for (double position : positions) {
    offsets.push_back(fmod(position, m_in));
  }
  // Get the median offset.
  double median_offset = MedianOfCircularValues(m_in, offsets);
  // Now fit a line to quantized line number and offset.
  LLSQ llsq;
  int min_index = INT32_MAX;
  int max_index = -INT32_MAX;
  for (double y_pos : positions) {
    int row_index = IntCastRounded((y_pos - median_offset) / m_in);
    UpdateRange(row_index, &min_index, &max_index);
    llsq.add(row_index, y_pos);
  }
  // Get the refined line spacing.
  *m_out = llsq.m();
  // Use the median offset rather than the mean.
  offsets.clear();
  if (*m_out != 0.0) {
    // Recompute the remainders against the refined spacing.
    for (double position : positions) {
      offsets.push_back(fmod(position, *m_out));
    }
    // Get the median offset.
    if (debug_level_ > 2) {
      for (unsigned i = 0; i < offsets.size(); ++i) {
        tprintf("%u: %g\n", i, offsets[i]);
      }
    }
    *c_out = MedianOfCircularValues(*m_out, offsets);
  } else {
    *c_out = 0.0;
  }
  if (debug_level_ > 1) {
    tprintf("Median offset = %g, compared to mean of %g.\n", *c_out,
            llsq.c(*m_out));
  }
  // Index_delta is the number of hypothesized line gaps present.
  if (index_delta != nullptr) {
    *index_delta = max_index - min_index;
  }
  // Use the regression model's intercept to compute the error, as it may be
  // a full line-spacing in disagreement with the median.
  double rms_error = llsq.rms(*m_out, llsq.c(*m_out));
  if (debug_level_ > 1) {
    tprintf("Linespacing of y=%g x + %g improved to %g x + %g, rms=%g\n", m_in,
            median_offset, *m_out, *c_out, rms_error);
  }
  return rms_error;
}
// Constructs a BaselineDetect over all the given TO_BLOCKs, creating one
// (owned) BaselineBlock per TO_BLOCK. page_skew is the page-level skew
// used as a fallback for blocks with too little information of their own.
BaselineDetect::BaselineDetect(int debug_level, const FCOORD &page_skew,
                               TO_BLOCK_LIST *blocks)
    : page_skew_(page_skew), debug_level_(debug_level) {
  TO_BLOCK_IT it(blocks);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    TO_BLOCK *to_block = it.data();
    BLOCK *block = to_block->block;
    POLY_BLOCK *pb = block->pdblk.poly_block();
    // A note about non-text blocks.
    // On output, non-text blocks are supposed to contain a single empty word
    // in each incoming text line. These mark out the polygonal bounds of the
    // block. Ideally no baselines should be required, but currently
    // make_words crashes if a baseline and xheight are not provided, so we
    // include non-text blocks here, but flag them for special treatment.
    bool non_text = pb != nullptr && !pb->IsText();
    blocks_.push_back(new BaselineBlock(debug_level_, non_text, to_block));
  }
}
// Finds the initial baselines for each TO_ROW in each TO_BLOCK, gathers
// block-wise and page-wise data to smooth small blocks/rows, and applies
// smoothing based on block/page-level skew and block-level linespacing.
void BaselineDetect::ComputeStraightBaselines(bool use_box_bottoms) {
  std::vector<double> block_skew_angles;
  for (auto bl_block : blocks_) {
    // NOTE(review): this message prints once per block, not once per page.
    if (debug_level_ > 0) {
      tprintf("Fitting initial baselines...\n");
    }
    if (bl_block->FitBaselinesAndFindSkew(use_box_bottoms)) {
      block_skew_angles.push_back(bl_block->skew_angle());
    }
  }
  // Compute a page-wide default skew for blocks with too little information.
  double default_block_skew = page_skew_.angle();
  if (!block_skew_angles.empty()) {
    default_block_skew = MedianOfCircularValues(M_PI, block_skew_angles);
  }
  if (debug_level_ > 0) {
    tprintf("Page skew angle = %g\n", default_block_skew);
  }
  // Set bad lines in each block to the default block skew and then force fit
  // a linespacing model where it makes sense to do so.
  for (auto bl_block : blocks_) {
    bl_block->ParallelizeBaselines(default_block_skew);
    bl_block->SetupBlockParameters(); // This replaced compute_row_stats.
  }
}
// Computes the baseline splines for each TO_ROW in each TO_BLOCK and
// other associated side-effects, including pre-associating blobs, computing
// x-heights and displaying debug information.
// NOTE that ComputeStraightBaselines must have been called first as this
// sets up data in the TO_ROWs upon which this function depends.
void BaselineDetect::ComputeBaselineSplinesAndXheights(const ICOORD &page_tr,
                                                       bool enable_splines,
                                                       bool remove_noise,
                                                       bool show_final_rows,
                                                       Textord *textord) {
  for (auto bl_block : blocks_) {
    // Noise removal/underline separation is only needed for real splines.
    if (enable_splines) {
      bl_block->PrepareForSplineFitting(page_tr, remove_noise);
    }
    bl_block->FitBaselineSplines(enable_splines, show_final_rows, textord);
#ifndef GRAPHICS_DISABLED
    if (show_final_rows) {
      bl_block->DrawFinalRows(page_tr);
    }
#endif
  }
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/textord/baselinedetect.cpp
|
C++
|
apache-2.0
| 35,666
|
///////////////////////////////////////////////////////////////////////
// File: baselinedetect.h
// Description: Initial Baseline Determination.
// Copyright 2012 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TEXTORD_BASELINEDETECT_H_
#define TESSERACT_TEXTORD_BASELINEDETECT_H_
#include "detlinefit.h"
#include "points.h"
#include "rect.h"
struct Pix;
namespace tesseract {
class Textord;
class BLOBNBOX_LIST;
class TO_BLOCK;
class TO_BLOCK_LIST;
class TO_ROW;
// Class to compute and hold baseline data for a TO_ROW.
class BaselineRow {
public:
  // Builds baseline-fitting state over the blobs of to_row. line_size is
  // presumably used to seed the displacement quantization — confirm in the
  // constructor implementation.
  BaselineRow(double line_size, TO_ROW *to_row);
  // Returns the bounding box of all the blobs on the row.
  const TBOX &bounding_box() const {
    return bounding_box_;
  }
  // Sets the TO_ROW with the output straight line.
  void SetupOldLineParameters(TO_ROW *row) const;
  // Outputs diagnostic information.
  void Print() const;
  // Returns the skew angle (in radians) of the current baseline in [-pi,pi].
  double BaselineAngle() const;
  // Computes and returns the linespacing at the middle of the overlap
  // between this and other.
  double SpaceBetween(const BaselineRow &other) const;
  // Computes and returns the displacement of the center of the line
  // perpendicular to the given direction.
  double PerpDisp(const FCOORD &direction) const;
  // Computes the y coordinate at the given x using the straight baseline
  // defined by baseline_pt1_ and baseline_pt2_.
  double StraightYAtX(double x) const;
  // Fits a straight baseline to the points. Returns true if it had enough
  // points to be reasonably sure of the fitted baseline.
  // If use_box_bottoms is false, baselines positions are formed by
  // considering the outlines of the blobs.
  bool FitBaseline(bool use_box_bottoms);
  // Modifies an existing result of FitBaseline to be parallel to the given
  // vector if that produces a better result.
  void AdjustBaselineToParallel(int debug, const FCOORD &direction);
  // Modifies the baseline to snap to the textline grid if the existing
  // result is not good enough.
  double AdjustBaselineToGrid(int debug, const FCOORD &direction, double line_spacing,
                              double line_offset);
private:
  // Sets up displacement_modes_ with the top few modes of the perpendicular
  // distance of each blob from the given direction vector, after rounding.
  void SetupBlobDisplacements(const FCOORD &direction);
  // Fits a line in the given direction to blobs that are close to the given
  // target_offset perpendicular displacement from the direction. The fit
  // error is allowed to be cheat_allowance worse than the existing fit, and
  // will still be used.
  // If cheat_allowance > 0, the new fit will be good and replace the current
  // fit if it has better fit (with cheat) OR its error is below
  // max_baseline_error_ and the old fit is marked bad.
  // Otherwise the new fit will only replace the old if it is really better,
  // or the old fit is marked bad and the new fit has sufficient points, as
  // well as being within the max_baseline_error_.
  void FitConstrainedIfBetter(int debug, const FCOORD &direction, double cheat_allowance,
                              double target_offset);
  // Returns the perpendicular distance of the point from the straight
  // baseline.
  float PerpDistanceFromBaseline(const FCOORD &pt) const;
  // Computes the bounding box of the row.
  void ComputeBoundingBox();
  // The blobs of the row to which this BaselineRow adds extra information
  // during baseline fitting. Note that blobs_ could easily come from either
  // a TO_ROW or a ColPartition.
  BLOBNBOX_LIST *blobs_;
  // Bounding box of all the blobs.
  TBOX bounding_box_;
  // Fitter used to fit lines to the blobs.
  DetLineFit fitter_;
  // 2 points on the straight baseline.
  FCOORD baseline_pt1_;
  FCOORD baseline_pt2_;
  // Set of modes of displacements. They indicate preferable baseline positions.
  std::vector<double> displacement_modes_;
  // Quantization factor used for displacement_modes_.
  double disp_quant_factor_;
  // Half the acceptance range of blob displacements for computing the
  // error during a constrained fit.
  double fit_halfrange_;
  // Max baseline error before a line is regarded as fitting badly.
  double max_baseline_error_;
  // The error of fit of the baseline.
  double baseline_error_;
  // True if this row seems to have a good baseline.
  bool good_baseline_;
};
// Class to compute and hold baseline data for a TO_BLOCK.
class BaselineBlock {
public:
  // Creates one (owned) BaselineRow per TO_ROW in block. non_text flags
  // graphic blocks that get minimal treatment.
  BaselineBlock(int debug_level, bool non_text, TO_BLOCK *block);
  // Deletes the BaselineRows created by the constructor.
  ~BaselineBlock() {
    for (auto row : rows_) {
      delete row;
    }
  }
  // Returns the wrapped TO_BLOCK (not owned).
  TO_BLOCK *block() const {
    return block_;
  }
  // Returns the current block skew estimate in radians.
  double skew_angle() const {
    return skew_angle_;
  }
  // Computes and returns the absolute error of the given perp_disp from the
  // given linespacing model.
  static double SpacingModelError(double perp_disp, double line_spacing, double line_offset);
  // Fits straight line baselines and computes the skew angle from the
  // median angle. Returns true if a good angle is found.
  // If use_box_bottoms is false, baseline positions are formed by
  // considering the outlines of the blobs.
  bool FitBaselinesAndFindSkew(bool use_box_bottoms);
  // Refits the baseline to a constrained angle, using the stored block
  // skew if good enough, otherwise the supplied default skew.
  void ParallelizeBaselines(double default_block_skew);
  // Sets the parameters in TO_BLOCK that are needed by subsequent processes.
  void SetupBlockParameters() const;
  // Processing that is required before fitting baseline splines, but requires
  // linear baselines in order to be successful:
  // Removes noise if required
  // Separates out underlines
  // Pre-associates blob fragments.
  // TODO(rays/joeliu) This entire section of code is inherited from the past
  // and could be improved/eliminated.
  // page_tr is used to size a debug window.
  void PrepareForSplineFitting(ICOORD page_tr, bool remove_noise);
  // Fits splines to the textlines, or creates fake QSPLINES from the straight
  // baselines that are already on the TO_ROWs.
  // As a side-effect, computes the xheights of the rows and the block.
  // Although x-height estimation is conceptually separate, it is part of
  // detecting perspective distortion and therefore baseline fitting.
  void FitBaselineSplines(bool enable_splines, bool show_final_rows, Textord *textord);
  // Draws the (straight) baselines and final blobs colored according to
  // what was discarded as noise and what is associated with each row.
  void DrawFinalRows(const ICOORD &page_tr);
  // Render the generated spline baselines for this block on pix_in.
  void DrawPixSpline(Image pix_in);
private:
  // Top-level line-spacing calculation. Computes an estimate of the line-
  // spacing, using the current baselines in the TO_ROWS of the block, and
  // then refines it by fitting a regression line to the baseline positions
  // as a function of their integer index.
  // Returns true if it seems that the model is a reasonable fit to the
  // observations.
  bool ComputeLineSpacing();
  // Computes the deskewed vertical position of each baseline in the block and
  // stores them in the given vector.
  void ComputeBaselinePositions(const FCOORD &direction, std::vector<double> *positions);
  // Computes an estimate of the line spacing of the block from the median
  // of the spacings between adjacent overlapping textlines.
  void EstimateLineSpacing();
  // Refines the line spacing of the block by fitting a regression
  // line to the deskewed y-position of each baseline as a function of its
  // estimated line index, allowing for a small error in the initial linespacing
  // and choosing the best available model.
  void RefineLineSpacing(const std::vector<double> &positions);
  // Given an initial estimate of line spacing (m_in) and the positions of each
  // baseline, computes the line spacing of the block more accurately in m_out,
  // and the corresponding intercept in c_out, and the number of spacings seen
  // in index_delta. Returns the error of fit to the line spacing model.
  double FitLineSpacingModel(const std::vector<double> &positions, double m_in, double *m_out,
                             double *c_out, int *index_delta);
  // The block to which this class adds extra information used during baseline
  // calculation. Not owned.
  TO_BLOCK *block_;
  // The rows in the block that we will be working with. Owned (see dtor).
  std::vector<BaselineRow *> rows_;
  // Amount of debugging output to provide.
  int debug_level_;
  // True if the block is non-text (graphic).
  bool non_text_block_;
  // True if the block has at least one good enough baseline to compute the
  // skew angle and therefore skew_angle_ is valid.
  bool good_skew_angle_;
  // Angle of skew in radians using the conventional anticlockwise from x-axis.
  double skew_angle_;
  // Current best estimate line spacing in pixels perpendicular to skew_angle_.
  double line_spacing_;
  // Offset for baseline positions, in pixels. Each baseline is at
  // line_spacing_ * n + line_offset_ for integer n, which represents
  // [textline] line number in a line numbering system that has line 0 on or
  // at least near the x-axis. Not equal to the actual line number of a line
  // within a block as most blocks are not near the x-axis.
  double line_offset_;
  // The error of the line spacing model.
  double model_error_;
};
// Top-level baseline detection over all the blocks of a page.
class BaselineDetect {
public:
  // Creates one (owned) BaselineBlock per TO_BLOCK. page_skew is the
  // page-level skew used as a fallback for uninformative blocks.
  BaselineDetect(int debug_level, const FCOORD &page_skew, TO_BLOCK_LIST *blocks);
  // Deletes the BaselineBlocks created by the constructor.
  ~BaselineDetect() {
    for (auto block : blocks_) {
      delete block;
    }
  }
  // Finds the initial baselines for each TO_ROW in each TO_BLOCK, gathers
  // block-wise and page-wise data to smooth small blocks/rows, and applies
  // smoothing based on block/page-level skew and block-level linespacing.
  void ComputeStraightBaselines(bool use_box_bottoms);
  // Computes the baseline splines for each TO_ROW in each TO_BLOCK and
  // other associated side-effects, including pre-associating blobs, computing
  // x-heights and displaying debug information.
  // NOTE that ComputeStraightBaselines must have been called first as this
  // sets up data in the TO_ROWs upon which this function depends.
  void ComputeBaselineSplinesAndXheights(const ICOORD &page_tr, bool enable_splines,
                                         bool remove_noise, bool show_final_rows, Textord *textord);
private:
  // Average (median) skew of the blocks on the page among those that have
  // a good angle of their own.
  FCOORD page_skew_;
  // Amount of debug output to produce.
  int debug_level_;
  // The blocks that we are working with. Owned (see dtor).
  std::vector<BaselineBlock *> blocks_;
};
} // namespace tesseract
#endif // TESSERACT_TEXTORD_BASELINEDETECT_H_
|
2301_81045437/tesseract
|
src/textord/baselinedetect.h
|
C++
|
apache-2.0
| 11,489
|