code
stringlengths 1
1.05M
| repo_name
stringlengths 6
83
| path
stringlengths 3
242
| language
stringclasses 222
values | license
stringclasses 20
values | size
int64 1
1.05M
|
|---|---|---|---|---|---|
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include <algorithm>
#include <allheaders.h>
#include "boxread.h"
#include "fontinfo.h"
//#include "helpers.h"
#include "indexmapbidi.h"
#include "intfeaturedist.h"
#include "intfeaturemap.h"
#include "intfeaturespace.h"
#include "shapetable.h"
#include "trainingsample.h"
#include "trainingsampleset.h"
#include "unicity_table.h"
namespace tesseract {
// Debug hook: when >= 0, restricts canonical-sample computation to this one
// unichar class id (see the kTestChar check in ComputeCanonicalSamples).
// -1 disables the restriction.
const int kTestChar = -1; // 37;
// Max number of distances to compute the squared way
const int kSquareLimit = 25;
// Prime numbers for subsampling distances.
const int kPrime1 = 17;
const int kPrime2 = 13;
// Initializes an empty font/class cell: no raw samples counted yet and no
// canonical sample chosen (-1 means "none selected").
TrainingSampleSet::FontClassInfo::FontClassInfo()
    : num_raw_samples(0), canonical_sample(-1), canonical_dist(0.0f) {}
// Writes to the given file. Returns false in case of error.
bool TrainingSampleSet::FontClassInfo::Serialize(FILE *fp) const {
if (fwrite(&num_raw_samples, sizeof(num_raw_samples), 1, fp) != 1) {
return false;
}
if (fwrite(&canonical_sample, sizeof(canonical_sample), 1, fp) != 1) {
return false;
}
if (fwrite(&canonical_dist, sizeof(canonical_dist), 1, fp) != 1) {
return false;
}
if (!::tesseract::Serialize(fp, samples)) {
return false;
}
return true;
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool TrainingSampleSet::FontClassInfo::DeSerialize(bool swap, FILE *fp) {
if (fread(&num_raw_samples, sizeof(num_raw_samples), 1, fp) != 1) {
return false;
}
if (fread(&canonical_sample, sizeof(canonical_sample), 1, fp) != 1) {
return false;
}
if (fread(&canonical_dist, sizeof(canonical_dist), 1, fp) != 1) {
return false;
}
if (!::tesseract::DeSerialize(swap, fp, samples)) {
return false;
}
if (swap) {
ReverseN(&num_raw_samples, sizeof(num_raw_samples));
ReverseN(&canonical_sample, sizeof(canonical_sample));
ReverseN(&canonical_dist, sizeof(canonical_dist));
}
return true;
}
// Constructs an empty sample set. The font metadata table is held by
// reference and must outlive this object.
TrainingSampleSet::TrainingSampleSet(const FontInfoTable &font_table)
    : num_raw_samples_(0)
    , unicharset_size_(0)
    , font_class_array_(nullptr)  // Built later by OrganizeByFontAndClass.
    , fontinfo_table_(font_table) {}
// Frees every owned TrainingSample and the font/class lookup array.
TrainingSampleSet::~TrainingSampleSet() {
  for (size_t i = 0; i < samples_.size(); ++i) {
    delete samples_[i];
  }
  delete font_class_array_;
}
// Writes to the given file. Returns false in case of error.
bool TrainingSampleSet::Serialize(FILE *fp) const {
if (!tesseract::Serialize(fp, samples_)) {
return false;
}
if (!unicharset_.save_to_file(fp)) {
return false;
}
if (!font_id_map_.Serialize(fp)) {
return false;
}
int8_t not_null = font_class_array_ != nullptr;
if (fwrite(¬_null, sizeof(not_null), 1, fp) != 1) {
return false;
}
if (not_null) {
if (!font_class_array_->SerializeClasses(fp)) {
return false;
}
}
return true;
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool TrainingSampleSet::DeSerialize(bool swap, FILE *fp) {
if (!tesseract::DeSerialize(swap, fp, samples_)) {
return false;
}
num_raw_samples_ = samples_.size();
if (!unicharset_.load_from_file(fp)) {
return false;
}
if (!font_id_map_.DeSerialize(swap, fp)) {
return false;
}
delete font_class_array_;
font_class_array_ = nullptr;
int8_t not_null;
if (fread(¬_null, sizeof(not_null), 1, fp) != 1) {
return false;
}
if (not_null) {
FontClassInfo empty;
font_class_array_ = new GENERIC_2D_ARRAY<FontClassInfo>(1, 1, empty);
if (!font_class_array_->DeSerializeClasses(swap, fp)) {
return false;
}
}
unicharset_size_ = unicharset_.size();
return true;
}
// Load an initial unicharset, or set one up if the file cannot be read.
void TrainingSampleSet::LoadUnicharset(const char *filename) {
if (!unicharset_.load_from_file(filename)) {
tprintf(
"Failed to load unicharset from file %s\n"
"Building unicharset from scratch...\n",
filename);
unicharset_.clear();
// Add special characters as they were removed by the clear.
UNICHARSET empty;
unicharset_.AppendOtherUnicharset(empty);
}
unicharset_size_ = unicharset_.size();
}
// Adds a character sample to this sample set.
// If the unichar is not already in the local unicharset, it is added.
// Returns the unichar_id of the added sample, from the local unicharset.
int TrainingSampleSet::AddSample(const char *unichar, TrainingSample *sample) {
if (!unicharset_.contains_unichar(unichar)) {
unicharset_.unichar_insert(unichar);
if (unicharset_.size() > MAX_NUM_CLASSES) {
tprintf(
"Error: Size of unicharset in TrainingSampleSet::AddSample is "
"greater than MAX_NUM_CLASSES\n");
return -1;
}
}
UNICHAR_ID char_id = unicharset_.unichar_to_id(unichar);
AddSample(char_id, sample);
return char_id;
}
// Adds a character sample to this sample set with the given unichar_id,
// which must correspond to the local unicharset (in this).
// Adds a character sample to this sample set with the given unichar_id,
// which must correspond to the local unicharset (in this).
// Takes ownership of the sample pointer.
void TrainingSampleSet::AddSample(int unichar_id, TrainingSample *sample) {
  sample->set_class_id(unichar_id);
  samples_.push_back(sample);
  // Keep the cached counts in sync with the containers.
  num_raw_samples_ = samples_.size();
  unicharset_size_ = unicharset_.size();
}
// Returns the number of samples for the given font,class pair.
// If randomize is true, returns the number of samples accessible
// with randomizing on. (Increases the number of samples if small.)
// OrganizeByFontAndClass must have been already called.
int TrainingSampleSet::NumClassSamples(int font_id, int class_id, bool randomize) const {
ASSERT_HOST(font_class_array_ != nullptr);
if (font_id < 0 || class_id < 0 || font_id >= font_id_map_.SparseSize() ||
class_id >= unicharset_size_) {
// There are no samples because the font or class doesn't exist.
return 0;
}
int font_index = font_id_map_.SparseToCompact(font_id);
if (font_index < 0) {
return 0; // The font has no samples.
}
if (randomize) {
return (*font_class_array_)(font_index, class_id).samples.size();
} else {
return (*font_class_array_)(font_index, class_id).num_raw_samples;
}
}
// Gets a sample by its index.
// Gets a sample by its index.
// No bounds check: index must be in [0, num_samples()).
const TrainingSample *TrainingSampleSet::GetSample(int index) const {
  return samples_[index];
}
// Gets a sample by its font, class, index.
// OrganizeByFontAndClass must have been already called.
// Gets a sample by its font, class, index.
// OrganizeByFontAndClass must have been already called.
// Returns nullptr if the font has no samples.
const TrainingSample *TrainingSampleSet::GetSample(int font_id, int class_id, int index) const {
  ASSERT_HOST(font_class_array_ != nullptr);
  const int compact_font = font_id_map_.SparseToCompact(font_id);
  if (compact_font < 0) {
    return nullptr;
  }
  // The cell stores global sample indices; translate to the actual sample.
  const FontClassInfo &cell = (*font_class_array_)(compact_font, class_id);
  return samples_[cell.samples[index]];
}
// Get a sample by its font, class, index. Does not randomize.
// OrganizeByFontAndClass must have been already called.
// Get a sample by its font, class, index. Does not randomize.
// OrganizeByFontAndClass must have been already called.
// Returns nullptr if the font has no samples.
TrainingSample *TrainingSampleSet::MutableSample(int font_id, int class_id, int index) {
  ASSERT_HOST(font_class_array_ != nullptr);
  const int compact_font = font_id_map_.SparseToCompact(font_id);
  if (compact_font < 0) {
    return nullptr;
  }
  // Same lookup as the const GetSample overload, but returns mutable access.
  const FontClassInfo &cell = (*font_class_array_)(compact_font, class_id);
  return samples_[cell.samples[index]];
}
// Returns a string debug representation of the given sample:
// font, unichar_str, bounding box, page.
// Returns a string debug representation of the given sample:
// font, unichar_str, bounding box, page.
std::string TrainingSampleSet::SampleToString(const TrainingSample &sample) const {
  // Format the unichar + box + page part in box-file syntax.
  std::string box_part;
  MakeBoxFileStr(unicharset_.id_to_unichar(sample.class_id()), sample.bounding_box(),
                 sample.page_num(), box_part);
  // Prefix with the font name.
  std::string result(fontinfo_table_.at(sample.font_id()).name);
  result += " ";
  result += box_part;
  return result;
}
// Gets the combined set of features used by all the samples of the given
// font/class combination.
// Gets the combined set of features used by all the samples of the given
// font/class combination. ComputeCloudFeatures must have been called first.
const BitVector &TrainingSampleSet::GetCloudFeatures(int font_id, int class_id) const {
  const int compact_font = font_id_map_.SparseToCompact(font_id);
  // Unlike the sample accessors, a missing font is a hard error here.
  ASSERT_HOST(compact_font >= 0);
  return (*font_class_array_)(compact_font, class_id).cloud_features;
}
// Gets the indexed features of the canonical sample of the given
// font/class combination.
// Gets the indexed features of the canonical sample of the given
// font/class combination. ComputeCanonicalFeatures must have been called.
const std::vector<int> &TrainingSampleSet::GetCanonicalFeatures(int font_id, int class_id) const {
  const int compact_font = font_id_map_.SparseToCompact(font_id);
  // A missing font is a hard error here, matching GetCloudFeatures.
  ASSERT_HOST(compact_font >= 0);
  return (*font_class_array_)(compact_font, class_id).canonical_features;
}
// Returns the distance between the given UniCharAndFonts pair.
// If matched_fonts, only matching fonts, are considered, unless that yields
// the empty set.
// OrganizeByFontAndClass must have been already called.
// Returns the distance between the given UniCharAndFonts pair.
// If matched_fonts, only matching fonts, are considered, unless that yields
// the empty set.
// OrganizeByFontAndClass must have been already called.
// The result is the mean ClusterDistance over the sampled font pairs.
float TrainingSampleSet::UnicharDistance(const UnicharAndFonts &uf1, const UnicharAndFonts &uf2,
                                         bool matched_fonts, const IntFeatureMap &feature_map) {
  int num_fonts1 = uf1.font_ids.size();
  int c1 = uf1.unichar_id;
  int num_fonts2 = uf2.font_ids.size();
  int c2 = uf2.unichar_id;
  double dist_sum = 0.0;
  int dist_count = 0;
  const bool debug = false;
  if (matched_fonts) {
    // Compute distances only where fonts match.
    for (int i = 0; i < num_fonts1; ++i) {
      int f1 = uf1.font_ids[i];
      for (int j = 0; j < num_fonts2; ++j) {
        int f2 = uf2.font_ids[j];
        if (f1 == f2) {
          dist_sum += ClusterDistance(f1, c1, f2, c2, feature_map);
          ++dist_count;
        }
      }
    }
  } else if (num_fonts1 * num_fonts2 <= kSquareLimit) {
    // Small enough sets to compute all the distances.
    for (int i = 0; i < num_fonts1; ++i) {
      int f1 = uf1.font_ids[i];
      for (int j = 0; j < num_fonts2; ++j) {
        int f2 = uf2.font_ids[j];
        dist_sum += ClusterDistance(f1, c1, f2, c2, feature_map);
        if (debug) {
          tprintf("Cluster dist %d %d %d %d = %g\n", f1, c1, f2, c2,
                  ClusterDistance(f1, c1, f2, c2, feature_map));
        }
        ++dist_count;
      }
    }
  } else {
    // Subsample distances, using the largest set once, and stepping through
    // the smaller set so as to ensure that all the pairs are different.
    // The stride is a prime chosen to differ from num_fonts2 so that
    // consecutive iterations hit different elements of the second set.
    int increment = kPrime1 != num_fonts2 ? kPrime1 : kPrime2;
    int index = 0;
    int num_samples = std::max(num_fonts1, num_fonts2);
    for (int i = 0; i < num_samples; ++i, index += increment) {
      int f1 = uf1.font_ids[i % num_fonts1];
      int f2 = uf2.font_ids[index % num_fonts2];
      if (debug) {
        tprintf("Cluster dist %d %d %d %d = %g\n", f1, c1, f2, c2,
                ClusterDistance(f1, c1, f2, c2, feature_map));
      }
      dist_sum += ClusterDistance(f1, c1, f2, c2, feature_map);
      ++dist_count;
    }
  }
  if (dist_count == 0) {
    // No pairs were sampled. If we were restricted to matching fonts,
    // retry with all font pairs allowed; otherwise report zero distance.
    if (matched_fonts) {
      return UnicharDistance(uf1, uf2, false, feature_map);
    }
    return 0.0f;
  }
  return dist_sum / dist_count;
}
// Returns the distance between the given pair of font/class pairs.
// Finds in cache or computes and caches.
// OrganizeByFontAndClass must have been already called.
// Returns the distance between the given pair of font/class pairs.
// Finds in cache or computes and caches.
// OrganizeByFontAndClass must have been already called.
// Three cache tiers are used, chosen by which of the ids coincide:
//   same font  -> unichar_distance_cache indexed by class id,
//   same class -> font_distance_cache indexed by compact font index,
//   otherwise  -> a linear-searched list of FontClassDistance entries.
// Every computed value is mirrored into the symmetric cell's cache.
float TrainingSampleSet::ClusterDistance(int font_id1, int class_id1, int font_id2, int class_id2,
                                         const IntFeatureMap &feature_map) {
  ASSERT_HOST(font_class_array_ != nullptr);
  int font_index1 = font_id_map_.SparseToCompact(font_id1);
  int font_index2 = font_id_map_.SparseToCompact(font_id2);
  if (font_index1 < 0 || font_index2 < 0) {
    return 0.0f;
  }
  FontClassInfo &fc_info = (*font_class_array_)(font_index1, class_id1);
  if (font_id1 == font_id2) {
    // Special case cache for speed.
    // Caches are grown lazily; -1 marks "not yet computed".
    if (fc_info.unichar_distance_cache.empty()) {
      fc_info.unichar_distance_cache.resize(unicharset_size_, -1.0f);
    }
    if (fc_info.unichar_distance_cache[class_id2] < 0) {
      // Distance has to be calculated.
      float result = ComputeClusterDistance(font_id1, class_id1, font_id2, class_id2, feature_map);
      fc_info.unichar_distance_cache[class_id2] = result;
      // Copy to the symmetric cache entry.
      FontClassInfo &fc_info2 = (*font_class_array_)(font_index2, class_id2);
      if (fc_info2.unichar_distance_cache.empty()) {
        fc_info2.unichar_distance_cache.resize(unicharset_size_, -1.0f);
      }
      fc_info2.unichar_distance_cache[class_id1] = result;
    }
    return fc_info.unichar_distance_cache[class_id2];
  } else if (class_id1 == class_id2) {
    // Another special-case cache for equal class-id.
    if (fc_info.font_distance_cache.empty()) {
      fc_info.font_distance_cache.resize(font_id_map_.CompactSize(), -1.0f);
    }
    if (fc_info.font_distance_cache[font_index2] < 0) {
      // Distance has to be calculated.
      float result = ComputeClusterDistance(font_id1, class_id1, font_id2, class_id2, feature_map);
      fc_info.font_distance_cache[font_index2] = result;
      // Copy to the symmetric cache entry.
      FontClassInfo &fc_info2 = (*font_class_array_)(font_index2, class_id2);
      if (fc_info2.font_distance_cache.empty()) {
        fc_info2.font_distance_cache.resize(font_id_map_.CompactSize(), -1.0f);
      }
      fc_info2.font_distance_cache[font_index1] = result;
    }
    return fc_info.font_distance_cache[font_index2];
  }
  // Both font and class are different. Linear search for class_id2/font_id2
  // in what is a hopefully short list of distances.
  size_t cache_index = 0;
  while (cache_index < fc_info.distance_cache.size() &&
         (fc_info.distance_cache[cache_index].unichar_id != class_id2 ||
          fc_info.distance_cache[cache_index].font_id != font_id2)) {
    ++cache_index;
  }
  if (cache_index == fc_info.distance_cache.size()) {
    // Distance has to be calculated.
    float result = ComputeClusterDistance(font_id1, class_id1, font_id2, class_id2, feature_map);
    FontClassDistance fc_dist = {class_id2, font_id2, result};
    fc_info.distance_cache.push_back(fc_dist);
    // Copy to the symmetric cache entry. We know it isn't there already, as
    // we always copy to the symmetric entry.
    FontClassInfo &fc_info2 = (*font_class_array_)(font_index2, class_id2);
    fc_dist.unichar_id = class_id1;
    fc_dist.font_id = font_id1;
    fc_info2.distance_cache.push_back(fc_dist);
    // cache_index now points at the entry just appended to fc_info.
  }
  return fc_info.distance_cache[cache_index].distance;
}
// Computes the distance between the given pair of font/class pairs.
float TrainingSampleSet::ComputeClusterDistance(int font_id1, int class_id1, int font_id2,
int class_id2,
const IntFeatureMap &feature_map) const {
int dist = ReliablySeparable(font_id1, class_id1, font_id2, class_id2, feature_map, false);
dist += ReliablySeparable(font_id2, class_id2, font_id1, class_id1, feature_map, false);
int denominator = GetCanonicalFeatures(font_id1, class_id1).size();
denominator += GetCanonicalFeatures(font_id2, class_id2).size();
return static_cast<float>(dist) / denominator;
}
// Helper to add a feature and its near neighbors to the good_features.
// levels indicates how many times to compute the offset features of what is
// already there. This is done by iteration rather than recursion.
static void AddNearFeatures(const IntFeatureMap &feature_map, int f, int levels,
std::vector<int> *good_features) {
int prev_num_features = 0;
good_features->push_back(f);
int num_features = 1;
for (int level = 0; level < levels; ++level) {
for (int i = prev_num_features; i < num_features; ++i) {
int feature = (*good_features)[i];
for (int dir = -kNumOffsetMaps; dir <= kNumOffsetMaps; ++dir) {
if (dir == 0) {
continue;
}
int f1 = feature_map.OffsetFeature(feature, dir);
if (f1 >= 0) {
good_features->push_back(f1);
}
}
}
prev_num_features = num_features;
num_features = good_features->size();
}
}
// Returns the number of canonical features of font/class 2 for which
// neither the feature nor any of its near neighbors occurs in the cloud
// of font/class 1. Each such feature is a reliable separation between
// the classes, ASSUMING that the canonical sample is sufficiently
// representative that every sample has a feature near that particular
// feature. To check that this is so on the fly would be prohibitively
// expensive, but it might be possible to pre-qualify the canonical features
// to include only those for which this assumption is true.
// ComputeCanonicalFeatures and ComputeCloudFeatures must have been called
// first, or the results will be nonsense.
int TrainingSampleSet::ReliablySeparable(int font_id1, int class_id1, int font_id2, int class_id2,
const IntFeatureMap &feature_map, bool thorough) const {
int result = 0;
const TrainingSample *sample2 = GetCanonicalSample(font_id2, class_id2);
if (sample2 == nullptr) {
return 0; // There are no canonical features.
}
const std::vector<int> &canonical2 = GetCanonicalFeatures(font_id2, class_id2);
const BitVector &cloud1 = GetCloudFeatures(font_id1, class_id1);
if (cloud1.empty()) {
return canonical2.size(); // There are no cloud features.
}
// Find a canonical2 feature that is not in cloud1.
for (int feature : canonical2) {
if (cloud1[feature]) {
continue;
}
// Gather the near neighbours of f.
std::vector<int> good_features;
AddNearFeatures(feature_map, feature, 1, &good_features);
// Check that none of the good_features are in the cloud.
bool found = false;
for (auto good_f : good_features) {
if (cloud1[good_f]) {
found = true;
break;
}
}
if (found) {
continue; // Found one in the cloud.
}
++result;
}
return result;
}
// Returns the total index of the requested sample.
// OrganizeByFontAndClass must have been already called.
int TrainingSampleSet::GlobalSampleIndex(int font_id, int class_id, int index) const {
ASSERT_HOST(font_class_array_ != nullptr);
int font_index = font_id_map_.SparseToCompact(font_id);
if (font_index < 0) {
return -1;
}
return (*font_class_array_)(font_index, class_id).samples[index];
}
// Gets the canonical sample for the given font, class pair.
// ComputeCanonicalSamples must have been called first.
// Gets the canonical sample for the given font, class pair, or nullptr
// if there is none.
// ComputeCanonicalSamples must have been called first.
const TrainingSample *TrainingSampleSet::GetCanonicalSample(int font_id, int class_id) const {
  ASSERT_HOST(font_class_array_ != nullptr);
  const int compact_font = font_id_map_.SparseToCompact(font_id);
  if (compact_font < 0) {
    return nullptr;
  }
  const int canonical = (*font_class_array_)(compact_font, class_id).canonical_sample;
  if (canonical < 0) {
    return nullptr; // No canonical sample was chosen for this cell.
  }
  return samples_[canonical];
}
// Gets the max distance for the given canonical sample.
// ComputeCanonicalSamples must have been called first.
float TrainingSampleSet::GetCanonicalDist(int font_id, int class_id) const {
ASSERT_HOST(font_class_array_ != nullptr);
int font_index = font_id_map_.SparseToCompact(font_id);
if (font_index < 0) {
return 0.0f;
}
if ((*font_class_array_)(font_index, class_id).canonical_sample >= 0) {
return (*font_class_array_)(font_index, class_id).canonical_dist;
} else {
return 0.0f;
}
}
// Generates indexed features for all samples with the supplied feature_space.
void TrainingSampleSet::IndexFeatures(const IntFeatureSpace &feature_space) {
for (auto &sample : samples_) {
sample->IndexFeatures(feature_space);
}
}
// Marks the given sample index for deletion.
// Deletion is actually completed by DeleteDeadSamples.
void TrainingSampleSet::KillSample(TrainingSample *sample) {
sample->set_sample_index(-1);
}
// Deletes all samples with zero features marked by KillSample.
void TrainingSampleSet::DeleteDeadSamples() {
using namespace std::placeholders; // for _1
for (auto &&it = samples_.begin(); it < samples_.end();) {
if (*it == nullptr || (*it)->class_id() < 0) {
samples_.erase(it);
delete *it;
} else {
++it;
}
}
num_raw_samples_ = samples_.size();
// Samples must be re-organized now we have deleted a few.
}
// Construct an array to access the samples by font,class pair.
void TrainingSampleSet::OrganizeByFontAndClass() {
// Font indexes are sparse, so we used a map to compact them, so we can
// have an efficient 2-d array of fonts and character classes.
SetupFontIdMap();
int compact_font_size = font_id_map_.CompactSize();
// Get a 2-d array of generic vectors.
delete font_class_array_;
FontClassInfo empty;
font_class_array_ =
new GENERIC_2D_ARRAY<FontClassInfo>(compact_font_size, unicharset_size_, empty);
for (size_t s = 0; s < samples_.size(); ++s) {
int font_id = samples_[s]->font_id();
int class_id = samples_[s]->class_id();
if (font_id < 0 || font_id >= font_id_map_.SparseSize()) {
tprintf("Font id = %d/%d, class id = %d/%d on sample %zu\n", font_id,
font_id_map_.SparseSize(), class_id, unicharset_size_, s);
}
ASSERT_HOST(font_id >= 0 && font_id < font_id_map_.SparseSize());
ASSERT_HOST(class_id >= 0 && class_id < unicharset_size_);
int font_index = font_id_map_.SparseToCompact(font_id);
(*font_class_array_)(font_index, class_id).samples.push_back(s);
}
// Set the num_raw_samples member of the FontClassInfo, to set the boundary
// between the raw samples and the replicated ones.
for (int f = 0; f < compact_font_size; ++f) {
for (int c = 0; c < unicharset_size_; ++c) {
(*font_class_array_)(f, c).num_raw_samples = (*font_class_array_)(f, c).samples.size();
}
}
// This is the global number of samples and also marks the boundary between
// real and replicated samples.
num_raw_samples_ = samples_.size();
}
// Constructs the font_id_map_ which maps real font_ids (sparse) to a compact
// index for the font_class_array_.
void TrainingSampleSet::SetupFontIdMap() {
// Number of samples for each font_id.
std::vector<int> font_counts;
for (auto &sample : samples_) {
const int font_id = sample->font_id();
while (font_id >= font_counts.size()) {
font_counts.push_back(0);
}
++font_counts[font_id];
}
font_id_map_.Init(font_counts.size(), false);
for (size_t f = 0; f < font_counts.size(); ++f) {
font_id_map_.SetMap(f, font_counts[f] > 0);
}
font_id_map_.Setup();
}
// Finds the sample for each font, class pair that has least maximum
// distance to all the other samples of the same font, class.
// OrganizeByFontAndClass must have been already called.
void TrainingSampleSet::ComputeCanonicalSamples(const IntFeatureMap &map, bool debug) {
ASSERT_HOST(font_class_array_ != nullptr);
IntFeatureDist f_table;
if (debug) {
tprintf("feature table size %d\n", map.sparse_size());
}
f_table.Init(&map);
int worst_s1 = 0;
int worst_s2 = 0;
double global_worst_dist = 0.0;
// Compute distances independently for each font and char index.
int font_size = font_id_map_.CompactSize();
for (int font_index = 0; font_index < font_size; ++font_index) {
int font_id = font_id_map_.CompactToSparse(font_index);
for (int c = 0; c < unicharset_size_; ++c) {
int samples_found = 0;
FontClassInfo &fcinfo = (*font_class_array_)(font_index, c);
if (fcinfo.samples.empty() || (kTestChar >= 0 && c != kTestChar)) {
fcinfo.canonical_sample = -1;
fcinfo.canonical_dist = 0.0f;
if (debug) {
tprintf("Skipping class %d\n", c);
}
continue;
}
// The canonical sample will be the one with the min_max_dist, which
// is the sample with the lowest maximum distance to all other samples.
double min_max_dist = 2.0;
// We keep track of the farthest apart pair (max_s1, max_s2) which
// are max_max_dist apart, so we can see how bad the variability is.
double max_max_dist = 0.0;
int max_s1 = 0;
int max_s2 = 0;
fcinfo.canonical_sample = fcinfo.samples[0];
fcinfo.canonical_dist = 0.0f;
for (auto s1 : fcinfo.samples) {
const std::vector<int> &features1 = samples_[s1]->indexed_features();
f_table.Set(features1, features1.size(), true);
double max_dist = 0.0;
// Run the full squared-order search for similar samples. It is still
// reasonably fast because f_table.FeatureDistance is fast, but we
// may have to reconsider if we start playing with too many samples
// of a single char/font.
for (int s2 : fcinfo.samples) {
if (samples_[s2]->class_id() != c || samples_[s2]->font_id() != font_id || s2 == s1) {
continue;
}
std::vector<int> features2 = samples_[s2]->indexed_features();
double dist = f_table.FeatureDistance(features2);
if (dist > max_dist) {
max_dist = dist;
if (dist > max_max_dist) {
max_max_dist = dist;
max_s1 = s1;
max_s2 = s2;
}
}
}
// Using Set(..., false) is far faster than re initializing, due to
// the sparseness of the feature space.
f_table.Set(features1, features1.size(), false);
samples_[s1]->set_max_dist(max_dist);
++samples_found;
if (max_dist < min_max_dist) {
fcinfo.canonical_sample = s1;
fcinfo.canonical_dist = max_dist;
}
UpdateRange(max_dist, &min_max_dist, &max_max_dist);
}
if (max_max_dist > global_worst_dist) {
// Keep a record of the worst pair over all characters/fonts too.
global_worst_dist = max_max_dist;
worst_s1 = max_s1;
worst_s2 = max_s2;
}
if (debug) {
tprintf(
"Found %d samples of class %d=%s, font %d, "
"dist range [%g, %g], worst pair= %s, %s\n",
samples_found, c, unicharset_.debug_str(c).c_str(), font_index, min_max_dist,
max_max_dist, SampleToString(*samples_[max_s1]).c_str(),
SampleToString(*samples_[max_s2]).c_str());
}
}
}
if (debug) {
tprintf("Global worst dist = %g, between sample %d and %d\n", global_worst_dist, worst_s1,
worst_s2);
}
}
// Replicates the samples to a minimum frequency defined by
// 2 * kSampleRandomSize, or for larger counts duplicates all samples.
// After replication, the replicated samples are perturbed slightly, but
// in a predictable and repeatable way.
// Use after OrganizeByFontAndClass().
// Replicates the samples to a minimum frequency defined by
// 2 * kSampleRandomSize, or for larger counts duplicates all samples.
// After replication, the replicated samples are perturbed slightly, but
// in a predictable and repeatable way.
// Use after OrganizeByFontAndClass().
void TrainingSampleSet::ReplicateAndRandomizeSamples() {
  ASSERT_HOST(font_class_array_ != nullptr);
  int font_size = font_id_map_.CompactSize();
  for (int font_index = 0; font_index < font_size; ++font_index) {
    for (int c = 0; c < unicharset_size_; ++c) {
      FontClassInfo &fcinfo = (*font_class_array_)(font_index, c);
      int sample_count = fcinfo.samples.size();
      // Target is twice the larger of kSampleRandomSize and the current
      // count, so every non-empty cell gets replicated samples.
      int min_samples = 2 * std::max(kSampleRandomSize, sample_count);
      if (sample_count > 0 && sample_count < min_samples) {
        int base_count = sample_count;
        // Cycle through the original samples as replication sources.
        for (int base_index = 0; sample_count < min_samples; ++sample_count) {
          int src_index = fcinfo.samples[base_index++];
          if (base_index >= base_count) {
            base_index = 0;
          }
          // The perturbation index is deterministic, making replication
          // repeatable across runs.
          TrainingSample *sample =
              samples_[src_index]->RandomizedCopy(sample_count % kSampleRandomSize);
          int sample_index = samples_.size();
          sample->set_sample_index(sample_index);
          samples_.push_back(sample);
          fcinfo.samples.push_back(sample_index);
        }
      }
    }
  }
}
// Caches the indexed features of the canonical samples.
// ComputeCanonicalSamples must have been already called.
// TODO(rays) see note on ReliablySeparable and try restricting the
// canonical features to those that truly represent all samples.
void TrainingSampleSet::ComputeCanonicalFeatures() {
ASSERT_HOST(font_class_array_ != nullptr);
const int font_size = font_id_map_.CompactSize();
for (int font_index = 0; font_index < font_size; ++font_index) {
const int font_id = font_id_map_.CompactToSparse(font_index);
for (int c = 0; c < unicharset_size_; ++c) {
int num_samples = NumClassSamples(font_id, c, false);
if (num_samples == 0) {
continue;
}
const TrainingSample *sample = GetCanonicalSample(font_id, c);
FontClassInfo &fcinfo = (*font_class_array_)(font_index, c);
fcinfo.canonical_features = sample->indexed_features();
}
}
}
// Computes the combined set of features used by all the samples of each
// font/class combination. Use after ReplicateAndRandomizeSamples.
// Computes the combined set of features used by all the samples of each
// font/class combination. Use after ReplicateAndRandomizeSamples.
void TrainingSampleSet::ComputeCloudFeatures(int feature_space_size) {
  ASSERT_HOST(font_class_array_ != nullptr);
  const int num_compact_fonts = font_id_map_.CompactSize();
  for (int font_index = 0; font_index < num_compact_fonts; ++font_index) {
    const int font_id = font_id_map_.CompactToSparse(font_index);
    for (int c = 0; c < unicharset_size_; ++c) {
      const int num_samples = NumClassSamples(font_id, c, false);
      if (num_samples == 0) {
        continue;
      }
      FontClassInfo &fcinfo = (*font_class_array_)(font_index, c);
      fcinfo.cloud_features.Init(feature_space_size);
      // OR together the indexed features of every sample in this cell.
      for (int s = 0; s < num_samples; ++s) {
        const TrainingSample *sample = GetSample(font_id, c, s);
        for (int feature : sample->indexed_features()) {
          fcinfo.cloud_features.SetBit(feature);
        }
      }
    }
  }
}
// Adds all fonts of the given class to the shape.
void TrainingSampleSet::AddAllFontsForClass(int class_id, Shape *shape) const {
for (int f = 0; f < font_id_map_.CompactSize(); ++f) {
const int font_id = font_id_map_.CompactToSparse(f);
shape->AddToShape(class_id, font_id);
}
}
#ifndef GRAPHICS_DISABLED
// Display the samples with the given indexed feature that also match
// the given shape.
// Display the samples with the given indexed feature that also match
// the given shape.
void TrainingSampleSet::DisplaySamplesWithFeature(int f_index, const Shape &shape,
                                                  const IntFeatureSpace &space,
                                                  ScrollView::Color color,
                                                  ScrollView *window) const {
  // Only raw samples are considered; replicated copies are skipped.
  for (int s = 0; s < num_raw_samples(); ++s) {
    const TrainingSample *sample = GetSample(s);
    if (shape.ContainsUnichar(sample->class_id())) {
      // Re-index this sample's features in the given space, then draw it
      // if any of them matches f_index.
      std::vector<int> indexed_features;
      space.IndexAndSortFeatures(sample->features(), sample->num_features(), &indexed_features);
      for (int indexed_feature : indexed_features) {
        if (indexed_feature == f_index) {
          sample->DisplayFeatures(color, window);
        }
      }
    }
  }
}
#endif // !GRAPHICS_DISABLED
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/training/common/trainingsampleset.cpp
|
C++
|
apache-2.0
| 31,396
|
// Copyright 2010 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TRAINING_TRAININGSAMPLESET_H_
#define TESSERACT_TRAINING_TRAININGSAMPLESET_H_
#include "bitvector.h"
#include "indexmapbidi.h"
#include "matrix.h"
#include "shapetable.h"
#include "trainingsample.h"
namespace tesseract {
// Forward declarations keep this header light; the implementation file
// includes the full definitions.
class UNICHARSET;
struct FontInfo;
class FontInfoTable;
class IntFeatureMap;
class IntFeatureSpace;
class TrainingSample;
struct UnicharAndFonts;
// Collection of TrainingSample used for training or testing a classifier.
// Provides several useful methods to operate on the collection as a whole,
// including outlier detection and deletion, providing access by font and
// class, finding the canonical sample, finding the "cloud" features (OR of
// all features in all samples), replication of samples, caching of distance
// metrics.
class TrainingSampleSet {
public:
  explicit TrainingSampleSet(const FontInfoTable &fontinfo_table);
  ~TrainingSampleSet();
  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE *fp) const;
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE *fp);
  // Accessors
  // Total number of samples, including any replicated copies.
  int num_samples() const {
    return samples_.size();
  }
  // Number of samples before replication/randomization.
  int num_raw_samples() const {
    return num_raw_samples_;
  }
  // Number of fonts, counted in the sparse (real) font_id space.
  int NumFonts() const {
    return font_id_map_.SparseSize();
  }
  const UNICHARSET &unicharset() const {
    return unicharset_;
  }
  int charsetsize() const {
    return unicharset_size_;
  }
  const FontInfoTable &fontinfo_table() const {
    return fontinfo_table_;
  }
  // Loads an initial unicharset, or sets one up if the file cannot be read.
  void LoadUnicharset(const char *filename);
  // Adds a character sample to this sample set.
  // If the unichar is not already in the local unicharset, it is added.
  // Returns the unichar_id of the added sample, from the local unicharset.
  int AddSample(const char *unichar, TrainingSample *sample);
  // Adds a character sample to this sample set with the given unichar_id,
  // which must correspond to the local unicharset (in this).
  void AddSample(int unichar_id, TrainingSample *sample);
  // Returns the number of samples for the given font,class pair.
  // If randomize is true, returns the number of samples accessible
  // with randomizing on. (Increases the number of samples if small.)
  // OrganizeByFontAndClass must have been already called.
  int NumClassSamples(int font_id, int class_id, bool randomize) const;
  // Gets a sample by its index.
  const TrainingSample *GetSample(int index) const;
  // Gets a sample by its font, class, index.
  // OrganizeByFontAndClass must have been already called.
  const TrainingSample *GetSample(int font_id, int class_id, int index) const;
  // Get a sample by its font, class, index. Does not randomize.
  // OrganizeByFontAndClass must have been already called.
  TrainingSample *MutableSample(int font_id, int class_id, int index);
  // Returns a string debug representation of the given sample:
  // font, unichar_str, bounding box, page.
  std::string SampleToString(const TrainingSample &sample) const;
  // Gets the combined set of features used by all the samples of the given
  // font/class combination.
  const BitVector &GetCloudFeatures(int font_id, int class_id) const;
  // Gets the indexed features of the canonical sample of the given
  // font/class combination.
  const std::vector<int> &GetCanonicalFeatures(int font_id, int class_id) const;
  // Returns the distance between the given UniCharAndFonts pair.
  // If matched_fonts, only matching fonts, are considered, unless that yields
  // the empty set.
  // OrganizeByFontAndClass must have been already called.
  float UnicharDistance(const UnicharAndFonts &uf1, const UnicharAndFonts &uf2, bool matched_fonts,
                        const IntFeatureMap &feature_map);
  // Returns the distance between the given pair of font/class pairs.
  // Finds in cache or computes and caches.
  // OrganizeByFontAndClass must have been already called.
  float ClusterDistance(int font_id1, int class_id1, int font_id2, int class_id2,
                        const IntFeatureMap &feature_map);
  // Computes the distance between the given pair of font/class pairs.
  float ComputeClusterDistance(int font_id1, int class_id1, int font_id2, int class_id2,
                               const IntFeatureMap &feature_map) const;
  // Returns the number of canonical features of font/class 2 for which
  // neither the feature nor any of its near neighbors occurs in the cloud
  // of font/class 1. Each such feature is a reliable separation between
  // the classes, ASSUMING that the canonical sample is sufficiently
  // representative that every sample has a feature near that particular
  // feature. To check that this is so on the fly would be prohibitively
  // expensive, but it might be possible to pre-qualify the canonical features
  // to include only those for which this assumption is true.
  // ComputeCanonicalFeatures and ComputeCloudFeatures must have been called
  // first, or the results will be nonsense.
  int ReliablySeparable(int font_id1, int class_id1, int font_id2, int class_id2,
                        const IntFeatureMap &feature_map, bool thorough) const;
  // Returns the total index of the requested sample.
  // OrganizeByFontAndClass must have been already called.
  int GlobalSampleIndex(int font_id, int class_id, int index) const;
  // Gets the canonical sample for the given font, class pair.
  // ComputeCanonicalSamples must have been called first.
  const TrainingSample *GetCanonicalSample(int font_id, int class_id) const;
  // Gets the max distance for the given canonical sample.
  // ComputeCanonicalSamples must have been called first.
  float GetCanonicalDist(int font_id, int class_id) const;
  // Returns a mutable pointer to the sample with the given index.
  // Ownership is retained by this set.
  TrainingSample *mutable_sample(int index) {
    return samples_[index];
  }
  // Gets ownership of the sample with the given index, removing it from this.
  // The vacated slot is left as nullptr.
  TrainingSample *extract_sample(int index) {
    TrainingSample *sample = samples_[index];
    samples_[index] = nullptr;
    return sample;
  }
  // Generates indexed features for all samples with the supplied feature_space.
  void IndexFeatures(const IntFeatureSpace &feature_space);
  // Marks the given sample for deletion.
  // Deletion is actually completed by DeleteDeadSamples.
  void KillSample(TrainingSample *sample);
  // Deletes all samples with a negative sample index marked by KillSample.
  // Must be called before OrganizeByFontAndClass, and OrganizeByFontAndClass
  // must be called after as the samples have been renumbered.
  void DeleteDeadSamples();
  // Construct an array to access the samples by font,class pair.
  void OrganizeByFontAndClass();
  // Constructs the font_id_map_ which maps real font_ids (sparse) to a compact
  // index for the font_class_array_.
  void SetupFontIdMap();
  // Finds the sample for each font, class pair that has least maximum
  // distance to all the other samples of the same font, class.
  // OrganizeByFontAndClass must have been already called.
  void ComputeCanonicalSamples(const IntFeatureMap &map, bool debug);
  // Replicates the samples to a minimum frequency defined by
  // 2 * kSampleRandomSize, or for larger counts duplicates all samples.
  // After replication, the replicated samples are perturbed slightly, but
  // in a predictable and repeatable way.
  // Use after OrganizeByFontAndClass().
  void ReplicateAndRandomizeSamples();
  // Caches the indexed features of the canonical samples.
  // ComputeCanonicalSamples must have been already called.
  void ComputeCanonicalFeatures();
  // Computes the combined set of features used by all the samples of each
  // font/class combination. Use after ReplicateAndRandomizeSamples.
  void ComputeCloudFeatures(int feature_space_size);
  // Adds all fonts of the given class to the shape.
  void AddAllFontsForClass(int class_id, Shape *shape) const;
  // Display the samples with the given indexed feature that also match
  // the given shape.
  void DisplaySamplesWithFeature(int f_index, const Shape &shape,
                                 const IntFeatureSpace &feature_space, ScrollView::Color color,
                                 ScrollView *window) const;
private:
  // Struct to store a triplet of unichar, font, distance in the distance cache.
  struct FontClassDistance {
    int unichar_id;
    int font_id; // Real font id.
    float distance;
  };
  // Simple struct to store information related to each font/class combination.
  struct FontClassInfo {
    FontClassInfo();
    // Writes to the given file. Returns false in case of error.
    bool Serialize(FILE *fp) const;
    // Reads from the given file. Returns false in case of error.
    // If swap is true, assumes a big/little-endian swap is needed.
    bool DeSerialize(bool swap, FILE *fp);
    // Number of raw samples.
    int32_t num_raw_samples;
    // Index of the canonical sample.
    int32_t canonical_sample;
    // Max distance of the canonical sample from any other.
    float canonical_dist;
    // Sample indices for the samples, including replicated.
    std::vector<int32_t> samples;
    // Non-serialized cache data.
    // Indexed features of the canonical sample.
    std::vector<int> canonical_features;
    // The mapped features of all the samples.
    BitVector cloud_features;
    // Caches for ClusterDistance.
    // Caches for other fonts but matching this unichar. -1 indicates not set.
    // Indexed by compact font index from font_id_map_.
    std::vector<float> font_distance_cache;
    // Caches for other unichars but matching this font. -1 indicates not set.
    std::vector<float> unichar_distance_cache;
    // Cache for the rest (non matching font and unichar.)
    // A cache of distances computed by ReliablySeparable.
    std::vector<FontClassDistance> distance_cache;
  };
  // All samples in one flat array. Entries may become nullptr after
  // extract_sample transfers ownership out.
  std::vector<TrainingSample *> samples_;
  // Number of samples before replication/randomization.
  int num_raw_samples_;
  // Character set we are training for.
  UNICHARSET unicharset_;
  // Character set size to which the 2-d arrays below refer.
  int unicharset_size_;
  // Map to allow the font_class_array_ below to be compact.
  // The sparse space is the real font_id, used in samples_ .
  // The compact space is an index to font_class_array_
  IndexMapBiDi font_id_map_;
  // A 2-d array of FontClassInfo holding information related to each
  // (font_id, class_id) pair.
  GENERIC_2D_ARRAY<FontClassInfo> *font_class_array_;
  // Reference to the fontinfo_table_ in MasterTrainer. Provides names
  // for font_ids in the samples. Not serialized!
  const FontInfoTable &fontinfo_table_;
};
} // namespace tesseract.
#endif // TRAININGSAMPLESETSET_H_
|
2301_81045437/tesseract
|
src/training/common/trainingsampleset.h
|
C++
|
apache-2.0
| 11,592
|
///////////////////////////////////////////////////////////////////////
// File: dawg2wordlist.cpp
// Description: Program to create a word list from a DAWG and unicharset.
// Author: David Eger
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "commontraining.h" // CheckSharedLibraryVersion
#include "dawg.h"
#include "trie.h"
#include "unicharset.h"
#include "serialis.h"
using namespace tesseract;
// Reads a SquishedDawg from the given file.
// Prints a message and returns nullptr if the file cannot be opened or the
// dawg cannot be deserialized; otherwise returns the loaded dawg.
static std::unique_ptr<tesseract::Dawg> LoadSquishedDawg(const UNICHARSET &unicharset, const char *filename) {
  const int kDictDebugLevel = 1;
  tesseract::TFile dawg_file;
  if (!dawg_file.Open(filename, nullptr)) {
    tprintf("Could not open %s for reading.\n", filename);
    return nullptr;
  }
  tprintf("Loading word list from %s\n", filename);
  auto dawg = std::make_unique<tesseract::SquishedDawg>(tesseract::DAWG_TYPE_WORD, "eng",
                                                        SYSTEM_DAWG_PERM, kDictDebugLevel);
  if (dawg->Load(&dawg_file)) {
    tprintf("Word list loaded.\n");
    return dawg;
  }
  tprintf("Could not read %s\n", filename);
  return nullptr;
}
// Small helper that writes words, one per line, to an already-open FILE.
// Does not take ownership of the FILE; the caller must keep it open for the
// lifetime of this object and close it afterwards.
class WordOutputter {
public:
  // explicit: prevents accidental implicit conversion from FILE*.
  explicit WordOutputter(FILE *file) : file_(file) {}
  // Writes the given word followed by a newline.
  void output_word(const char *word) {
    fprintf(file_, "%s\n", word);
  }

private:
  FILE *file_; // Not owned.
};
// Dumps every word in dawg to outfile_name, one word per line.
// Returns 0 on success (the fclose result); EXIT_FAILURE if the output
// file could not be opened.
static int WriteDawgAsWordlist(const UNICHARSET &unicharset, const tesseract::Dawg *dawg,
                               const char *outfile_name) {
  FILE *out = fopen(outfile_name, "wb");
  if (out == nullptr) {
    tprintf("Could not open %s for writing.\n", outfile_name);
    return EXIT_FAILURE;
  }
  WordOutputter outputter(out);
  // Forward each enumerated word to the outputter.
  dawg->iterate_words(unicharset,
                      [&outputter](const char *word) { outputter.output_word(word); });
  return fclose(out);
}
int main(int argc, char *argv[]) {
tesseract::CheckSharedLibraryVersion();
if (argc > 1 && (!strcmp(argv[1], "-v") || !strcmp(argv[1], "--version"))) {
printf("%s\n", tesseract::TessBaseAPI::Version());
return 0;
} else if (argc != 4) {
tprintf("Print all the words in a given dawg.\n");
tprintf(
"Usage: %s -v | --version | %s <unicharset> <dawgfile> "
"<wordlistfile>\n",
argv[0], argv[0]);
return EXIT_FAILURE;
}
const char *unicharset_file = argv[1];
const char *dawg_file = argv[2];
const char *wordlist_file = argv[3];
UNICHARSET unicharset;
if (!unicharset.load_from_file(unicharset_file)) {
tprintf("Error loading unicharset from %s.\n", unicharset_file);
return EXIT_FAILURE;
}
auto dict = LoadSquishedDawg(unicharset, dawg_file);
if (dict == nullptr) {
tprintf("Error loading dictionary from %s.\n", dawg_file);
return EXIT_FAILURE;
}
int retval = WriteDawgAsWordlist(unicharset, dict.get(), wordlist_file);
return retval;
}
|
2301_81045437/tesseract
|
src/training/dawg2wordlist.cpp
|
C++
|
apache-2.0
| 3,469
|
/**********************************************************************
* File: degradeimage.cpp
* Description: Function to degrade an image (usually of text) as if it
* has been printed and then scanned.
* Authors: Ray Smith
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "degradeimage.h"
#include <allheaders.h> // from leptonica
#include <cstdlib>
#include "helpers.h" // For TRand.
#include "rect.h"
namespace tesseract {
// A randomized perspective distortion can be applied to synthetic input.
// The perspective distortion comes from leptonica, which uses 2 sets of 4
// corners to determine the distortion. There are random values for each of
// the x numbers x0..x3 and y0..y3, except for x2 and x3 which are instead
// defined in terms of a single shear value. This reduces the degrees of
// freedom enough to make the distortion more realistic than it would otherwise
// be if all 8 coordinates could move independently.
// One additional factor is used for the color of the pixels that don't exist
// in the source image.
// Name for each of the randomizing factors.
enum FactorNames {
  FN_INCOLOR, // Color of pixels brought in from outside the source image.
  FN_Y0,
  FN_Y1,
  FN_Y2,
  FN_Y3,
  FN_X0,
  FN_X1,
  FN_SHEAR,
  // x2 = x1 - shear
  // x3 = x0 + shear
  FN_NUM_FACTORS // Must be last: used as the size of the factors array.
};
// Rotation is +/- kRotationRange radians.
const float kRotationRange = 0.02f;
// Number of grey levels to shift by for each exposure step.
const int kExposureFactor = 16;
// Salt and pepper noise is +/- kSaltnPepper.
const int kSaltnPepper = 5;
// Min sum of width + height on which to operate the ramp.
const int kMinRampSize = 1000;
// Degrade the pix as if by a print/copy/scan cycle with exposure > 0
// corresponding to darkening on the copier and <0 lighter and 0 not copied.
// Exposures in [-2,2] are most useful, with -3 and 3 being extreme.
// If rotation is nullptr, rotation is skipped. If *rotation is non-zero, the
// pix is rotated by *rotation else it is randomly rotated and *rotation is
// modified.
//
// HOW IT WORKS:
// Most of the process is really dictated by the fact that the minimum
// available convolution is 3X3, which is too big really to simulate a
// good quality print/scan process. (2X2 would be better.)
// 1 pixel wide inputs are heavily smeared by the 3X3 convolution, making the
// images generally biased to being too light, so most of the work is to make
// them darker. 3 levels of thickening/darkening are achieved with 2 dilations,
// (using a greyscale erosion) one heavy (by being before convolution) and one
// light (after convolution).
// With no dilation, after convolution, the images are so light that a heavy
// constant offset is required to make the 0 image look reasonable. A simple
// constant offset multiple of exposure to undo this value is enough to achieve
// all the required lighting. This gives the advantage that exposure level 1
// with a single dilation gives a good impression of the broken-yet-too-dark
// problem that is often seen in scans.
// A small random rotation gives some varying greyscale values on the edges,
// and some random salt and pepper noise on top helps to realistically jaggy-up
// the edges.
// Finally a greyscale ramp provides a continuum of effects between exposure
// levels.
Image DegradeImage(Image input, int exposure, TRand *randomizer, float *rotation) {
  // Work in 8-bit grey throughout; binary input is converted first.
  Image pix = pixConvertTo8(input, false);
  input.destroy();
  input = pix;
  int width = pixGetWidth(input);
  int height = pixGetHeight(input);
  if (exposure >= 2) {
    // An erosion simulates the spreading darkening of a dark copy.
    // This is backwards to binary morphology,
    // see http://www.leptonica.com/grayscale-morphology.html
    pix = input;
    input = pixErodeGray(pix, 3, 3);
    pix.destroy();
  }
  // A convolution is essential to any mode as no scanner produces an
  // image as sharp as the electronic image.
  pix = pixBlockconv(input, 1, 1);
  input.destroy();
  // A small random rotation helps to make the edges jaggy in a realistic way.
  if (rotation != nullptr) {
    float radians_clockwise = 0.0f;
    // A preset non-zero *rotation wins; otherwise draw a random angle if a
    // randomizer was supplied, else leave the angle at 0.
    if (*rotation) {
      radians_clockwise = *rotation;
    } else if (randomizer != nullptr) {
      radians_clockwise = randomizer->SignedRand(kRotationRange);
    }
    input = pixRotate(pix, radians_clockwise, L_ROTATE_AREA_MAP, L_BRING_IN_WHITE, 0, 0);
    // Rotate the boxes to match.
    *rotation = radians_clockwise;
    pix.destroy();
  } else {
    input = pix;
  }
  if (exposure >= 3 || exposure == 1) {
    // Erosion after the convolution is not as heavy as before, so it is
    // good for level 1 and in addition as a level 3.
    // This is backwards to binary morphology,
    // see http://www.leptonica.com/grayscale-morphology.html
    pix = input;
    input = pixErodeGray(pix, 3, 3);
    pix.destroy();
  }
  // The convolution really needed to be 2x2 to be realistic enough, but
  // we only have 3x3, so we have to bias the image darker or lose thin
  // strokes.
  int erosion_offset = 0;
  // For light and 0 exposure, there is no dilation, so compensate for the
  // convolution with a big darkening bias which is undone for lighter
  // exposures.
  if (exposure <= 0) {
    erosion_offset = -3 * kExposureFactor;
  }
  // Add in a general offset of the greyscales for the exposure level so
  // a threshold of 128 gives a reasonable binary result.
  erosion_offset -= exposure * kExposureFactor;
  // Add a gradual fade over the page and a small amount of salt and pepper
  // noise to simulate noise in the sensor/paper fibres and varying
  // illumination.
  l_uint32 *data = pixGetData(input);
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      int pixel = GET_DATA_BYTE(data, x);
      if (randomizer != nullptr) {
        // Salt-and-pepper jitter in [-kSaltnPepper, kSaltnPepper]
        // (assumes IntRand() is non-negative -- see helpers.h).
        pixel += randomizer->IntRand() % (kSaltnPepper * 2 + 1) - kSaltnPepper;
      }
      if (height + width > kMinRampSize) {
        // Ramp: darkening increases towards the bottom-right of the page.
        pixel -= (2 * x + y) * 32 / (height + width);
      }
      pixel += erosion_offset;
      // Clamp back to the valid 8-bit grey range.
      if (pixel < 0) {
        pixel = 0;
      }
      if (pixel > 255) {
        pixel = 255;
      }
      SET_DATA_BYTE(data, x, pixel);
    }
    data += pixGetWpl(input);
  }
  return input;
}
// Creates and returns a Pix distorted by various means according to the bool
// flags. If boxes is not nullptr, the boxes are resized/positioned according to
// any spatial distortion and also by the integer reduction factor box_scale
// so they will match what the network will output.
// Returns nullptr on error. The returned Pix must be pixDestroyed.
Image PrepareDistortedPix(const Image pix, bool perspective, bool invert, bool white_noise,
                          bool smooth_noise, bool blur, int box_reduction, TRand *randomizer,
                          std::vector<TBOX> *boxes) {
  // NOTE: randomizer must not be nullptr; it is dereferenced unconditionally
  // whenever any of the random distortion flags is set.
  Image distorted = pix.copy();
  // Things to do to synthetic training data.
  // Each distortion is applied with ~50% probability: SignedRand(1.0) is
  // presumably symmetric about 0 (see helpers.h), so comparing with 0 flips
  // a fair coin.
  if ((white_noise || smooth_noise) && randomizer->SignedRand(1.0) > 0.0) {
    // TODO(rays) Cook noise in a more thread-safe manner than rand().
    // Attempt to make the sequences reproducible.
    srand(randomizer->IntRand());
    Image pixn = pixAddGaussianNoise(distorted, 8.0);
    distorted.destroy();
    if (smooth_noise) {
      // Smooth the noise with a block convolution.
      distorted = pixBlockconv(pixn, 1, 1);
      pixn.destroy();
    } else {
      distorted = pixn;
    }
  }
  if (blur && randomizer->SignedRand(1.0) > 0.0) {
    Image blurred = pixBlockconv(distorted, 1, 1);
    distorted.destroy();
    distorted = blurred;
  }
  if (perspective) {
    GeneratePerspectiveDistortion(0, 0, randomizer, &distorted, boxes);
  }
  if (boxes != nullptr) {
    // Scale the boxes down by the network's output reduction factor, and
    // guarantee a minimum width of 1 so no box becomes degenerate.
    for (auto &b : *boxes) {
      b.scale(1.0f / box_reduction);
      if (b.width() <= 0) {
        b.set_right(b.left() + 1);
      }
    }
  }
  // Fix: was written "< -0", which is identical to "< 0.0" but misleading.
  if (invert && randomizer->SignedRand(1.0) < 0.0) {
    pixInvert(distorted, distorted);
  }
  return distorted;
}
// Distorts anything that has a non-null pointer with the same pseudo-random
// perspective distortion. Width and height only need to be set if there
// is no pix. If there is a pix, then they will be taken from there.
void GeneratePerspectiveDistortion(int width, int height, TRand *randomizer, Image *pix,
                                   std::vector<TBOX> *boxes) {
  if (pix != nullptr && *pix != nullptr) {
    // When a pix is supplied, its dimensions override the given ones.
    width = pixGetWidth(*pix);
    height = pixGetHeight(*pix);
  }
  float *im_coeffs = nullptr;
  float *box_coeffs = nullptr;
  l_int32 incolor = ProjectiveCoeffs(width, height, randomizer, &im_coeffs, &box_coeffs);
  if (pix != nullptr && *pix != nullptr) {
    // Transform the image.
    Image transformed = pixProjective(*pix, im_coeffs, incolor);
    if (transformed == nullptr) {
      tprintf("Projective transformation failed!!\n");
      // Bug fix: the early return previously leaked both coefficient arrays.
      lept_free(im_coeffs);
      lept_free(box_coeffs);
      return;
    }
    pix->destroy();
    *pix = transformed;
  }
  if (boxes != nullptr) {
    // Transform the boxes. Each box is mapped corner-by-corner (the
    // height - y flips convert to/from leptonica's top-down coordinates)
    // and replaced by the bounding box of both transformed diagonals.
    for (auto &b : *boxes) {
      int x1, y1, x2, y2;
      const TBOX &box = b;
      projectiveXformSampledPt(box_coeffs, box.left(), height - box.top(), &x1, &y1);
      projectiveXformSampledPt(box_coeffs, box.right(), height - box.bottom(), &x2, &y2);
      TBOX new_box1(x1, height - y2, x2, height - y1);
      projectiveXformSampledPt(box_coeffs, box.left(), height - box.bottom(), &x1, &y1);
      projectiveXformSampledPt(box_coeffs, box.right(), height - box.top(), &x2, &y2);
      TBOX new_box2(x1, height - y1, x2, height - y2);
      b = new_box1.bounding_union(new_box2);
    }
  }
  lept_free(im_coeffs);
  lept_free(box_coeffs);
}
// Computes the coefficients of a randomized projective transformation.
// The image transform requires backward transformation coefficient, and the
// box transform the forward coefficients.
// Returns the incolor arg to pixProjective.
int ProjectiveCoeffs(int width, int height, TRand *randomizer, float **im_coeffs,
                     float **box_coeffs) {
  // Setup "from" points.
  Pta *src_pts = ptaCreate(4);
  ptaAddPt(src_pts, 0.0f, 0.0f);
  ptaAddPt(src_pts, width, 0.0f);
  ptaAddPt(src_pts, width, height);
  ptaAddPt(src_pts, 0.0f, height);
  // Extract factors from pseudo-random sequence.
  float factors[FN_NUM_FACTORS];
  float shear = 0.0f; // Shear is signed.
  for (int i = 0; i < FN_NUM_FACTORS; ++i) {
    // Everything is squared to make wild values rarer.
    if (i == FN_SHEAR) {
      // Shear is signed.
      shear = randomizer->SignedRand(0.5 / 3.0);
      shear = shear >= 0.0 ? shear * shear : -shear * shear;
      // Keep the sheared points within the original rectangle.
      // (Safe: FN_X0 and FN_X1 precede FN_SHEAR in the enum, so both have
      // already been assigned by earlier iterations.)
      if (shear < -factors[FN_X0]) {
        shear = -factors[FN_X0];
      }
      if (shear > factors[FN_X1]) {
        shear = factors[FN_X1];
      }
      factors[i] = shear;
    } else if (i != FN_INCOLOR) {
      factors[i] = fabs(randomizer->SignedRand(1.0));
      if (i <= FN_Y3) {
        factors[i] *= 5.0 / 8.0;
      } else {
        factors[i] *= 0.5;
      }
      factors[i] *= factors[i];
    } else {
      // Bug fix: FN_INCOLOR was previously never assigned, so the return
      // statement below read an uninitialized float (undefined behavior).
      // Draw it uniformly in [0, 1]; only its comparison with 0.5 matters,
      // so no scaling/squaring is applied. Note this consumes one extra
      // pseudo-random draw compared with the old (undefined) behavior.
      factors[i] = fabs(randomizer->SignedRand(1.0));
    }
  }
  // Setup "to" points.
  Pta *dest_pts = ptaCreate(4);
  ptaAddPt(dest_pts, factors[FN_X0] * width, factors[FN_Y0] * height);
  ptaAddPt(dest_pts, (1.0f - factors[FN_X1]) * width, factors[FN_Y1] * height);
  ptaAddPt(dest_pts, (1.0f - factors[FN_X1] + shear) * width, (1 - factors[FN_Y2]) * height);
  ptaAddPt(dest_pts, (factors[FN_X0] + shear) * width, (1 - factors[FN_Y3]) * height);
  // The image needs the backward (dest->src) coefficients; the boxes need
  // the forward (src->dest) coefficients.
  getProjectiveXformCoeffs(dest_pts, src_pts, im_coeffs);
  getProjectiveXformCoeffs(src_pts, dest_pts, box_coeffs);
  ptaDestroy(&src_pts);
  ptaDestroy(&dest_pts);
  return factors[FN_INCOLOR] > 0.5f ? L_BRING_IN_WHITE : L_BRING_IN_BLACK;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/degradeimage.cpp
|
C++
|
apache-2.0
| 12,171
|
/**********************************************************************
* File: degradeimage.h
* Description: Function to degrade an image (usually of text) as if it
* has been printed and then scanned.
* Authors: Ray Smith
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_DEGRADEIMAGE_H_
#define TESSERACT_TRAINING_DEGRADEIMAGE_H_
#include <allheaders.h>
#include "helpers.h" // For TRand.
#include "rect.h"
namespace tesseract {
// Degrade the pix as if by a print/copy/scan cycle with exposure > 0
// corresponding to darkening on the copier and <0 lighter and 0 not copied.
// If rotation is not nullptr, the clockwise rotation in radians is saved there.
// The input pix must be 8 bit grey. (Binary with values 0 and 255 is OK.)
// The input image is destroyed and a different image returned.
// randomizer may be nullptr, in which case no random rotation or noise is added.
Image DegradeImage(Image input, int exposure, TRand *randomizer, float *rotation);
// Creates and returns a Pix distorted by various means according to the bool
// flags. If boxes is not nullptr, the boxes are resized/positioned according to
// any spatial distortion and also by the integer reduction factor box_scale
// so they will match what the network will output.
// randomizer must not be nullptr.
// Returns nullptr on error. The returned Pix must be pixDestroyed.
Image PrepareDistortedPix(const Image pix, bool perspective, bool invert, bool white_noise,
                          bool smooth_noise, bool blur, int box_reduction, TRand *randomizer,
                          std::vector<TBOX> *boxes);
// Distorts anything that has a non-null pointer with the same pseudo-random
// perspective distortion. Width and height only need to be set if there
// is no pix. If there is a pix, then they will be taken from there.
void GeneratePerspectiveDistortion(int width, int height, TRand *randomizer, Image *pix,
                                   std::vector<TBOX> *boxes);
// Computes the coefficients of a randomized projective transformation.
// The image transform requires backward transformation coefficient, and the
// box transform the forward coefficients.
// Both coefficient arrays are allocated by leptonica; the caller must
// release them with lept_free.
// Returns the incolor arg to pixProjective.
int ProjectiveCoeffs(int width, int height, TRand *randomizer, float **im_coeffs,
                     float **box_coeffs);
} // namespace tesseract
#endif // TESSERACT_TRAINING_DEGRADEIMAGE_H_
|
2301_81045437/tesseract
|
src/training/degradeimage.h
|
C++
|
apache-2.0
| 2,946
|
///////////////////////////////////////////////////////////////////////
// File: lstmeval.cpp
// Description: Evaluation program for LSTM-based networks.
// Author: Ray Smith
//
// (C) Copyright 2016, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#include "commontraining.h"
#include "lstmtester.h"
#include "tprintf.h"
using namespace tesseract;
// Command-line flags for lstmeval.
static STRING_PARAM_FLAG(model, "", "Name of model file (training or recognition)");
static STRING_PARAM_FLAG(traineddata, "",
                         "If model is a training checkpoint, then traineddata must "
                         "be the traineddata file that was given to the trainer");
static STRING_PARAM_FLAG(eval_listfile, "", "File listing sample files in lstmf training format.");
static INT_PARAM_FLAG(max_image_MB, 2000, "Max memory to use for images.");
// Typo fix in user-facing help text: "diagnosting" -> "diagnostic".
static INT_PARAM_FLAG(verbosity, 1, "Amount of diagnostic information to output (0-2).");
int main(int argc, char **argv) {
tesseract::CheckSharedLibraryVersion();
ParseArguments(&argc, &argv);
if (FLAGS_model.empty()) {
tprintf("Must provide a --model!\n");
return EXIT_FAILURE;
}
if (FLAGS_eval_listfile.empty()) {
tprintf("Must provide a --eval_listfile!\n");
return EXIT_FAILURE;
}
tesseract::TessdataManager mgr;
if (!mgr.Init(FLAGS_model.c_str())) {
if (FLAGS_traineddata.empty()) {
tprintf("Must supply --traineddata to eval a training checkpoint!\n");
return EXIT_FAILURE;
}
tprintf("%s is not a recognition model, trying training checkpoint...\n", FLAGS_model.c_str());
if (!mgr.Init(FLAGS_traineddata.c_str())) {
tprintf("Failed to load language model from %s!\n", FLAGS_traineddata.c_str());
return EXIT_FAILURE;
}
std::vector<char> model_data;
if (!tesseract::LoadDataFromFile(FLAGS_model.c_str(), &model_data)) {
tprintf("Failed to load model from: %s\n", FLAGS_model.c_str());
return EXIT_FAILURE;
}
mgr.OverwriteEntry(tesseract::TESSDATA_LSTM, &model_data[0], model_data.size());
}
tesseract::LSTMTester tester(static_cast<int64_t>(FLAGS_max_image_MB) * 1048576);
if (!tester.LoadAllEvalData(FLAGS_eval_listfile.c_str())) {
tprintf("Failed to load eval data from: %s\n", FLAGS_eval_listfile.c_str());
return EXIT_FAILURE;
}
double errs = 0.0;
std::string result = tester.RunEvalSync(0, &errs, mgr,
/*training_stage (irrelevant)*/ 0, FLAGS_verbosity);
tprintf("%s\n", result.c_str());
return EXIT_SUCCESS;
} /* main */
|
2301_81045437/tesseract
|
src/training/lstmeval.cpp
|
C++
|
apache-2.0
| 3,121
|
///////////////////////////////////////////////////////////////////////
// File: lstmtraining.cpp
// Description: Training program for LSTM-based networks.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#include <cerrno>
#include <locale> // for std::locale::classic
#if defined(__USE_GNU)
# include <cfenv> // for feenableexcept
#endif
#include "commontraining.h"
#include "fileio.h" // for LoadFileLinesToStrings
#include "lstmtester.h"
#include "lstmtrainer.h"
#include "params.h"
#include "tprintf.h"
#include "unicharset_training_utils.h"
using namespace tesseract;
// Command-line flags controlling LSTM training.
static INT_PARAM_FLAG(debug_interval, 0, "How often to display the alignment.");
static STRING_PARAM_FLAG(net_spec, "", "Network specification");
static INT_PARAM_FLAG(net_mode, 192, "Controls network behavior.");
static INT_PARAM_FLAG(perfect_sample_delay, 0, "How many imperfect samples between perfect ones.");
static DOUBLE_PARAM_FLAG(target_error_rate, 0.01, "Final error rate in percent.");
static DOUBLE_PARAM_FLAG(weight_range, 0.1, "Range of initial random weights.");
static DOUBLE_PARAM_FLAG(learning_rate, 10.0e-4, "Weight factor for new deltas.");
static BOOL_PARAM_FLAG(reset_learning_rate, false,
                       "Resets all stored learning rates to the value specified by --learning_rate.");
static DOUBLE_PARAM_FLAG(momentum, 0.5, "Decay factor for repeating deltas.");
// NOTE(review): the description below duplicates --momentum's; adam_beta is
// presumably the Adam second-moment decay rate -- confirm and reword.
static DOUBLE_PARAM_FLAG(adam_beta, 0.999, "Decay factor for repeating deltas.");
static INT_PARAM_FLAG(max_image_MB, 6000, "Max memory to use for images.");
static STRING_PARAM_FLAG(continue_from, "", "Existing model to extend");
static STRING_PARAM_FLAG(model_output, "lstmtrain", "Basename for output models");
static STRING_PARAM_FLAG(train_listfile, "",
                         "File listing training files in lstmf training format.");
static STRING_PARAM_FLAG(eval_listfile, "", "File listing eval files in lstmf training format.");
#if defined(__USE_GNU)
static BOOL_PARAM_FLAG(debug_float, false, "Raise error on certain float errors.");
#endif
static BOOL_PARAM_FLAG(stop_training, false, "Just convert the training model to a runtime model.");
static BOOL_PARAM_FLAG(convert_to_int, false, "Convert the recognition model to an integer model.");
static BOOL_PARAM_FLAG(sequential_training, false,
                       "Use the training files sequentially instead of round-robin.");
static INT_PARAM_FLAG(append_index, -1,
                      "Index in continue_from Network at which to"
                      " attach the new network defined by net_spec");
static BOOL_PARAM_FLAG(debug_network, false, "Get info on distribution of weight values");
static INT_PARAM_FLAG(max_iterations, 0, "If set, exit after this many iterations");
static STRING_PARAM_FLAG(traineddata, "", "Combined Dawgs/Unicharset/Recoder for language model");
static STRING_PARAM_FLAG(old_traineddata, "",
                         "When changing the character set, this specifies the old"
                         " character set that is to be replaced");
static BOOL_PARAM_FLAG(randomly_rotate, false,
                       "Train OSD and randomly turn training samples upside-down");
// Number of training images to train between calls to MaintainCheckpoints.
const int kNumPagesPerBatch = 100;
// Apart from command-line flags, input is a collection of lstmf files, that
// were previously created using tesseract with the lstm.train config file.
// The program iterates over the inputs, feeding the data to the network,
// until the error rate reaches a specified target or max_iterations is reached.
// Trains an LSTM recognizer from the lstmf files named in --train_listfile,
// periodically running checkpoint maintenance (and, optionally, evaluation on
// --eval_listfile), until --target_error_rate or max_iterations is reached.
int main(int argc, char **argv) {
  tesseract::CheckSharedLibraryVersion();
  ParseArguments(&argc, &argv);
#if defined(__USE_GNU)
  if (FLAGS_debug_float) {
    // Raise SIGFPE for unwanted floating point calculations.
    feenableexcept(FE_DIVBYZERO | FE_OVERFLOW | FE_INVALID);
  }
#endif
  if (FLAGS_model_output.empty()) {
    tprintf("Must provide a --model_output!\n");
    return EXIT_FAILURE;
  }
  if (FLAGS_traineddata.empty()) {
    tprintf("Must provide a --traineddata see training documentation\n");
    return EXIT_FAILURE;
  }
  // Check write permissions.
  // Create and delete a scratch file so an unwritable output location fails
  // fast instead of after a long training run.
  std::string test_file = FLAGS_model_output;
  test_file += "_wtest";
  FILE *f = fopen(test_file.c_str(), "wb");
  if (f != nullptr) {
    fclose(f);
    if (remove(test_file.c_str()) != 0) {
      tprintf("Error, failed to remove %s: %s\n", test_file.c_str(), strerror(errno));
      return EXIT_FAILURE;
    }
  } else {
    tprintf("Error, model output cannot be written: %s\n", strerror(errno));
    return EXIT_FAILURE;
  }
  // Setup the trainer.
  std::string checkpoint_file = FLAGS_model_output;
  checkpoint_file += "_checkpoint";
  std::string checkpoint_bak = checkpoint_file + ".bak";
  tesseract::LSTMTrainer trainer(FLAGS_model_output, checkpoint_file,
                                 FLAGS_debug_interval,
                                 static_cast<int64_t>(FLAGS_max_image_MB) * 1048576);
  if (!trainer.InitCharSet(FLAGS_traineddata.c_str())) {
    tprintf("Error, failed to read %s\n", FLAGS_traineddata.c_str());
    return EXIT_FAILURE;
  }
  // Reading something from an existing model doesn't require many flags,
  // so do it now and exit.
  if (FLAGS_stop_training || FLAGS_debug_network) {
    if (!trainer.TryLoadingCheckpoint(FLAGS_continue_from.c_str(), nullptr)) {
      tprintf("Failed to read continue from: %s\n", FLAGS_continue_from.c_str());
      return EXIT_FAILURE;
    }
    if (FLAGS_debug_network) {
      trainer.DebugNetwork();
    } else {
      // --stop_training: emit a runtime model, optionally integer-quantized.
      if (FLAGS_convert_to_int) {
        trainer.ConvertToInt();
      }
      if (!trainer.SaveTraineddata(FLAGS_model_output.c_str())) {
        tprintf("Failed to write recognition model : %s\n", FLAGS_model_output.c_str());
      }
    }
    return EXIT_SUCCESS;
  }
  // Get the list of files to process.
  if (FLAGS_train_listfile.empty()) {
    tprintf("Must supply a list of training filenames! --train_listfile\n");
    return EXIT_FAILURE;
  }
  std::vector<std::string> filenames;
  if (!tesseract::LoadFileLinesToStrings(FLAGS_train_listfile.c_str(), &filenames)) {
    tprintf("Failed to load list of training filenames from %s\n", FLAGS_train_listfile.c_str());
    return EXIT_FAILURE;
  }
  // Checkpoints always take priority if they are available.
  if (trainer.TryLoadingCheckpoint(checkpoint_file.c_str(), nullptr) ||
      trainer.TryLoadingCheckpoint(checkpoint_bak.c_str(), nullptr)) {
    tprintf("Successfully restored trainer from %s\n", checkpoint_file.c_str());
  } else {
    if (!FLAGS_continue_from.empty()) {
      // Load a past model file to improve upon.
      // With --append_index >= 0 the old model itself supplies the character
      // set; otherwise --old_traineddata names the set being replaced.
      if (!trainer.TryLoadingCheckpoint(FLAGS_continue_from.c_str(),
                                        FLAGS_append_index >= 0 ? FLAGS_continue_from.c_str()
                                                                : FLAGS_old_traineddata.c_str())) {
        tprintf("Failed to continue from: %s\n", FLAGS_continue_from.c_str());
        return EXIT_FAILURE;
      }
      tprintf("Continuing from %s\n", FLAGS_continue_from.c_str());
      if (FLAGS_reset_learning_rate) {
        trainer.SetLearningRate(FLAGS_learning_rate);
        tprintf("Set learning rate to %f\n", static_cast<float>(FLAGS_learning_rate));
      }
      trainer.InitIterations();
    }
    if (FLAGS_continue_from.empty() || FLAGS_append_index >= 0) {
      if (FLAGS_append_index >= 0) {
        tprintf("Appending a new network to an old one!!");
        if (FLAGS_continue_from.empty()) {
          tprintf("Must set --continue_from for appending!\n");
          return EXIT_FAILURE;
        }
      }
      // We are initializing from scratch.
      if (!trainer.InitNetwork(FLAGS_net_spec.c_str(), FLAGS_append_index, FLAGS_net_mode,
                               FLAGS_weight_range, FLAGS_learning_rate, FLAGS_momentum,
                               FLAGS_adam_beta)) {
        tprintf("Failed to create network from spec: %s\n", FLAGS_net_spec.c_str());
        return EXIT_FAILURE;
      }
      trainer.set_perfect_delay(FLAGS_perfect_sample_delay);
    }
  }
  if (!trainer.LoadAllTrainingData(
          filenames,
          FLAGS_sequential_training ? tesseract::CS_SEQUENTIAL : tesseract::CS_ROUND_ROBIN,
          FLAGS_randomly_rotate)) {
    tprintf("Load of images failed!!\n");
    return EXIT_FAILURE;
  }
  // Optional evaluator; handed to MaintainCheckpoints as an async callback.
  tesseract::LSTMTester tester(static_cast<int64_t>(FLAGS_max_image_MB) * 1048576);
  tesseract::TestCallback tester_callback = nullptr;
  if (!FLAGS_eval_listfile.empty()) {
    using namespace std::placeholders; // for _1, _2, _3...
    if (!tester.LoadAllEvalData(FLAGS_eval_listfile.c_str())) {
      tprintf("Failed to load eval data from: %s\n", FLAGS_eval_listfile.c_str());
      return EXIT_FAILURE;
    }
    tester_callback = std::bind(&tesseract::LSTMTester::RunEvalAsync, &tester, _1, _2, _3, _4);
  }
  int max_iterations = FLAGS_max_iterations;
  if (max_iterations < 0) {
    // A negative value is interpreted as epochs
    max_iterations = filenames.size() * (-max_iterations);
  } else if (max_iterations == 0) {
    // "Infinite" iterations.
    max_iterations = INT_MAX;
  }
  // Main loop: train kNumPagesPerBatch pages, then run checkpoint
  // maintenance (plus the eval callback, if any) and log its report.
  do {
    // Train a few.
    int iteration = trainer.training_iteration();
    for (int target_iteration = iteration + kNumPagesPerBatch;
         iteration < target_iteration && iteration < max_iterations;
         iteration = trainer.training_iteration()) {
      trainer.TrainOnLine(&trainer, false);
    }
    std::stringstream log_str;
    // Classic locale keeps the log format independent of the user's locale.
    log_str.imbue(std::locale::classic());
    trainer.MaintainCheckpoints(tester_callback, log_str);
    tprintf("%s\n", log_str.str().c_str());
  } while (trainer.best_error_rate() > FLAGS_target_error_rate &&
           (trainer.training_iteration() < max_iterations));
  tprintf("Finished! Selected model with minimal training error rate (BCER) = %g\n",
          trainer.best_error_rate());
  return EXIT_SUCCESS;
} /* main */
|
2301_81045437/tesseract
|
src/training/lstmtraining.cpp
|
C++
|
apache-2.0
| 10,551
|
///////////////////////////////////////////////////////////////////////
// File: merge_unicharsets.cpp
// Description: Simple tool to merge two or more unicharsets.
// Author: Ray Smith
//
// (C) Copyright 2015, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "commontraining.h" // CheckSharedLibraryVersion
#include "unicharset.h"
// Merges two or more unicharset files into one combined output unicharset.
// Usage: merge_unicharsets in-1 ... in-n out  (or -v/--version).
int main(int argc, char **argv) {
  tesseract::CheckSharedLibraryVersion();
  // Handle the version query before validating the argument count.
  if (argc > 1 && (!strcmp(argv[1], "-v") || !strcmp(argv[1], "--version"))) {
    printf("%s\n", tesseract::TessBaseAPI::Version());
    return EXIT_SUCCESS;
  } else if (argc < 4) {
    // Print usage
    // (needs at least two inputs and one output).
    printf(
        "Usage: %s -v | --version |\n"
        "       %s unicharset-in-1 ... unicharset-in-n unicharset-out\n",
        argv[0], argv[0]);
    return EXIT_FAILURE;
  }
  tesseract::UNICHARSET input_unicharset, result_unicharset;
  // All arguments except the last are inputs; accumulate them in order.
  for (int arg = 1; arg < argc - 1; ++arg) {
    // Load the input unicharset
    if (input_unicharset.load_from_file(argv[arg])) {
      printf("Loaded unicharset of size %zu from file %s\n", input_unicharset.size(), argv[arg]);
      result_unicharset.AppendOtherUnicharset(input_unicharset);
    } else {
      printf("Failed to load unicharset from file %s!!\n", argv[arg]);
      return EXIT_FAILURE;
    }
  }
  // Save the combined unicharset.
  if (result_unicharset.save_to_file(argv[argc - 1])) {
    printf("Wrote unicharset file %s.\n", argv[argc - 1]);
  } else {
    printf("Cannot save unicharset file %s.\n", argv[argc - 1]);
    return EXIT_FAILURE;
  }
  return EXIT_SUCCESS;
}
|
2301_81045437/tesseract
|
src/training/merge_unicharsets.cpp
|
C++
|
apache-2.0
| 2,159
|
/******************************************************************************
** Filename: MergeNF.c
** Purpose: Program for merging similar nano-feature protos
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#define _USE_MATH_DEFINES // for M_PI
#include <algorithm>
#include <cfloat> // for FLT_MAX
#include <cmath> // for M_PI
#include <cstdio>
#include <cstring>
#include "cluster.h"
#include "clusttool.h"
#include "featdefs.h"
#include "intproto.h"
#include "mergenf.h"
#include "ocrfeatures.h"
#include "oldlist.h"
#include "params.h"
#include "protos.h"
using namespace tesseract;
/*-------------------once in subfeat---------------------------------*/
// Tunables used by SubfeatureEvidence() and EvidenceOf().
static double_VAR(training_angle_match_scale, 1.0, "Angle Match Scale ...");
static double_VAR(training_similarity_midpoint, 0.0075, "Similarity Midpoint ...");
static double_VAR(training_similarity_curl, 2.0, "Similarity Curl ...");
/*-----------------------------once in
 * fasttrain----------------------------------*/
// Tunables used by DummyFastMatch()'s angle test and padded bounding box.
static double_VAR(training_tangent_bbox_pad, 0.5, "Tangent bounding box pad ...");
static double_VAR(training_orthogonal_bbox_pad, 2.5, "Orthogonal bounding box pad ...");
static double_VAR(training_angle_pad, 45.0, "Angle pad ...");
/**
* Compare protos p1 and p2 and return an estimate of the
* worst evidence rating that will result for any part of p1
* that is compared to p2. In other words, if p1 were broken
* into pico-features and each pico-feature was matched to p2,
* what is the worst evidence rating that will be achieved for
* any pico-feature.
*
* @param p1, p2 protos to be compared
*
* Globals: none
*
* @return Worst possible result when matching p1 to p2.
*/
// Estimates the worst evidence rating any pico-feature of p1 would get
// against p2 by probing a dummy pico-feature at both ends of p1.
// Returns 0.0 if the protos differ too much in length or if either end of
// p1 fails the fast match against p2.
// (Refactored: the two endpoint probes and their cleanup were duplicated
// verbatim; they are now a single loop with one delete per exit path.)
float CompareProtos(PROTO_STRUCT *p1, PROTO_STRUCT *p2) {
  /* if p1 and p2 are not close in length, don't let them match */
  if (std::fabs(p1->Length - p2->Length) > MAX_LENGTH_MISMATCH) {
    return 0.0;
  }
  /* create a dummy pico-feature to be used for comparisons */
  auto Feature = new FEATURE_STRUCT(&PicoFeatDesc);
  Feature->Params[PicoFeatDir] = p1->Angle;
  /* convert angle to radians */
  float Angle = p1->Angle * 2.0 * M_PI;
  /* find distance from center of p1 to 1/2 picofeat from end */
  float Length = p1->Length / 2.0 - GetPicoFeatureLength() / 2.0;
  if (Length < 0) {
    Length = 0;
  }
  float WorstEvidence = WORST_EVIDENCE;
  /* place the dummy pico-feature at each end of p1 and match it to p2 */
  for (int end = 0; end < 2; ++end) {
    const float sign = (end == 0) ? 1.0f : -1.0f;
    Feature->Params[PicoFeatX] = p1->X + sign * std::cos(Angle) * Length;
    Feature->Params[PicoFeatY] = p1->Y + sign * std::sin(Angle) * Length;
    if (!DummyFastMatch(Feature, p2)) {
      // An end of p1 that cannot possibly match p2 means no match at all.
      delete Feature;
      return 0.0;
    }
    float Evidence = SubfeatureEvidence(Feature, p2);
    if (Evidence < WorstEvidence) {
      WorstEvidence = Evidence;
    }
  }
  delete Feature;
  return WorstEvidence;
} /* CompareProtos */
/**
* This routine computes a proto which is the weighted
* average of protos p1 and p2. The new proto is returned
* in MergedProto.
*
* @param p1, p2 protos to be merged
* @param w1, w2 weight of each proto
* @param MergedProto place to put resulting merged proto
*/
void ComputeMergedProto(PROTO_STRUCT *p1, PROTO_STRUCT *p2, float w1, float w2, PROTO_STRUCT *MergedProto) {
  // Normalize the two weights so they sum to 1.
  const float total = w1 + w2;
  const float f1 = w1 / total;
  const float f2 = w2 / total;
  // Each geometric field of the merged proto is the weighted mean of the
  // corresponding fields of the inputs.
  MergedProto->X = p1->X * f1 + p2->X * f2;
  MergedProto->Y = p1->Y * f1 + p2->Y * f2;
  MergedProto->Length = p1->Length * f1 + p2->Length * f2;
  MergedProto->Angle = p1->Angle * f1 + p2->Angle * f2;
  // Recompute the derived coefficients from the new geometry.
  FillABC(MergedProto);
} /* ComputeMergedProto */
/**
* This routine searches through all of the prototypes in
* Class and returns the id of the proto which would provide
* the best approximation of Prototype. If no close
* approximation can be found, NO_PROTO is returned.
*
* @param Class class to search for matching old proto in
* @param NumMerged # of protos merged into each proto of Class
* @param Prototype new proto to find match for
*
* Globals: none
*
* @return Id of closest proto in Class or NO_PROTO.
*/
int FindClosestExistingProto(CLASS_TYPE Class, int NumMerged[], PROTOTYPE *Prototype) {
  // Convert the candidate prototype into proto form for comparison.
  PROTO_STRUCT NewProto;
  MakeNewFromOld(&NewProto, Prototype);
  int BestProto = NO_PROTO;
  // Only matches better than WORST_MATCH_ALLOWED are accepted.
  float BestMatch = WORST_MATCH_ALLOWED;
  for (int Pid = 0; Pid < Class->NumProtos; Pid++) {
    PROTO_STRUCT *Proto = ProtoIn(Class, Pid);
    // Tentatively merge the candidate into this existing proto, weighting
    // the existing proto by how many protos it already represents.
    PROTO_STRUCT MergedProto;
    ComputeMergedProto(Proto, &NewProto, static_cast<float>(NumMerged[Pid]), 1.0, &MergedProto);
    // The merge is only as good as the worse of the two fits to it.
    const float OldMatch = CompareProtos(Proto, &MergedProto);
    const float NewMatch = CompareProtos(&NewProto, &MergedProto);
    const float Match = std::min(OldMatch, NewMatch);
    if (Match > BestMatch) {
      BestProto = Pid;
      BestMatch = Match;
    }
  }
  return BestProto;
} /* FindClosestExistingProto */
/**
* This fills in the fields of the New proto based on the
* fields of the Old proto.
*
* @param New new proto to be filled in
* @param Old old proto to be converted
*
* Globals: none
*/
void MakeNewFromOld(PROTO_STRUCT *New, PROTOTYPE *Old) {
  // Copy the geometric parameters out of the prototype's mean vector.
  New->X = CenterX(Old->Mean);
  New->Y = CenterY(Old->Mean);
  New->Length = LengthOf(Old->Mean);
  New->Angle = OrientationOf(Old->Mean);
  // Compute the derived (A, B, C) coefficients used by SubfeatureEvidence().
  FillABC(New);
} /* MakeNewFromOld */
/*-------------------once in subfeat---------------------------------*/
/**
* @name SubfeatureEvidence
*
* Compare a feature to a prototype. Print the result.
*/
float SubfeatureEvidence(FEATURE Feature, PROTO_STRUCT *Proto) {
  // Direction difference, wrapped into [-0.5, 0.5] (angles are stored as
  // fractions of a full turn -- see the 2*pi conversions elsewhere here).
  float Dangle = Proto->Angle - Feature->Params[PicoFeatDir];
  if (Dangle < -0.5) {
    Dangle += 1.0;
  }
  if (Dangle > 0.5) {
    Dangle -= 1.0;
  }
  Dangle *= training_angle_match_scale;
  // A*x + B*y + C: distance of the feature from the proto's line.
  const float Distance =
      Proto->A * Feature->Params[PicoFeatX] + Proto->B * Feature->Params[PicoFeatY] + Proto->C;
  // Combine positional and angular error into a single similarity measure.
  return EvidenceOf(Distance * Distance + Dangle * Dangle);
}
/**
* @name EvidenceOf
*
* Return the new type of evidence number corresponding to this
* distance value. This number is no longer based on the chi squared
* approximation. The equation that represents the transform is:
* 1 / (1 + (sim / midpoint) ^ curl)
*/
double EvidenceOf(double Similarity) {
  // Normalize so that a similarity equal to the midpoint yields evidence 0.5.
  Similarity /= training_similarity_midpoint;
  // Raise to the curl power; the common integer exponents are special-cased
  // to avoid a general pow() call.
  double powered;
  if (training_similarity_curl == 3) {
    powered = Similarity * Similarity * Similarity;
  } else if (training_similarity_curl == 2) {
    powered = Similarity * Similarity;
  } else {
    powered = pow(Similarity, training_similarity_curl);
  }
  // Transform: 1 / (1 + (sim / midpoint) ^ curl).
  return 1.0 / (1.0 + powered);
}
/**
* This routine returns true if Feature would be matched
* by a fast match table built from Proto.
*
* @param Feature feature to be "fast matched" to proto
* @param Proto proto being "fast matched" against
*
* Globals:
* - training_tangent_bbox_pad bounding box pad tangent to proto
* - training_orthogonal_bbox_pad bounding box pad orthogonal to proto
*
* @return true if feature could match Proto.
*/
bool DummyFastMatch(FEATURE Feature, PROTO_STRUCT *Proto) {
  // First reject on direction: the angular error (angles are fractions of a
  // turn, so wrap into [0, 0.5]) must be within the configured pad.
  const float MaxAngleError = training_angle_pad / 360.0;
  float AngleError = std::fabs(Proto->Angle - Feature->Params[PicoFeatDir]);
  if (AngleError > 0.5) {
    AngleError = 1.0 - AngleError;
  }
  if (AngleError > MaxAngleError) {
    return false;
  }
  // Otherwise the feature matches iff it lies inside the proto's padded
  // bounding box.
  FRECT BoundingBox;
  ComputePaddedBoundingBox(Proto, training_tangent_bbox_pad * GetPicoFeatureLength(),
                           training_orthogonal_bbox_pad * GetPicoFeatureLength(), &BoundingBox);
  return PointInside(&BoundingBox, Feature->Params[PicoFeatX], Feature->Params[PicoFeatY]);
} /* DummyFastMatch */
/**
* This routine computes a bounding box that encloses the
* specified proto along with some padding. The
* amount of padding is specified as separate distances
* in the tangential and orthogonal directions.
*
* @param Proto proto to compute bounding box for
* @param TangentPad amount of pad to add in direction of segment
* @param OrthogonalPad amount of pad to add orthogonal to segment
* @param[out] BoundingBox place to put results
*/
void ComputePaddedBoundingBox(PROTO_STRUCT *Proto, float TangentPad, float OrthogonalPad,
                              FRECT *BoundingBox) {
  // Half the segment length plus the tangential padding.
  const float HalfExtent = Proto->Length / 2.0 + TangentPad;
  // Convert the stored angle (fraction of a turn) to radians.
  const float Angle = Proto->Angle * 2.0 * M_PI;
  const float CosOfAngle = fabs(std::cos(Angle));
  const float SinOfAngle = fabs(std::sin(Angle));
  // For each axis, the padding is the larger of the projected tangential
  // extent and the projected orthogonal pad.
  const float XPad = std::max(CosOfAngle * HalfExtent, SinOfAngle * OrthogonalPad);
  BoundingBox->MinX = Proto->X - XPad;
  BoundingBox->MaxX = Proto->X + XPad;
  const float YPad = std::max(SinOfAngle * HalfExtent, CosOfAngle * OrthogonalPad);
  BoundingBox->MinY = Proto->Y - YPad;
  BoundingBox->MaxY = Proto->Y + YPad;
} /* ComputePaddedBoundingBox */
/**
* Return true if point (X,Y) is inside of Rectangle.
*
* Globals: none
*
* @return true if point (X,Y) is inside of Rectangle.
*/
bool PointInside(FRECT *Rectangle, float X, float Y) {
  // Inclusive on all four edges.
  if (X < Rectangle->MinX || X > Rectangle->MaxX) {
    return false;
  }
  return Y >= Rectangle->MinY && Y <= Rectangle->MaxY;
} /* PointInside */
|
2301_81045437/tesseract
|
src/training/mergenf.cpp
|
C++
|
apache-2.0
| 10,260
|
/******************************************************************************
** Filename: MergeNF.c
** Purpose: Program for merging similar nano-feature protos
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*****************************************************************************/
#ifndef TESSERACT_TRAINING_MERGENF_H_
#define TESSERACT_TRAINING_MERGENF_H_
/**----------------------------------------------------------------------------
          Include Files and Type Defines
----------------------------------------------------------------------------**/
#include "cluster.h"
#include "ocrfeatures.h"
#include "picofeat.h"
#include "protos.h"
// Minimum CompareProtos() result for FindClosestExistingProto() to accept a
// merge candidate.
#define WORST_MATCH_ALLOWED (0.9)
// Best-possible evidence; starting value for the worst-evidence search in
// CompareProtos().
#define WORST_EVIDENCE (1.0)
// Protos whose lengths differ by more than this cannot match at all.
#define MAX_LENGTH_MISMATCH (2.0 * GetPicoFeatureLength())
// File name suffixes for proto and config outputs.
#define PROTO_SUFFIX ".mf.p"
#define CONFIG_SUFFIX ".cl"
// Sentinel id returned when no existing proto is close enough.
#define NO_PROTO (-1)
// Indices of the geometric parameters within a prototype's mean vector;
// used via the accessor macros below.
#define XPOSITION 0
#define YPOSITION 1
#define MFLENGTH 2
#define ORIENTATION 3
// Simple axis-aligned rectangle with float coordinates.
struct FRECT {
  float MinX, MaxX, MinY, MaxY;
};
/**----------------------------------------------------------------------------
          Public Macros
----------------------------------------------------------------------------**/
// Accessors for the mean vector M of a prototype (indices defined above).
#define CenterX(M) ((M)[XPOSITION])
#define CenterY(M) ((M)[YPOSITION])
#define LengthOf(M) ((M)[MFLENGTH])
#define OrientationOf(M) ((M)[ORIENTATION])
/**----------------------------------------------------------------------------
          Public Function Prototypes
----------------------------------------------------------------------------**/
float CompareProtos(tesseract::PROTO_STRUCT *p1, tesseract::PROTO_STRUCT *p2);
void ComputeMergedProto(tesseract::PROTO_STRUCT *p1, tesseract::PROTO_STRUCT *p2, float w1, float w2,
                        tesseract::PROTO_STRUCT *MergedProto);
int FindClosestExistingProto(tesseract::CLASS_TYPE Class, int NumMerged[],
                             tesseract::PROTOTYPE *Prototype);
void MakeNewFromOld(tesseract::PROTO_STRUCT *New, tesseract::PROTOTYPE *Old);
float SubfeatureEvidence(tesseract::FEATURE Feature, tesseract::PROTO_STRUCT *Proto);
double EvidenceOf(double Similarity);
bool DummyFastMatch(tesseract::FEATURE Feature, tesseract::PROTO_STRUCT *Proto);
void ComputePaddedBoundingBox(tesseract::PROTO_STRUCT *Proto, float TangentPad, float OrthogonalPad,
                              FRECT *BoundingBox);
bool PointInside(FRECT *Rectangle, float X, float Y);
#endif // TESSERACT_TRAINING_MERGENF_H_
|
2301_81045437/tesseract
|
src/training/mergenf.h
|
C
|
apache-2.0
| 3,028
|
/******************************************************************************
** Filename: mftraining.c
** Purpose: Separates training pages into files for each character.
** Strips from files only the features and there parameters of
** the feature type mf.
** Author: Dan Johnson
** Revisment: Christy Russon
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------*/
#define _USE_MATH_DEFINES // for M_PI
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include <cmath> // for M_PI
#include <cstdio>
#include <cstring>
#include "classify.h"
#include "cluster.h"
#include "clusttool.h"
#include "commontraining.h"
#include "featdefs.h"
#include "fontinfo.h"
#include "indexmapbidi.h"
#include "intproto.h"
#include "mastertrainer.h"
#include "mergenf.h"
#include "mf.h"
#include "ocrfeatures.h"
#include "oldlist.h"
#include "protos.h"
#include "shapetable.h"
#include "tprintf.h"
#include "unicity_table.h"
using namespace tesseract;
/*----------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
// Draws every prototype in protolist into a ScrollView window, color-coded
// by status (green=significant, blue=no samples, magenta=merged, red=other),
// and logs the green and red protos to the console.
static void DisplayProtoList(const char *ch, LIST protolist) {
  auto window = std::make_unique<ScrollView>("Char samples", 50, 200, 520, 520, 260, 260, true);
  LIST proto = protolist;
  iterate(proto) {
    auto *prototype = reinterpret_cast<PROTOTYPE *>(proto->first_node());
    // Choose the pen color from the prototype's status flags.
    if (prototype->Significant) {
      window->Pen(ScrollView::GREEN);
    } else if (prototype->NumSamples == 0) {
      window->Pen(ScrollView::BLUE);
    } else if (prototype->Merged) {
      window->Pen(ScrollView::MAGENTA);
    } else {
      window->Pen(ScrollView::RED);
    }
    // Convert the mean (center, length, orientation) into the segment's two
    // endpoints; drawing coordinates are scaled up by 256.
    float x = CenterX(prototype->Mean);
    float y = CenterY(prototype->Mean);
    double angle = OrientationOf(prototype->Mean) * 2 * M_PI;
    auto dx = static_cast<float>(LengthOf(prototype->Mean) * cos(angle) / 2);
    auto dy = static_cast<float>(LengthOf(prototype->Mean) * sin(angle) / 2);
    window->SetCursor((x - dx) * 256, (y - dy) * 256);
    window->DrawTo((x + dx) * 256, (y + dy) * 256);
    auto prototypeNumSamples = prototype->NumSamples;
    if (prototype->Significant) {
      tprintf("Green proto at (%g,%g)+(%g,%g) %d samples\n", x, y, dx, dy, prototypeNumSamples);
    } else if (prototype->NumSamples > 0 && !prototype->Merged) {
      tprintf("Red proto at (%g,%g)+(%g,%g) %d samples\n", x, y, dx, dy, prototypeNumSamples);
    }
  }
  window->Update();
}
#endif // !GRAPHICS_DISABLED
// Helper to run clustering on a single config.
// Mostly copied from the old mftraining, but with renamed variables.
// Clusters the samples of one shape into prototypes and folds them into the
// MERGE_CLASS for class_label (creating it if needed) as a new config.
// Returns the (possibly updated) head of the mf_classes list.
static LIST ClusterOneConfig(int shape_id, const char *class_label, LIST mf_classes,
                             const ShapeTable &shape_table, MasterTrainer *trainer) {
  // Cluster all samples of this shape into a prototype list.
  int num_samples;
  CLUSTERER *clusterer =
      trainer->SetupForClustering(shape_table, feature_defs, shape_id, &num_samples);
  Config.MagicSamples = num_samples;
  LIST proto_list = ClusterSamples(clusterer, &Config);
  CleanUpUnusedData(proto_list);
  // Merge protos where reasonable to make more of them significant by
  // representing almost all samples of the class/font.
  MergeInsignificantProtos(proto_list, class_label, clusterer, &Config);
#ifndef GRAPHICS_DISABLED
  // Debug display, only for the class named by --test_ch.
  if (strcmp(FLAGS_test_ch.c_str(), class_label) == 0) {
    DisplayProtoList(FLAGS_test_ch.c_str(), proto_list);
  }
#endif // !GRAPHICS_DISABLED
  // Delete the protos that will not be used in the inttemp output file.
  proto_list = RemoveInsignificantProtos(proto_list, true, false, clusterer->SampleSize);
  FreeClusterer(clusterer);
  // Find (or create) the merge class for this label and open a new config.
  MERGE_CLASS merge_class = FindClass(mf_classes, class_label);
  if (merge_class == nullptr) {
    merge_class = new MERGE_CLASS_NODE(class_label);
    mf_classes = push(mf_classes, merge_class);
  }
  int config_id = AddConfigToClass(merge_class->Class);
  merge_class->Class->font_set.push_back(shape_id);
  LIST proto_it = proto_list;
  iterate(proto_it) {
    auto *prototype = reinterpret_cast<PROTOTYPE *>(proto_it->first_node());
    // See if proto can be approximated by existing proto.
    int p_id = FindClosestExistingProto(merge_class->Class, merge_class->NumMerged, prototype);
    if (p_id == NO_PROTO) {
      // Need to make a new proto, as it doesn't match anything.
      p_id = AddProtoToClass(merge_class->Class);
      MakeNewFromOld(ProtoIn(merge_class->Class, p_id), prototype);
      merge_class->NumMerged[p_id] = 1;
    } else {
      PROTO_STRUCT dummy_proto;
      MakeNewFromOld(&dummy_proto, prototype);
      // Merge with the similar proto.
      ComputeMergedProto(ProtoIn(merge_class->Class, p_id), &dummy_proto,
                         static_cast<float>(merge_class->NumMerged[p_id]), 1.0,
                         ProtoIn(merge_class->Class, p_id));
      merge_class->NumMerged[p_id]++;
    }
    AddProtoToConfig(p_id, merge_class->Class->Configurations[config_id]);
  }
  FreeProtoList(&proto_list);
  return mf_classes;
}
// Helper to setup the config map.
// Setup an index mapping from the shapes in the shape table to the classes
// that will be trained. In keeping with the original design, each shape
// with the same list of unichars becomes a different class and the configs
// represent the different combinations of fonts.
// Builds config_map so that shapes with identical unichar lists share one
// compact id: each unmerged shape scans all later shapes and merges the
// equal ones into itself.
static void SetupConfigMap(ShapeTable *shape_table, IndexMapBiDi *config_map) {
  const int num_configs = shape_table->NumShapes();
  config_map->Init(num_configs, true);
  config_map->Setup();
  for (int c1 = 0; c1 < num_configs; ++c1) {
    // Skip ids that a previous iteration already merged away.
    if (config_map->SparseToCompact(c1) != c1) {
      continue;
    }
    Shape *shape1 = shape_table->MutableShape(c1);
    // Fold every later shape with the same unichar list into c1.
    for (int c2 = c1 + 1; c2 < num_configs; ++c2) {
      if (shape_table->MutableShape(c2)->IsEqualUnichars(shape1)) {
        config_map->Merge(c1, c2);
      }
    }
  }
  config_map->CompleteMerges();
}
/**
* This program reads in a text file consisting of feature
* samples from a training page in the following format:
* @verbatim
FontName UTF8-char-str xmin ymin xmax ymax page-number
NumberOfFeatureTypes(N)
FeatureTypeName1 NumberOfFeatures(M)
Feature1
...
FeatureM
FeatureTypeName2 NumberOfFeatures(M)
Feature1
...
FeatureM
...
FeatureTypeNameN NumberOfFeatures(M)
Feature1
...
FeatureM
FontName CharName ...
@endverbatim
* The result of this program is a binary inttemp file used by
* the OCR engine.
* @param argc number of command line arguments
* @param argv array of command line arguments
* @return 0 if no error occurred
*/
int main(int argc, char **argv) {
  tesseract::CheckSharedLibraryVersion();
  ParseArguments(&argc, &argv);
  ShapeTable *shape_table = nullptr;
  std::string file_prefix;
  // Load the training data.
  auto trainer = tesseract::LoadTrainingData(argv + 1, false, &shape_table, file_prefix);
  if (trainer == nullptr) {
    return EXIT_FAILURE; // Failed.
  }
  // Setup an index mapping from the shapes in the shape table to the classes
  // that will be trained. In keeping with the original design, each shape
  // with the same list of unichars becomes a different class and the configs
  // represent the different combinations of fonts.
  IndexMapBiDi config_map;
  SetupConfigMap(shape_table, &config_map);
  WriteShapeTable(file_prefix, *shape_table);
  // If the shape_table is flat, then either we didn't run shape clustering, or
  // it did nothing, so we just output the trainer's unicharset.
  // Otherwise shape_set will hold a fake unicharset with an entry for each
  // shape in the shape table, and we will output that instead.
  UNICHARSET shape_set;
  const UNICHARSET *unicharset = &trainer->unicharset();
  // If we ran shapeclustering (and it worked) then at least one shape will
  // have multiple unichars, so we have to build a fake unicharset.
  if (shape_table->AnyMultipleUnichars()) {
    unicharset = &shape_set;
    // Now build a fake unicharset for the compact shape space to keep the
    // output modules happy that we are doing things correctly.
    int num_shapes = config_map.CompactSize();
    for (int s = 0; s < num_shapes; ++s) {
      // Synthetic labels of the form "sh0000", one per compact shape.
      char shape_label[14];
      snprintf(shape_label, sizeof(shape_label), "sh%04d", s);
      shape_set.unichar_insert(shape_label);
    }
  }
  // Now train each config separately.
  int num_configs = shape_table->NumShapes();
  LIST mf_classes = NIL_LIST;
  for (int s = 0; s < num_configs; ++s) {
    int unichar_id, font_id;
    if (unicharset == &shape_set) {
      // Using fake unichar_ids from the config_map/shape_set.
      unichar_id = config_map.SparseToCompact(s);
    } else {
      // Get the real unichar_id from the shape table/unicharset.
      shape_table->GetFirstUnicharAndFont(s, &unichar_id, &font_id);
    }
    const char *class_label = unicharset->id_to_unichar(unichar_id);
    mf_classes = ClusterOneConfig(s, class_label, mf_classes, *shape_table, trainer.get());
  }
  // Build the two output file names from the common prefix.
  std::string inttemp_file = file_prefix;
  inttemp_file += "inttemp";
  std::string pffmtable_file = std::move(file_prefix);
  pffmtable_file += "pffmtable";
  CLASS_STRUCT *float_classes = SetUpForFloat2Int(*unicharset, mf_classes);
  // Now write the inttemp and pffmtable.
  trainer->WriteInttempAndPFFMTable(trainer->unicharset(), *unicharset, *shape_table, float_classes,
                                    inttemp_file.c_str(), pffmtable_file.c_str());
  // Release everything allocated above.
  for (size_t c = 0; c < unicharset->size(); ++c) {
    FreeClassFields(&float_classes[c]);
  }
  delete[] float_classes;
  FreeLabeledClassList(mf_classes);
  delete shape_table;
  printf("Done!\n");
  if (!FLAGS_test_ch.empty()) {
    // If we are displaying debug window(s), wait for the user to look at them.
    printf("Hit return to exit...\n");
    while (getchar() != '\n') {
      ;
    }
  }
  return EXIT_SUCCESS;
} /* main */
|
2301_81045437/tesseract
|
src/training/mftraining.cpp
|
C++
|
apache-2.0
| 10,950
|
/**********************************************************************
* File: boxchar.cpp
* Description: Simple class to associate a Tesseract classification unit with
* its bounding box so that the boxes can be rotated as the image
* is rotated for degradation. Also includes routines to output
* the character-tagged boxes to a boxfile.
* Author: Ray Smith
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "boxchar.h"
#include "fileio.h"
#include "normstrngs.h"
#include "tprintf.h"
#include "unicharset.h"
#include "unicode/uchar.h" // from libicu
#include <algorithm>
#include <cstddef>
#include <vector>
// Absolute Ratio of dx:dy or dy:dx to be a newline.
const int kMinNewlineRatio = 5;
namespace tesseract {
// Constructs a BoxChar holding the first |len| bytes of |utf8_str|.
// No bounding box is attached yet (box_ starts null) and the RTL index is
// unset (-1).
BoxChar::BoxChar(const char *utf8_str, int len)
    : ch_(utf8_str, len)
    , box_(nullptr)
    , page_(0)
    , rtl_index_(-1) {}
BoxChar::~BoxChar() {
  // Release the owned Leptonica box. box_ may still be nullptr (set so by the
  // constructor); boxDestroy is expected to tolerate that — confirm against
  // the Leptonica docs for the supported version.
  boxDestroy(&box_);
}
void BoxChar::AddBox(int x, int y, int width, int height) {
box_ = boxCreate(x, y, width, height);
}
// Increments *num_rtl and *num_ltr according to the directionality of
// characters in the box.
void BoxChar::GetDirection(int *num_rtl, int *num_ltr) const {
  // Convert the unichar to UTF32 representation
  std::vector<char32> uni_vector = UNICHAR::UTF8ToUTF32(ch_.c_str());
  if (uni_vector.empty()) {
    // Malformed UTF-8: dump the raw bytes for debugging and count nothing.
    tprintf("Illegal utf8 in boxchar string:%s = ", ch_.c_str());
    for (char c : ch_) {
      tprintf(" 0x%x", c);
    }
    tprintf("\n");
    return;
  }
  for (char32 ch : uni_vector) {
    UCharDirection dir = u_charDirection(ch);
    if (dir == U_RIGHT_TO_LEFT || dir == U_RIGHT_TO_LEFT_ARABIC || dir == U_RIGHT_TO_LEFT_ISOLATE) {
      ++*num_rtl;
    } else if ((dir == U_ARABIC_NUMBER) ||
               // Anything that is neither a non-spacing mark nor a boundary
               // neutral is counted as LTR; marks/neutrals count as neither.
               (dir != U_DIR_NON_SPACING_MARK && dir != U_BOUNDARY_NEUTRAL)) {
      ++*num_ltr;
    }
  }
}
// Reverses the order of unicodes within the box. If Pango generates a
// ligature, these will get reversed on output, so reverse now.
void BoxChar::ReverseUnicodesInBox() {
std::vector<char32> unicodes = UNICHAR::UTF8ToUTF32(ch_.c_str());
std::reverse(unicodes.begin(), unicodes.end());
ch_ = UNICHAR::UTF32ToUTF8(unicodes);
}
/* static */
void BoxChar::TranslateBoxes(int xshift, int yshift, std::vector<BoxChar *> *boxes) {
for (auto &boxe : *boxes) {
Box *box = boxe->box_;
if (box != nullptr) {
box->x += xshift;
box->y += yshift;
}
}
}
// Prepares for writing the boxes to a file by inserting newlines, spaces,
// and re-ordering so the boxes are strictly left-to-right.
/* static */
void BoxChar::PrepareToWrite(std::vector<BoxChar *> *boxes) {
  const bool rtl = ContainsMostlyRTL(*boxes);
  const bool vertical = MostlyVertical(*boxes);
  InsertNewlines(rtl, vertical, boxes);
  InsertSpaces(rtl, vertical, boxes);
  // Report any boxes that still have no geometry after the passes above.
  size_t index = 0;
  for (const BoxChar *bc : *boxes) {
    if (bc->box_ == nullptr) {
      tprintf("Null box at index %zu\n", index);
    }
    ++index;
  }
  if (rtl) {
    ReorderRTLText(boxes);
  }
}
// Inserts newline (tab) characters into the vector at newline positions.
// A line break is detected as a jump of more than the largest in-line advance
// seen so far, in the direction opposite to reading order. The newline is
// materialized as a "\t" BoxChar with a 1x1 box placed just past the previous
// character.
/* static */
void BoxChar::InsertNewlines(bool rtl_rules, bool vertical_rules, std::vector<BoxChar *> *boxes) {
  size_t prev_i = SIZE_MAX; // index of the last box seen with non-null geometry
  int max_shift = 0;        // largest in-line advance observed on the current line
  for (size_t i = 0; i < boxes->size(); ++i) {
    Box *box = (*boxes)[i]->box_;
    if (box == nullptr) {
      if (prev_i == SIZE_MAX || prev_i + 1 < i || i + 1 == boxes->size()) {
        // Erase null boxes at the start of a line and after another null box.
        do {
          delete (*boxes)[i];
          boxes->erase(boxes->begin() + i);
          if (i == 0) {
            break;
          }
          // The condition holds only while the erase removed the current last
          // element (i == boxes->size() afterwards), so this walks backwards
          // deleting a trailing run of null boxes, then stops.
        } while (i-- == boxes->size() && (*boxes)[i]->box_ == nullptr);
      }
      continue;
    }
    if (prev_i != SIZE_MAX) {
      Box *prev_box = (*boxes)[prev_i]->box_;
      // Advance along the reading direction; negated for RTL, and measured on
      // y for vertical text.
      int shift = box->x - prev_box->x;
      if (vertical_rules) {
        shift = box->y - prev_box->y;
      } else if (rtl_rules) {
        shift = -shift;
      }
      if (-shift > max_shift) {
        // This is a newline. Since nothing cares about the size of the box,
        // except the out-of-bounds checker, minimize the chance of creating
        // a box outside the image by making the width and height 1.
        int width = 1;
        int height = 1;
        int x = prev_box->x + prev_box->w;
        int y = prev_box->y;
        if (vertical_rules) {
          x = prev_box->x;
          y = prev_box->y + prev_box->h;
        } else if (rtl_rules) {
          x = prev_box->x - width;
          if (x < 0) {
            tprintf("prev x = %d, width=%d\n", prev_box->x, width);
            x = 0;
          }
        }
        if (prev_i + 1 == i) {
          // New character needed.
          auto *new_box = new BoxChar("\t", 1);
          new_box->AddBox(x, y, width, height);
          new_box->page_ = (*boxes)[i]->page_;
          boxes->insert(boxes->begin() + i, new_box);
          ++i;
        } else {
          // A null box sits between prev_i and i: reuse it as the newline.
          (*boxes)[i - 1]->AddBox(x, y, width, height);
          (*boxes)[i - 1]->ch_ = "\t";
        }
        max_shift = 0; // restart advance tracking for the new line
      } else if (shift > max_shift) {
        max_shift = shift;
      }
    }
    prev_i = i;
  }
}
// Converts nullptr boxes to space characters, with appropriate bounding boxes.
/* static */
void BoxChar::InsertSpaces(bool rtl_rules, bool vertical_rules, std::vector<BoxChar *> *boxes) {
  // After InsertNewlines, any remaining null boxes are not newlines, and are
  // singletons, so add a box to each remaining null box.
  for (size_t i = 1; i + 1 < boxes->size(); ++i) {
    Box *box = (*boxes)[i]->box_;
    if (box == nullptr) {
      // Default (horizontal LTR): the space spans the horizontal gap between
      // the neighbouring boxes, covering their combined vertical extent.
      Box *prev = (*boxes)[i - 1]->box_;
      Box *next = (*boxes)[i + 1]->box_;
      ASSERT_HOST(prev != nullptr && next != nullptr);
      int top = std::min(prev->y, next->y);
      int bottom = std::max(prev->y + prev->h, next->y + next->h);
      int left = prev->x + prev->w;
      int right = next->x;
      if (vertical_rules) {
        // Vertical text: the gap is vertical; span the combined width.
        top = prev->y + prev->h;
        bottom = next->y;
        left = std::min(prev->x, next->x);
        right = std::max(prev->x + prev->w, next->x + next->w);
      } else if (rtl_rules) {
        // With RTL we have to account for BiDi.
        // Right becomes the min left of all prior boxes back to the first
        // space or newline.
        right = prev->x;
        left = next->x + next->w;
        for (int j = i - 2; j >= 0 && (*boxes)[j]->ch_ != " " && (*boxes)[j]->ch_ != "\t"; --j) {
          prev = (*boxes)[j]->box_;
          ASSERT_HOST(prev != nullptr);
          if (prev->x < right) {
            right = prev->x;
          }
        }
        // Left becomes the max right of all next boxes forward to the first
        // space or newline.
        for (size_t j = i + 2;
             j < boxes->size() && (*boxes)[j]->box_ != nullptr && (*boxes)[j]->ch_ != "\t"; ++j) {
          next = (*boxes)[j]->box_;
          if (next->x + next->w > left) {
            left = next->x + next->w;
          }
        }
      }
      // Italic and stylized characters can produce negative spaces, which
      // Leptonica doesn't like, so clip to a positive size.
      if (right <= left) {
        right = left + 1;
      }
      if (bottom <= top) {
        bottom = top + 1;
      }
      (*boxes)[i]->AddBox(left, top, right - left, bottom - top);
      (*boxes)[i]->ch_ = " ";
    }
  }
}
// Reorders text in a right-to-left script in left-to-right order.
/* static */
void BoxChar::ReorderRTLText(std::vector<BoxChar *> *boxes) {
  // Ideally we need the inverse of the algorithm used by ResultIterator.
  // For now, let's try a sort that reverses original positions for RTL
  // characters, otherwise by x-position. This should be much closer to
  // correct than just sorting by x-position.
  size_t num_boxes = boxes->size();
  for (size_t i = 0; i < num_boxes; ++i) {
    int num_rtl = 0, num_ltr = 0;
    (*boxes)[i]->GetDirection(&num_rtl, &num_ltr);
    if (num_rtl > num_ltr) {
      // Mark the box as RTL by recording its original index, and flip its
      // codepoints so ligatures come out in the right order on output.
      (*boxes)[i]->set_rtl_index(i);
      (*boxes)[i]->ReverseUnicodesInBox();
    }
  }
  // Sort each tab-delimited segment independently. BoxCharPtrSort orders
  // RTL-marked boxes by descending original index, everything else by the
  // left edge of the box.
  BoxCharPtrSort sorter;
  size_t end = 0;
  for (size_t start = 0; start < boxes->size(); start = end + 1) {
    end = start + 1;
    while (end < boxes->size() && (*boxes)[end]->ch_ != "\t") {
      ++end;
    }
    std::sort(boxes->begin() + start, boxes->begin() + end, sorter);
  }
}
// Returns true if the vector contains mostly RTL characters.
/* static */
bool BoxChar::ContainsMostlyRTL(const std::vector<BoxChar *> &boxes) {
  int num_rtl = 0;
  int num_ltr = 0;
  // Accumulate directionality counts over every box.
  for (const BoxChar *bc : boxes) {
    bc->GetDirection(&num_rtl, &num_ltr);
  }
  return num_rtl > num_ltr;
}
// Returns true if the text is mostly laid out vertically.
/* static */
bool BoxChar::MostlyVertical(const std::vector<BoxChar *> &boxes) {
  int64_t total_dx = 0;
  int64_t total_dy = 0;
  for (size_t i = 1; i < boxes.size(); ++i) {
    const BoxChar *prev = boxes[i - 1];
    const BoxChar *curr = boxes[i];
    // Only compare consecutive boxes that both have geometry on the same page.
    if (prev->box_ == nullptr || curr->box_ == nullptr || prev->page_ != curr->page_) {
      continue;
    }
    const int dx = curr->box_->x - prev->box_->x;
    const int dy = curr->box_->y - prev->box_->y;
    // Accumulate only displacements that are strongly axis-aligned (one
    // component dominates the other by kMinNewlineRatio).
    if (abs(dx) > abs(dy) * kMinNewlineRatio || abs(dy) > abs(dx) * kMinNewlineRatio) {
      total_dx += static_cast<int64_t>(dx) * dx;
      total_dy += static_cast<int64_t>(dy) * dy;
    }
  }
  return total_dy > total_dx;
}
// Returns the total length of all the strings in the boxes.
/* static */
int BoxChar::TotalByteLength(const std::vector<BoxChar *> &boxes) {
  int total = 0;
  for (const BoxChar *bc : boxes) {
    total += bc->ch_.size();
  }
  return total;
}
// Rotate the boxes in [start_box, end_box) by the given rotation.
// The rotation is in radians clockwise about the given center.
/* static */
void BoxChar::RotateBoxes(float rotation, int xcenter, int ycenter, int start_box, int end_box,
                          std::vector<BoxChar *> *boxes) {
  // Gather the non-null boxes into a Boxa. L_CLONE shares the existing Box
  // objects with the Boxa (Leptonica's ref-counted handle) rather than
  // copying them.
  Boxa *orig = boxaCreate(0);
  for (int i = start_box; i < end_box; ++i) {
    Box *box = (*boxes)[i]->box_;
    if (box) {
      boxaAddBox(orig, box, L_CLONE);
    }
  }
  Boxa *rotated = boxaRotate(orig, xcenter, ycenter, rotation);
  boxaDestroy(&orig);
  // Write the rotated boxes back in order. box_ind advances only for entries
  // that contributed a box above, keeping the two sequences in sync.
  for (int i = start_box, box_ind = 0; i < end_box; ++i) {
    if ((*boxes)[i]->box_) {
      boxDestroy(&((*boxes)[i]->box_));
      (*boxes)[i]->box_ = boxaGetBox(rotated, box_ind++, L_CLONE);
    }
  }
  boxaDestroy(&rotated);
}
const int kMaxLineLength = 1024;
// Serializes the boxes to Tesseract box-file format and writes the result to
// |filename|, aborting the process on I/O failure (WriteStringToFileOrDie).
/* static */
void BoxChar::WriteTesseractBoxFile(const std::string &filename, int height,
                                    const std::vector<BoxChar *> &boxes) {
  const std::string output = GetTesseractBoxStr(height, boxes);
  File::WriteStringToFileOrDie(output, filename);
}
/* static */
std::string BoxChar::GetTesseractBoxStr(int height, const std::vector<BoxChar *> &boxes) {
std::string output;
char buffer[kMaxLineLength];
for (auto boxe : boxes) {
const Box *box = boxe->box_;
if (box == nullptr) {
tprintf("Error: Call PrepareToWrite before WriteTesseractBoxFile!!\n");
return "";
}
int nbytes = snprintf(buffer, kMaxLineLength, "%s %d %d %d %d %d\n", boxe->ch_.c_str(), box->x,
height - box->y - box->h, box->x + box->w, height - box->y, boxe->page_);
output.append(buffer, nbytes);
}
return output;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/pango/boxchar.cpp
|
C++
|
apache-2.0
| 11,995
|
/**********************************************************************
* File: boxchar.h
* Description: Simple class to associate a Tesseract classification unit with
* its bounding box so that the boxes can be rotated as the image
* is rotated for degradation. Also includes routines to output
* the character-tagged boxes to a boxfile.
* Author: Ray Smith
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_BOXCHAR_H_
#define TESSERACT_TRAINING_BOXCHAR_H_
#include <string>
#include <vector>
#include <allheaders.h> // for Leptonica API
#if (LIBLEPT_MAJOR_VERSION == 1 && LIBLEPT_MINOR_VERSION >= 83) || LIBLEPT_MAJOR_VERSION > 1
#include <pix_internal.h> // for fast access to Box geometry
#endif
#include <tesseract/export.h>
namespace tesseract {
// Associates a UTF-8 classification unit (possibly several codepoints) with
// an owned Leptonica bounding box and a page number, plus helpers to prepare
// and emit Tesseract box files.
class BoxChar {
public:
  BoxChar(const char *utf8_str, int len);
  ~BoxChar();
  // Accessors.
  const std::string &ch() const {
    return ch_;
  }
  // May be nullptr until AddBox has been called.
  const Box *box() const {
    return box_;
  }
  const int &page() const {
    return page_;
  }
  void set_rtl_index(int index) {
    rtl_index_ = index;
  }
  const int &rtl_index() const {
    return rtl_index_;
  }
  // Set the box_ member.
  void AddBox(int x, int y, int width, int height);
  void set_page(int page) {
    page_ = page;
  }
  std::string *mutable_ch() {
    return &ch_;
  }
  Box *mutable_box() {
    return box_;
  }
  // Sort function for sorting by left edge of box. Note that this will not
  // work properly until after InsertNewlines and InsertSpaces.
  bool operator<(const BoxChar &other) const {
    if (box_ == nullptr) {
      return true;
    }
    if (other.box_ == nullptr) {
      return false;
    }
    return box_->x < other.box_->x;
  }
  // Increments *num_rtl and *num_ltr according to the directionality of
  // characters in the box.
  void GetDirection(int *num_rtl, int *num_ltr) const;
  // Reverses the order of unicodes within the box. If Pango generates a
  // ligature, these will get reversed on output, so reverse now.
  void ReverseUnicodesInBox();
  // Shifts every non-null box by (xshift, yshift).
  static void TranslateBoxes(int xshift, int yshift, std::vector<BoxChar *> *boxes);
  // Prepares for writing the boxes to a file by inserting newlines, spaces,
  // and re-ordering so the boxes are strictly left-to-right.
  static void PrepareToWrite(std::vector<BoxChar *> *boxes);
  // Inserts newline (tab) characters into the vector at newline positions.
  static void InsertNewlines(bool rtl_rules, bool vertical_rules, std::vector<BoxChar *> *boxes);
  // Converts nullptr boxes to space characters, with appropriate bounding
  // boxes.
  static void InsertSpaces(bool rtl_rules, bool vertical_rules, std::vector<BoxChar *> *boxes);
  // Reorders text in a right-to-left script in left-to-right order.
  static void ReorderRTLText(std::vector<BoxChar *> *boxes);
  // Returns true if the vector contains mostly RTL characters.
  static bool ContainsMostlyRTL(const std::vector<BoxChar *> &boxes);
  // Returns true if the text is mostly laid out vertically.
  static bool MostlyVertical(const std::vector<BoxChar *> &boxes);
  // Returns the total length of all the strings in the boxes.
  static int TotalByteLength(const std::vector<BoxChar *> &boxes);
  // Rotate the vector of boxes between start and end by the given rotation.
  // The rotation is in radians clockwise about the given center.
  static void RotateBoxes(float rotation, int xcenter, int ycenter, int start_box, int end_box,
                          std::vector<BoxChar *> *boxes);
  // Create a tesseract box file from the vector of boxes. The image height
  // is needed to convert to tesseract coordinates.
  static void WriteTesseractBoxFile(const std::string &name, int height,
                                    const std::vector<BoxChar *> &boxes);
  // Gets the tesseract box file as a string from the vector of boxes.
  // The image height is needed to convert to tesseract coordinates.
  static std::string GetTesseractBoxStr(int height, const std::vector<BoxChar *> &boxes);

private:
  std::string ch_; // UTF-8 text of the classification unit.
  Box *box_;       // Owned Leptonica box; nullptr until AddBox.
  int page_;       // Page the box belongs to.
  // If the box is an RTL character, contains the original position in the
  // array of boxes (before reversal), otherwise -1.
  int rtl_index_;
};
// Sort predicate to sort a vector of BoxChar*.
struct BoxCharPtrSort {
  bool operator()(const BoxChar *box1, const BoxChar *box2) const {
    const int r1 = box1->rtl_index();
    const int r2 = box2->rtl_index();
    // When both boxes came from RTL text, order by descending original index
    // so the reversed sequence reads left-to-right.
    if (r1 >= 0 && r2 >= 0) {
      return r2 < r1;
    }
    // Otherwise fall back to BoxChar's left-edge comparison.
    return *box1 < *box2;
  }
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_BOXCHAR_H_
|
2301_81045437/tesseract
|
src/training/pango/boxchar.h
|
C++
|
apache-2.0
| 5,250
|
#pragma once
#ifdef CMAKE_BUILD
# include <pango_training_export.h>
#endif
|
2301_81045437/tesseract
|
src/training/pango/export.h
|
C
|
apache-2.0
| 77
|
/**********************************************************************
* File: ligature_table.cpp
* Description: Class for adding and removing optional latin ligatures,
* conditional on codepoint support by a specified font
* (if specified).
* Author: Ranjith Unnikrishnan
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "ligature_table.h"
#include <tesseract/unichar.h>
#include "pango_font_info.h"
#include "tlog.h"
#include "unicharset.h"
#include "unicode/errorcode.h" // from libicu
#include "unicode/normlzr.h" // from libicu
#include "unicode/unistr.h" // from libicu
#include "unicode/utypes.h" // from libicu
#include <utility>
namespace tesseract {
static std::string EncodeAsUTF8(const char32 ch32) {
UNICHAR uni_ch(ch32);
return std::string(uni_ch.utf8(), uni_ch.utf8_len());
}
// Range of optional latin ligature characters in Unicode to build ligatures
// from. Note that this range does not contain the custom ligatures that we
// encode in the private use area.
const int kMinLigature = 0xfb00;
const int kMaxLigature = 0xfb17; // Don't put the wide Hebrew letters in.
/* static */
std::unique_ptr<LigatureTable> LigatureTable::instance_;
/* static */
// Returns the lazily-constructed singleton, building the ligature maps on
// first use. NOTE(review): the check-then-create sequence is not thread-safe;
// confirm all callers run on a single thread.
LigatureTable *LigatureTable::Get() {
  if (instance_ == nullptr) {
    instance_.reset(new LigatureTable());
    instance_->Init();
  }
  return instance_.get();
}
// All length bounds start at zero; Init() computes the real values when the
// tables are built.
LigatureTable::LigatureTable()
    : min_lig_length_(0)
    , max_lig_length_(0)
    , min_norm_length_(0)
    , max_norm_length_(0) {}
// Builds the bidirectional ligature <-> normalized-string maps and the
// min/max length bounds used by AddLigatures. Idempotent: a non-empty
// norm_to_lig_table_ skips the work.
void LigatureTable::Init() {
  if (norm_to_lig_table_.empty()) {
    for (char32 lig = kMinLigature; lig <= kMaxLigature; ++lig) {
      // For each char in the range, convert to utf8, nfc normalize, and if
      // the strings are different put the both mappings in the hash_maps.
      std::string lig8 = EncodeAsUTF8(lig);
      icu::UnicodeString unicode_lig8(static_cast<UChar32>(lig));
      icu::UnicodeString normed8_result;
      icu::ErrorCode status;
      icu::Normalizer::normalize(unicode_lig8, UNORM_NFC, 0, normed8_result, status);
      std::string normed8;
      normed8_result.toUTF8String(normed8);
      int lig_length = lig8.length();
      int norm_length = normed8.size();
      if (normed8 != lig8 && lig_length > 1 && norm_length > 1) {
        norm_to_lig_table_[normed8] = lig8;
        // normed8 is read on the line above before being moved from here.
        lig_to_norm_table_[lig8] = std::move(normed8);
        // Track byte-length bounds of both forms; AddLigatures uses the
        // norm-length bounds to limit its substring search window.
        if (min_lig_length_ == 0 || lig_length < min_lig_length_) {
          min_lig_length_ = lig_length;
        }
        if (lig_length > max_lig_length_) {
          max_lig_length_ = lig_length;
        }
        if (min_norm_length_ == 0 || norm_length < min_norm_length_) {
          min_norm_length_ = norm_length;
        }
        if (norm_length > max_norm_length_) {
          max_norm_length_ = norm_length;
        }
      }
    }
    // Add custom extra ligatures.
    for (int i = 0; UNICHARSET::kCustomLigatures[i][0] != nullptr; ++i) {
      norm_to_lig_table_[UNICHARSET::kCustomLigatures[i][0]] = UNICHARSET::kCustomLigatures[i][1];
      int norm_length = strlen(UNICHARSET::kCustomLigatures[i][0]);
      if (min_norm_length_ == 0 || norm_length < min_norm_length_) {
        min_norm_length_ = norm_length;
      }
      if (norm_length > max_norm_length_) {
        max_norm_length_ = norm_length;
      }
      lig_to_norm_table_[UNICHARSET::kCustomLigatures[i][1]] = UNICHARSET::kCustomLigatures[i][0];
    }
  }
}
std::string LigatureTable::RemoveLigatures(const std::string &str) const {
std::string result;
UNICHAR::const_iterator it_begin = UNICHAR::begin(str.c_str(), str.length());
UNICHAR::const_iterator it_end = UNICHAR::end(str.c_str(), str.length());
char tmp[5];
int len;
for (UNICHAR::const_iterator it = it_begin; it != it_end; ++it) {
len = it.get_utf8(tmp);
tmp[len] = '\0';
auto lig_it = lig_to_norm_table_.find(tmp);
if (lig_it != lig_to_norm_table_.end()) {
result += lig_it->second;
} else {
result += tmp;
}
}
return result;
}
std::string LigatureTable::RemoveCustomLigatures(const std::string &str) const {
std::string result;
UNICHAR::const_iterator it_begin = UNICHAR::begin(str.c_str(), str.length());
UNICHAR::const_iterator it_end = UNICHAR::end(str.c_str(), str.length());
char tmp[5];
int len;
int norm_ind;
for (UNICHAR::const_iterator it = it_begin; it != it_end; ++it) {
len = it.get_utf8(tmp);
tmp[len] = '\0';
norm_ind = -1;
for (int i = 0; UNICHARSET::kCustomLigatures[i][0] != nullptr && norm_ind < 0; ++i) {
if (!strcmp(tmp, UNICHARSET::kCustomLigatures[i][1])) {
norm_ind = i;
}
}
if (norm_ind >= 0) {
result += UNICHARSET::kCustomLigatures[norm_ind][0];
} else {
result += tmp;
}
}
return result;
}
// Returns |str| with ligaturizable substrings replaced by their ligature
// characters. If |font| is non-null, a substitution is only made when the
// ligature glyph is renderable in that font.
std::string LigatureTable::AddLigatures(const std::string &str, const PangoFontInfo *font) const {
  std::string result;
  int len = str.size();
  int step = 0;
  int i = 0;
  // Slide over the string, at each byte offset trying the longest candidate
  // first so that e.g. a 3-byte match wins over a 2-byte one.
  for (i = 0; i < len - min_norm_length_ + 1; i += step) {
    step = 0;
    for (int liglen = max_norm_length_; liglen >= min_norm_length_; --liglen) {
      if (i + liglen <= len) {
        std::string lig_cand = str.substr(i, liglen);
        auto it = norm_to_lig_table_.find(lig_cand);
        if (it != norm_to_lig_table_.end()) {
          tlog(3, "Considering %s -> %s\n", lig_cand.c_str(), it->second.c_str());
          if (font) {
            // Test for renderability.
            if (!font->CanRenderString(it->second.data(), it->second.length())) {
              continue; // Not renderable
            }
          }
          // Found a match so convert it.
          step = liglen;
          result += it->second;
          tlog(2, "Substituted %s -> %s\n", lig_cand.c_str(), it->second.c_str());
          break;
        }
      }
    }
    // No ligature starts here: copy a single byte and advance.
    if (step == 0) {
      result += str[i];
      step = 1;
    }
  }
  // Copy the tail that is too short to contain any ligature.
  result += str.substr(i, len - i);
  return result;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/pango/ligature_table.cpp
|
C++
|
apache-2.0
| 6,586
|
/**********************************************************************
* File: ligature_table.h
* Description: Class for adding and removing optional latin ligatures,
* conditional on codepoint support by a specified font
* (if specified).
* Author: Ranjith Unnikrishnan
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TRAININGDATA_LIGATURE_TABLE_H_
#define TRAININGDATA_LIGATURE_TABLE_H_
#include "export.h"
#include <memory>
#include <string>
#include <unordered_map>
namespace tesseract {
class PangoFontInfo; // defined in pango_font_info.h
// Map to substitute strings for ligatures.
using LigHash = std::unordered_map<std::string, std::string>;
// Singleton that maps between ligature characters and their normalized
// multi-character forms, for adding/removing ligatures in training text.
class TESS_PANGO_TRAINING_API LigatureTable {
public:
  // Get a static instance of this class.
  static LigatureTable *Get();
  // Convert the utf8 string so that ligaturizable sequences, such as "fi" get
  // replaced by the (utf8 code for) appropriate ligature characters. Only do so
  // if the corresponding ligature character is renderable in the current font.
  std::string AddLigatures(const std::string &str, const PangoFontInfo *font) const;
  // Remove all ligatures.
  std::string RemoveLigatures(const std::string &str) const;
  // Remove only custom ligatures (eg. "ct") encoded in the private-use-area.
  std::string RemoveCustomLigatures(const std::string &str) const;
  const LigHash &norm_to_lig_table() const {
    return norm_to_lig_table_;
  }
  const LigHash &lig_to_norm_table() const {
    return lig_to_norm_table_;
  }

protected:
  LigatureTable();
  // Initialize the hash tables mapping between ligature strings and the
  // corresponding ligature characters.
  void Init();
  static std::unique_ptr<LigatureTable> instance_;
  LigHash norm_to_lig_table_; // normalized form -> ligature
  LigHash lig_to_norm_table_; // ligature -> normalized form
  // Byte-length bounds of the mapped strings, set by Init(); the norm bounds
  // limit the substring search window in AddLigatures.
  int min_lig_length_;
  int max_lig_length_;
  int min_norm_length_;
  int max_norm_length_;

private:
  LigatureTable(const LigatureTable &) = delete;
  void operator=(const LigatureTable &) = delete;
};
} // namespace tesseract
#endif // OCR_TRAININGDATA_TYPESETTING_LIGATURE_TABLE_H_
|
2301_81045437/tesseract
|
src/training/pango/ligature_table.h
|
C++
|
apache-2.0
| 2,761
|
/**********************************************************************
* File: pango_font_info.cpp
* Description: Font-related objects and helper functions
* Author: Ranjith Unnikrishnan
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#if (defined __CYGWIN__)
// workaround for stdlib.h and putenv
# undef __STRICT_ANSI__
#endif
#include "commandlineflags.h"
#include "fileio.h"
#include "normstrngs.h"
#include "pango_font_info.h"
#include "tlog.h"
#include <tesseract/unichar.h>
#include "pango/pango.h"
#include "pango/pangocairo.h"
#include "pango/pangofc-font.h"
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#ifndef _MSC_VER
# include <sys/param.h>
#endif
#define DISABLE_HEAP_LEAK_CHECK
using namespace tesseract;
namespace tesseract {
// Default assumed output resolution. Required only for providing font metrics
// in pixels.
const int kDefaultResolution = 300;
std::string PangoFontInfo::fonts_dir_;
std::string PangoFontInfo::cache_dir_;
// Looks up the glyph index for unicode character |wc| in |font|.
// Returns 0 (no glyph) when the font does not map the character; callers
// (e.g. GetSpacingProperties) test the result for truthiness.
static PangoGlyph get_glyph(PangoFont *font, gunichar wc) {
#if PANGO_VERSION_CHECK(1, 44, 0)
  // pango_font_get_hb_font requires Pango 1.44 or newer.
  hb_font_t *hb_font = pango_font_get_hb_font(font);
  // Initialize to 0: hb_font_get_nominal_glyph only writes the output glyph
  // on success, so an unchecked failure would otherwise return an
  // uninitialized value.
  hb_codepoint_t glyph = 0;
  hb_font_get_nominal_glyph(hb_font, wc, &glyph);
#else
  // Use deprecated pango_fc_font_get_glyph for older Pango versions.
  PangoGlyph glyph = pango_fc_font_get_glyph(PANGO_FC_FONT(font), wc);
#endif
  return glyph;
}
// Default-constructs an empty font description at the default output
// resolution; Clear() resets the remaining members to their empty state.
PangoFontInfo::PangoFontInfo() : desc_(nullptr), resolution_(kDefaultResolution) {
  Clear();
}
// Constructs from a Pango font description string (e.g. "Verdana Bold 12").
// On parse failure, logs an error and leaves the object in the cleared state.
PangoFontInfo::PangoFontInfo(const std::string &desc)
    : desc_(nullptr), resolution_(kDefaultResolution) {
  if (ParseFontDescriptionName(desc)) {
    return;
  }
  tprintf("ERROR: Could not parse %s\n", desc.c_str());
  Clear();
}
// Resets the object to the empty state, freeing any owned Pango description.
void PangoFontInfo::Clear() {
  font_size_ = 0;
  family_name_.clear();
  font_type_ = UNKNOWN;
  if (desc_ != nullptr) {
    pango_font_description_free(desc_);
    desc_ = nullptr;
  }
}
PangoFontInfo::~PangoFontInfo() {
  // desc_ may be nullptr (e.g. after Clear()); pango_font_description_free is
  // documented as accepting NULL — confirm for the minimum supported Pango.
  pango_font_description_free(desc_);
}
// Returns the Pango description string for this font, or "" if no
// description has been parsed yet.
std::string PangoFontInfo::DescriptionName() const {
  if (desc_ == nullptr) {
    return "";
  }
  char *desc_str = pango_font_description_to_string(desc_);
  std::string name(desc_str); // copy before releasing the Pango allocation
  g_free(desc_str);
  return name;
}
// If not already initialized, initializes FontConfig by setting its
// environment variable and creating a fonts.conf file that points to the
// FLAGS_fonts_dir and the cache to FLAGS_fontconfig_tmpdir.
/* static */
void PangoFontInfo::SoftInitFontConfig() {
  if (!fonts_dir_.empty()) {
    return; // Already initialized (HardInitFontConfig records fonts_dir_).
  }
  HardInitFontConfig(FLAGS_fonts_dir.c_str(), FLAGS_fontconfig_tmpdir.c_str());
}
// Re-initializes font config, whether or not already initialized.
// If already initialized, any existing cache is deleted, just to be sure.
// Side effects: writes a fonts.conf file into cache_dir, sets the
// FONTCONFIG_PATH and LANG environment variables, and resets the Pango font
// map.
/* static */
void PangoFontInfo::HardInitFontConfig(const char *fonts_dir, const char *cache_dir) {
  if (!cache_dir_.empty()) {
    // Remove stale fontconfig cache files from a previous initialization.
    File::DeleteMatchingFiles(File::JoinPath(cache_dir_.c_str(), "*cache-?").c_str());
  }
  const int MAX_FONTCONF_FILESIZE = 1024;
  char fonts_conf_template[MAX_FONTCONF_FILESIZE];
  cache_dir_ = cache_dir;
  fonts_dir_ = fonts_dir;
  // Minimal fontconfig configuration: scan fonts_dir, cache into cache_dir.
  snprintf(fonts_conf_template, MAX_FONTCONF_FILESIZE,
           "<?xml version=\"1.0\"?>\n"
           "<!DOCTYPE fontconfig SYSTEM \"fonts.dtd\">\n"
           "<fontconfig>\n"
           "<dir>%s</dir>\n"
           "<cachedir>%s</cachedir>\n"
           "<config></config>\n"
           "</fontconfig>\n",
           fonts_dir, cache_dir);
  std::string fonts_conf_file = File::JoinPath(cache_dir, "fonts.conf");
  File::WriteStringToFileOrDie(fonts_conf_template, fonts_conf_file);
#ifdef _WIN32
  std::string env("FONTCONFIG_PATH=");
  env.append(cache_dir);
  _putenv(env.c_str());
  _putenv("LANG=en_US.utf8");
#else
  setenv("FONTCONFIG_PATH", cache_dir, true);
  // Fix the locale so that the reported font names are consistent.
  setenv("LANG", "en_US.utf8", true);
#endif // _WIN32
  if (FcInitReinitialize() != FcTrue) {
    tprintf("FcInitiReinitialize failed!!\n");
  }
  FontUtils::ReInit();
  // Clear Pango's font cache too.
  pango_cairo_font_map_set_default(nullptr);
}
// Fills *families / *n_families with the font families known to the default
// Pango/Cairo font map, initializing fontconfig first if needed.
static void ListFontFamilies(PangoFontFamily ***families, int *n_families) {
  PangoFontInfo::SoftInitFontConfig();
  PangoFontMap *font_map = pango_cairo_font_map_get_default();
  DISABLE_HEAP_LEAK_CHECK; // no-op macro, defined empty above
  pango_font_map_list_families(font_map, families, n_families);
}
// Fills this object from an already-built Pango description, taking a copy
// of it. Returns false (leaving the object cleared) when the description has
// no family name.
bool PangoFontInfo::ParseFontDescription(const PangoFontDescription *desc) {
  Clear();
  const char *family = pango_font_description_get_family(desc);
  if (family == nullptr) {
    char *desc_str = pango_font_description_to_string(desc);
    tprintf("WARNING: Could not parse family name from description: '%s'\n", desc_str);
    g_free(desc_str);
    return false;
  }
  family_name_ = family;
  desc_ = pango_font_description_copy(desc);
  // Set font size in points
  font_size_ = pango_font_description_get_size(desc);
  if (!pango_font_description_get_size_is_absolute(desc)) {
    font_size_ /= PANGO_SCALE;
  }
  return true;
}
// Parses a Pango description string (e.g. "Verdana Bold 12") and fills in
// this object via ParseFontDescription. Returns false when no family name
// could be extracted.
bool PangoFontInfo::ParseFontDescriptionName(const std::string &name) {
  PangoFontDescription *desc = pango_font_description_from_string(name.c_str());
  bool success = ParseFontDescription(desc);
  // ParseFontDescription copied what it needed; free the temporary.
  pango_font_description_free(desc);
  return success;
}
// Returns the PangoFont structure corresponding to the closest available font
// in the font map. Note that if the font is wholly missing, this could
// correspond to a completely different font family and face.
// The caller owns the result and must g_object_unref it (as the other methods
// in this file do).
PangoFont *PangoFontInfo::ToPangoFont() const {
  SoftInitFontConfig();
  PangoFontMap *font_map = pango_cairo_font_map_get_default();
  PangoContext *context = pango_context_new();
  // Apply this object's resolution so metrics come out at the right scale.
  pango_cairo_context_set_resolution(context, resolution_);
  pango_context_set_font_map(context, font_map);
  PangoFont *font = nullptr;
  {
    DISABLE_HEAP_LEAK_CHECK; // no-op macro, defined empty above
    font = pango_font_map_load_font(font_map, context, desc_);
  }
  g_object_unref(context);
  return font;
}
// Returns true if this font provides exact coverage for every
// non-whitespace, non-zero-width codepoint in the first byte_length bytes of
// utf8_text; false if the font cannot be loaded or any character is missing.
bool PangoFontInfo::CoversUTF8Text(const char *utf8_text, int byte_length) const {
  PangoFont *font = ToPangoFont();
  if (font == nullptr) {
    // Font not found.
    return false;
  }
  PangoCoverage *coverage = pango_font_get_coverage(font, nullptr);
  for (UNICHAR::const_iterator it = UNICHAR::begin(utf8_text, byte_length);
       it != UNICHAR::end(utf8_text, byte_length); ++it) {
    if (IsWhitespace(*it) || pango_is_zero_width(*it)) {
      continue;
    }
    if (pango_coverage_get(coverage, *it) != PANGO_COVERAGE_EXACT) {
      char tmp[5];
      int len = it.get_utf8(tmp);
      tmp[len] = '\0';
      tlog(2, "'%s' (U+%x) not covered by font\n", tmp, *it);
      // Release coverage and font on this early-exit path too; newer Pango
      // exposes PangoCoverage as a GObject (see version check).
#if PANGO_VERSION_CHECK(1, 52, 0)
      g_object_unref(coverage);
#else
      pango_coverage_unref(coverage);
#endif
      g_object_unref(font);
      return false;
    }
  }
#if PANGO_VERSION_CHECK(1, 52, 0)
  g_object_unref(coverage);
#else
  pango_coverage_unref(coverage);
#endif
  g_object_unref(font);
  return true;
}
// This variant of strncpy permits src and dest to overlap with dest <= src:
// it copies forward, first byte first. Like strncpy, once the source nul is
// reached the remainder of dest is zero-filled, and no terminator is added
// when src is at least n bytes long.
//
// Fixes over the previous version: n == 0 is now a safe no-op (the old
// do-while decremented n past zero, wrapping the unsigned count and
// overrunning dest), and the source is never read past its nul byte (the old
// loop tested src[0] AFTER copying, reading one byte too far when the first
// source byte was nul).
static char *my_strnmove(char *dest, const char *src, size_t n) {
  char *ret = dest;
  // Copy bytes until the count is exhausted or the source nul is reached.
  while (n && *src) {
    *dest++ = *src++;
    --n;
  }
  // Zero-fill whatever space remains, matching strncpy semantics.
  while (n) {
    *dest++ = '\0';
    --n;
  }
  return ret;
}
// Removes, in place, every character of *utf8_text that this font does not
// cover exactly (whitespace and zero-width characters are always kept).
// Returns the number of dropped characters; drops everything if the font
// cannot be loaded.
int PangoFontInfo::DropUncoveredChars(std::string *utf8_text) const {
  int num_dropped_chars = 0;
  PangoFont *font = ToPangoFont();
  if (font == nullptr) {
    // Font not found, drop all characters.
    num_dropped_chars = utf8_text->length();
    utf8_text->clear();
    return num_dropped_chars;
  }
  PangoCoverage *coverage = pango_font_get_coverage(font, nullptr);
  // Maintain two iterators that point into the string. For space efficiency, we
  // will repeatedly copy one covered UTF8 character from one to the other, and
  // at the end resize the string to the right length.
  char *out = const_cast<char *>(utf8_text->c_str());
  const UNICHAR::const_iterator it_begin = UNICHAR::begin(utf8_text->c_str(), utf8_text->length());
  const UNICHAR::const_iterator it_end = UNICHAR::end(utf8_text->c_str(), utf8_text->length());
  for (UNICHAR::const_iterator it = it_begin; it != it_end;) {
    // Skip bad utf-8.
    if (!it.is_legal()) {
      ++it; // One suitable error message will still be issued.
      continue;
    }
    int unicode = *it;
    int utf8_len = it.utf8_len();
    const char *utf8_char = it.utf8_data();
    // Move it forward before the data gets modified.
    ++it;
    if (!IsWhitespace(unicode) && !pango_is_zero_width(unicode) &&
        pango_coverage_get(coverage, unicode) != PANGO_COVERAGE_EXACT) {
      if (TLOG_IS_ON(2)) {
        UNICHAR unichar(unicode);
        char *str = unichar.utf8_str();
        tlog(2, "'%s' (U+%x) not covered by font\n", str, unicode);
        delete[] str;
      }
      ++num_dropped_chars;
      continue; // skip the compaction copy: this char is dropped
    }
    // Keep this character: shift it left over any dropped predecessors.
    // my_strnmove allows the overlapping ranges (out <= utf8_char).
    my_strnmove(out, utf8_char, utf8_len);
    out += utf8_len;
  }
#if PANGO_VERSION_CHECK(1, 52, 0)
  g_object_unref(coverage);
#else
  pango_coverage_unref(coverage);
#endif
  g_object_unref(font);
  // Truncate to the compacted length.
  utf8_text->resize(out - utf8_text->c_str());
  return num_dropped_chars;
}
bool PangoFontInfo::GetSpacingProperties(const std::string &utf8_char, int *x_bearing,
int *x_advance) const {
// Convert to equivalent PangoFont structure
PangoFont *font = ToPangoFont();
if (!font) {
return false;
}
// Find the glyph index in the font for the supplied utf8 character.
int total_advance = 0;
int min_bearing = 0;
// Handle multi-unicode strings by reporting the left-most position of the
// x-bearing, and right-most position of the x-advance if the string were to
// be rendered.
const UNICHAR::const_iterator it_begin = UNICHAR::begin(utf8_char.c_str(), utf8_char.length());
const UNICHAR::const_iterator it_end = UNICHAR::end(utf8_char.c_str(), utf8_char.length());
for (UNICHAR::const_iterator it = it_begin; it != it_end; ++it) {
PangoGlyph glyph_index = get_glyph(font, *it);
if (!glyph_index) {
// Glyph for given unicode character doesn't exist in font.
g_object_unref(font);
return false;
}
// Find the ink glyph extents for the glyph
PangoRectangle ink_rect, logical_rect;
pango_font_get_glyph_extents(font, glyph_index, &ink_rect, &logical_rect);
pango_extents_to_pixels(&ink_rect, nullptr);
pango_extents_to_pixels(&logical_rect, nullptr);
int bearing = total_advance + PANGO_LBEARING(ink_rect);
if (it == it_begin || bearing < min_bearing) {
min_bearing = bearing;
}
total_advance += PANGO_RBEARING(logical_rect);
}
*x_bearing = min_bearing;
*x_advance = total_advance;
g_object_unref(font);
return true;
}
// Convenience overload for callers that do not need the grapheme
// decomposition; delegates to the full version.
bool PangoFontInfo::CanRenderString(const char *utf8_word, int len) const {
  std::vector<std::string> unused_graphemes;
  return CanRenderString(utf8_word, len, &unused_graphemes);
}
// Returns true if the font both covers and can actually shape utf8_word with
// no unknown-glyph and no dotted-circle (U+25CC) substitutions. On success,
// fills *graphemes (if non-null) with the cluster decomposition, whitespace
// included; on failure any partial decomposition is cleared.
bool PangoFontInfo::CanRenderString(const char *utf8_word, int len,
                                    std::vector<std::string> *graphemes) const {
  if (graphemes) {
    graphemes->clear();
  }
  // We check for font coverage of the text first, as otherwise Pango could
  // (undesirably) fall back to another font that does have the required
  // coverage.
  if (!CoversUTF8Text(utf8_word, len)) {
    return false;
  }
  // U+25CC dotted circle character that often (but not always) gets rendered
  // when there is an illegal grapheme sequence.
  const char32 kDottedCircleGlyph = 9676;
  bool bad_glyph = false;
  PangoFontMap *font_map = pango_cairo_font_map_get_default();
  PangoContext *context = pango_context_new();
  pango_context_set_font_map(context, font_map);
  PangoLayout *layout;
  {
    // Pango is not releasing the cached layout.
    DISABLE_HEAP_LEAK_CHECK;
    layout = pango_layout_new(context);
  }
  // Use the pre-parsed description when available; otherwise re-parse the
  // description name string on the fly.
  if (desc_) {
    pango_layout_set_font_description(layout, desc_);
  } else {
    PangoFontDescription *desc = pango_font_description_from_string(DescriptionName().c_str());
    pango_layout_set_font_description(layout, desc);
    pango_font_description_free(desc);
  }
  pango_layout_set_text(layout, utf8_word, len);
  PangoLayoutIter *run_iter = nullptr;
  { // Fontconfig caches some information here that is not freed before exit.
    DISABLE_HEAP_LEAK_CHECK;
    run_iter = pango_layout_get_iter(layout);
  }
  do {
    PangoLayoutRun *run = pango_layout_iter_get_run_readonly(run_iter);
    if (!run) {
      // A null run marks the end of a layout line; keep scanning.
      tlog(2, "Found end of line nullptr run marker\n");
      continue;
    }
    PangoGlyph dotted_circle_glyph;
    PangoFont *font = run->item->analysis.font;
    // Ask this run's own font for its dotted-circle glyph id, so the
    // comparison below matches whatever id this font assigned to U+25CC.
    dotted_circle_glyph = get_glyph(font, kDottedCircleGlyph);
    if (TLOG_IS_ON(2)) {
      PangoFontDescription *desc = pango_font_describe(font);
      char *desc_str = pango_font_description_to_string(desc);
      tlog(2, "Desc of font in run: %s\n", desc_str);
      g_free(desc_str);
      pango_font_description_free(desc);
    }
    // Walk the clusters (grapheme units) of this run.
    PangoGlyphItemIter cluster_iter;
    gboolean have_cluster;
    for (have_cluster = pango_glyph_item_iter_init_start(&cluster_iter, run, utf8_word);
         have_cluster && !bad_glyph;
         have_cluster = pango_glyph_item_iter_next_cluster(&cluster_iter)) {
      const int start_byte_index = cluster_iter.start_index;
      const int end_byte_index = cluster_iter.end_index;
      int start_glyph_index = cluster_iter.start_glyph;
      int end_glyph_index = cluster_iter.end_glyph;
      std::string cluster_text =
          std::string(utf8_word + start_byte_index, end_byte_index - start_byte_index);
      if (graphemes) {
        graphemes->push_back(cluster_text);
      }
      if (IsUTF8Whitespace(cluster_text.c_str())) {
        tlog(2, "Skipping whitespace\n");
        continue;
      }
      if (TLOG_IS_ON(2)) {
        printf("start_byte=%d end_byte=%d start_glyph=%d end_glyph=%d ", start_byte_index,
               end_byte_index, start_glyph_index, end_glyph_index);
      }
      // Glyph indices within a cluster can run backwards (presumably for
      // right-to-left runs — hence the computed step direction).
      for (int i = start_glyph_index, step = (end_glyph_index > start_glyph_index) ? 1 : -1;
           !bad_glyph && i != end_glyph_index; i += step) {
        const bool unknown_glyph =
            (cluster_iter.glyph_item->glyphs->glyphs[i].glyph & PANGO_GLYPH_UNKNOWN_FLAG);
        const bool illegal_glyph =
            (cluster_iter.glyph_item->glyphs->glyphs[i].glyph == dotted_circle_glyph);
        bad_glyph = unknown_glyph || illegal_glyph;
        if (TLOG_IS_ON(2)) {
          printf("(%d=%d)", cluster_iter.glyph_item->glyphs->glyphs[i].glyph, bad_glyph ? 1 : 0);
        }
      }
      if (TLOG_IS_ON(2)) {
        printf(" '%s'\n", cluster_text.c_str());
      }
      if (bad_glyph)
        tlog(1, "Found illegal glyph!\n");
    }
  } while (!bad_glyph && pango_layout_iter_next_run(run_iter));
  pango_layout_iter_free(run_iter);
  g_object_unref(context);
  g_object_unref(layout);
  // Never return a partial decomposition on failure.
  if (bad_glyph && graphemes) {
    graphemes->clear();
  }
  return !bad_glyph;
}
// ------------------------ FontUtils ------------------------------------
// Cached list of available font description names, populated lazily by
// ListAvailableFonts() and cleared by ReInit().
std::vector<std::string> FontUtils::available_fonts_;
// Returns whether the specified font description is available in the fonts
// directory.
//
// The generated list of font families and faces includes "synthesized" font
// faces that are not truly loadable. Pango versions >=1.18 have a
// pango_font_face_is_synthesized method that can be used to prune the list.
// Until then, we are restricted to using a hack where we try to load the font
// from the font_map, and then check what we loaded to see if it has the
// description we expected. If it is not, then the font is deemed unavailable.
//
// TODO: This function also reports some fonts that are NOT synthesized as
// unavailable, e.g. 'Bitstream Charter Medium Italic' and 'LMRoman17', so we
// need this hack until a better solution is found.
/* static */
bool FontUtils::IsAvailableFont(const char *input_query_desc, std::string *best_match) {
std::string query_desc(input_query_desc);
PangoFontDescription *desc = pango_font_description_from_string(query_desc.c_str());
PangoFont *selected_font = nullptr;
{
PangoFontInfo::SoftInitFontConfig();
PangoFontMap *font_map = pango_cairo_font_map_get_default();
PangoContext *context = pango_context_new();
pango_context_set_font_map(context, font_map);
{
DISABLE_HEAP_LEAK_CHECK;
selected_font = pango_font_map_load_font(font_map, context, desc);
}
g_object_unref(context);
}
if (selected_font == nullptr) {
pango_font_description_free(desc);
tlog(4, "** Font '%s' failed to load from font map!\n", input_query_desc);
return false;
}
PangoFontDescription *selected_desc = pango_font_describe(selected_font);
bool equal = pango_font_description_equal(desc, selected_desc);
tlog(3, "query weight = %d \t selected weight =%d\n", pango_font_description_get_weight(desc),
pango_font_description_get_weight(selected_desc));
char *selected_desc_str = pango_font_description_to_string(selected_desc);
tlog(2, "query_desc: '%s' Selected: '%s'\n", query_desc.c_str(), selected_desc_str);
if (!equal && best_match != nullptr) {
*best_match = selected_desc_str;
// Clip the ending ' 0' if there is one. It seems that, if there is no
// point size on the end of the fontname, then Pango always appends ' 0'.
int len = best_match->size();
if (len > 2 && best_match->at(len - 1) == '0' && best_match->at(len - 2) == ' ') {
*best_match = best_match->substr(0, len - 2);
}
}
g_free(selected_desc_str);
pango_font_description_free(selected_desc);
g_object_unref(selected_font);
pango_font_description_free(desc);
if (!equal)
tlog(4, "** Font '%s' failed pango_font_description_equal!\n", input_query_desc);
return equal;
}
// Returns true for generic family aliases ("Sans", "Serif", "Monospace")
// that do not name a concrete, loadable font family. Comparison is exact
// and case-sensitive.
static bool ShouldIgnoreFontFamilyName(const char *query) {
  static const char *kIgnoredFamilyNames[] = {"Sans", "Serif", "Monospace", nullptr};
  for (const char **name = kIgnoredFamilyNames; *name != nullptr; ++name) {
    if (strcmp(*name, query) == 0) {
      return true;
    }
  }
  return false;
}
// Outputs description names of available fonts. The result is computed once,
// cached in available_fonts_ (sorted), and reused on subsequent calls.
/* static */
const std::vector<std::string> &FontUtils::ListAvailableFonts() {
  // Return the cached list if it has already been built.
  if (!available_fonts_.empty()) {
    return available_fonts_;
  }
  PangoFontFamily **families = nullptr;
  int n_families = 0;
  ListFontFamilies(&families, &n_families);
  for (int i = 0; i < n_families; ++i) {
    const char *family_name = pango_font_family_get_name(families[i]);
    tlog(2, "Listing family %s\n", family_name);
    // Skip generic aliases such as "Sans" that don't name a real family.
    if (ShouldIgnoreFontFamilyName(family_name)) {
      continue;
    }
    int n_faces;
    PangoFontFace **faces = nullptr;
    pango_font_family_list_faces(families[i], &faces, &n_faces);
    for (int j = 0; j < n_faces; ++j) {
      PangoFontDescription *desc = pango_font_face_describe(faces[j]);
      char *desc_str = pango_font_description_to_string(desc);
      // "synthesized" font faces that are not truly loadable, so we skip it
      if (!pango_font_face_is_synthesized(faces[j]) && IsAvailableFont(desc_str)) {
        available_fonts_.emplace_back(desc_str);
      }
      pango_font_description_free(desc);
      g_free(desc_str);
    }
    // The faces array itself is caller-owned and released with g_free.
    g_free(faces);
  }
  g_free(families);
  // Keep the cache sorted for deterministic iteration order.
  std::sort(available_fonts_.begin(), available_fonts_.end());
  return available_fonts_;
}
// Utilities written to be backward compatible with StringRender
/* static */
// FontScore returns the weighted renderability score (sum of ch_map counts
// for covered characters). The unweighted count of covered characters is
// returned in *raw_score, and per-character coverage flags (in ch_map
// iteration order) in *ch_flags when it is non-null. If the font fails to
// load, every character counts as uncovered.
int FontUtils::FontScore(const std::unordered_map<char32, int64_t> &ch_map,
                         const std::string &fontname, int *raw_score, std::vector<bool> *ch_flags) {
  PangoFontInfo font_info;
  if (!font_info.ParseFontDescriptionName(fontname)) {
    tprintf("ERROR: Could not parse %s\n", fontname.c_str());
  }
  PangoFont *font = font_info.ToPangoFont();
  PangoCoverage *coverage = nullptr;
  if (font != nullptr) {
    coverage = pango_font_get_coverage(font, nullptr);
  }
  if (ch_flags) {
    ch_flags->clear();
    ch_flags->reserve(ch_map.size());
  }
  *raw_score = 0;
  int ok_chars = 0;
  for (auto &&it : ch_map) {
    // Whitespace always counts as covered; everything else needs exact
    // coverage in the font.
    bool covered =
        (coverage != nullptr) && (IsWhitespace(it.first) ||
                                  (pango_coverage_get(coverage, it.first) == PANGO_COVERAGE_EXACT));
    if (covered) {
      ++(*raw_score);
      ok_chars += it.second;
    }
    if (ch_flags) {
      ch_flags->push_back(covered);
    }
  }
  // BUGFIX: when the font failed to load, font and coverage are null, and the
  // previous unconditional unref calls triggered GLib assertion criticals.
  // Only release what was actually acquired.
  if (coverage != nullptr) {
#if PANGO_VERSION_CHECK(1, 52, 0)
    g_object_unref(coverage);
#else
    pango_coverage_unref(coverage);
#endif
  }
  if (font != nullptr) {
    g_object_unref(font);
  }
  return ok_chars;
}
/* static */
std::string FontUtils::BestFonts(const std::unordered_map<char32, int64_t> &ch_map,
std::vector<std::pair<const char *, std::vector<bool>>> *fonts) {
const double kMinOKFraction = 0.99;
// Weighted fraction of characters that must be renderable in a font to make
// it OK even if the raw count is not good.
const double kMinWeightedFraction = 0.99995;
fonts->clear();
std::vector<std::vector<bool>> font_flags;
std::vector<int> font_scores;
std::vector<int> raw_scores;
int most_ok_chars = 0;
int best_raw_score = 0;
const std::vector<std::string> &font_names = FontUtils::ListAvailableFonts();
for (const auto &font_name : font_names) {
std::vector<bool> ch_flags;
int raw_score = 0;
int ok_chars = FontScore(ch_map, font_name, &raw_score, &ch_flags);
most_ok_chars = std::max(ok_chars, most_ok_chars);
best_raw_score = std::max(raw_score, best_raw_score);
font_flags.push_back(ch_flags);
font_scores.push_back(ok_chars);
raw_scores.push_back(raw_score);
}
// Now select the fonts with a score above a threshold fraction
// of both the raw and weighted best scores. To prevent bogus fonts being
// selected for CJK, we require a high fraction (kMinOKFraction = 0.99) of
// BOTH weighted and raw scores.
// In low character-count scripts, the issue is more getting enough fonts,
// when only 1 or 2 might have all those rare dingbats etc in them, so we
// allow a font with a very high weighted (coverage) score
// (kMinWeightedFraction = 0.99995) to be used even if its raw score is poor.
int least_good_enough = static_cast<int>(most_ok_chars * kMinOKFraction);
int least_raw_enough = static_cast<int>(best_raw_score * kMinOKFraction);
int override_enough = static_cast<int>(most_ok_chars * kMinWeightedFraction);
std::string font_list;
for (unsigned i = 0; i < font_names.size(); ++i) {
int score = font_scores[i];
int raw_score = raw_scores[i];
if ((score >= least_good_enough && raw_score >= least_raw_enough) || score >= override_enough) {
fonts->push_back(std::make_pair(font_names[i].c_str(), font_flags[i]));
tlog(1, "OK font %s = %.4f%%, raw = %d = %.2f%%\n", font_names[i].c_str(),
100.0 * score / most_ok_chars, raw_score, 100.0 * raw_score / best_raw_score);
font_list += font_names[i];
font_list += "\n";
} else if (score >= least_good_enough || raw_score >= least_raw_enough) {
tlog(1, "Runner-up font %s = %.4f%%, raw = %d = %.2f%%\n", font_names[i].c_str(),
100.0 * score / most_ok_chars, raw_score, 100.0 * raw_score / best_raw_score);
}
}
return font_list;
}
/* static */
bool FontUtils::SelectFont(const char *utf8_word, const int utf8_len, std::string *font_name,
std::vector<std::string> *graphemes) {
return SelectFont(utf8_word, utf8_len, ListAvailableFonts(), font_name, graphemes);
}
/* static */
// Returns the first font in all_fonts (in list order) that can fully render
// utf8_word, storing its name in *font_name and the word's grapheme
// decomposition in *graphemes (both optional). Returns false if none can.
bool FontUtils::SelectFont(const char *utf8_word, const int utf8_len,
                           const std::vector<std::string> &all_fonts, std::string *font_name,
                           std::vector<std::string> *graphemes) {
  if (font_name) {
    font_name->clear();
  }
  if (graphemes) {
    graphemes->clear();
  }
  for (const auto &candidate : all_fonts) {
    PangoFontInfo font;
    ASSERT_HOST_MSG(font.ParseFontDescriptionName(candidate), "Could not parse font desc name %s\n",
                    candidate.c_str());
    std::vector<std::string> word_graphemes;
    if (!font.CanRenderString(utf8_word, utf8_len, &word_graphemes)) {
      continue;
    }
    if (graphemes) {
      graphemes->swap(word_graphemes);
    }
    if (font_name) {
      *font_name = candidate;
    }
    return true;
  }
  return false;
}
// PangoFontInfo is reinitialized, so clear the static list of fonts; the
// list is rebuilt lazily on the next call to ListAvailableFonts().
/* static */
void FontUtils::ReInit() {
  available_fonts_.clear();
}
// Print info about used font backend
/* static */
void FontUtils::PangoFontTypeInfo() {
  PangoFontMap *font_map = pango_cairo_font_map_get_default();
  // BUGFIX: test for a null map BEFORE querying it — the previous code
  // dereferenced font_map five times and only checked it for null afterwards.
  if (!font_map) {
    printf("Cannot create pango cairo font map!\n");
    return;
  }
  // Query the backend type once instead of once per comparison.
  switch (pango_cairo_font_map_get_font_type(reinterpret_cast<PangoCairoFontMap *>(font_map))) {
    case CAIRO_FONT_TYPE_TOY:
      printf("Using CAIRO_FONT_TYPE_TOY.\n");
      break;
    case CAIRO_FONT_TYPE_FT:
      printf("Using CAIRO_FONT_TYPE_FT.\n");
      break;
    case CAIRO_FONT_TYPE_WIN32:
      printf("Using CAIRO_FONT_TYPE_WIN32.\n");
      break;
    case CAIRO_FONT_TYPE_QUARTZ:
      printf("Using CAIRO_FONT_TYPE_QUARTZ.\n");
      break;
    case CAIRO_FONT_TYPE_USER:
      printf("Using CAIRO_FONT_TYPE_USER.\n");
      break;
    default:
      // Unknown backend: keep the original behavior of printing nothing.
      break;
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/pango/pango_font_info.cpp
|
C++
|
apache-2.0
| 26,578
|
/**********************************************************************
* File: pango_font_info.h
* Description: Font-related objects and helper functions
* Author: Ranjith Unnikrishnan
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_PANGO_FONT_INFO_H_
#define TESSERACT_TRAINING_PANGO_FONT_INFO_H_
#include "export.h"
#include "commandlineflags.h"
#include "pango/pango-font.h"
#include "pango/pango.h"
#include "pango/pangocairo.h"
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
using char32 = signed int;
namespace tesseract {
// Data holder class for a font, intended to avoid having to work with Pango or
// FontConfig-specific objects directly.
class TESS_PANGO_TRAINING_API PangoFontInfo {
public:
  // Broad stylistic classification of a font.
  enum FontTypeEnum {
    UNKNOWN,
    SERIF,
    SANS_SERIF,
    DECORATIVE,
  };
  PangoFontInfo();
  ~PangoFontInfo();
  // Initialize from parsing a font description name, defined as a string of the
  // format:
  //   "FamilyName [FaceName] [PointSize]"
  // where a missing FaceName implies the default regular face.
  // eg. "Arial Italic 12", "Verdana"
  //
  // FaceName is a combination of:
  //   [StyleName] [Variant] [Weight] [Stretch]
  // with (all optional) Pango-defined values of:
  // StyleName: Oblique, Italic
  // Variant  : Small-Caps
  // Weight   : Ultra-Light, Light, Medium, Semi-Bold, Bold, Ultra-Bold, Heavy
  // Stretch  : Ultra-Condensed, Extra-Condensed, Condensed, Semi-Condensed,
  //            Semi-Expanded, Expanded, Extra-Expanded, Ultra-Expanded.
  explicit PangoFontInfo(const std::string &name);
  bool ParseFontDescriptionName(const std::string &name);
  // Returns true if the font has codepoint coverage for the specified text.
  bool CoversUTF8Text(const char *utf8_text, int byte_length) const;
  // Modifies string to remove unicode points that are not covered by the
  // font. Returns the number of characters dropped.
  int DropUncoveredChars(std::string *utf8_text) const;
  // Returns true if the entire string can be rendered by the font with full
  // character coverage and no unknown glyph or dotted-circle glyph
  // substitutions on encountering a badly formed unicode sequence.
  // If true, returns individual graphemes. Any whitespace characters in the
  // original string are also included in the list.
  bool CanRenderString(const char *utf8_word, int len, std::vector<std::string> *graphemes) const;
  bool CanRenderString(const char *utf8_word, int len) const;
  // Retrieves the x_bearing and x_advance for the given utf8 character in the
  // font. Returns false if the glyph for the character could not be found in
  // the font.
  // Ref: http://freetype.sourceforge.net/freetype2/docs/glyphs/glyphs-3.html
  bool GetSpacingProperties(const std::string &utf8_char, int *x_bearing, int *x_advance) const;
  // If not already initialized, initializes FontConfig by setting its
  // environment variable and creating a fonts.conf file that points to the
  // FLAGS_fonts_dir and the cache to FLAGS_fontconfig_tmpdir.
  static void SoftInitFontConfig();
  // Re-initializes font config, whether or not already initialized.
  // If already initialized, any existing cache is deleted, just to be sure.
  static void HardInitFontConfig(const char *fonts_dir, const char *cache_dir);
  // Accessors
  std::string DescriptionName() const;
  // Font Family name eg. "Arial"
  const std::string &family_name() const {
    return family_name_;
  }
  // Size in points (1/72"), rounded to the nearest integer.
  int font_size() const {
    return font_size_;
  }
  FontTypeEnum font_type() const {
    return font_type_;
  }
  int resolution() const {
    return resolution_;
  }
  void set_resolution(const int resolution) {
    resolution_ = resolution;
  }

private:
  // FontUtils calls private members such as ToPangoFont() directly.
  friend class FontUtils;
  void Clear();
  bool ParseFontDescription(const PangoFontDescription *desc);
  // Returns the PangoFont structure corresponding to the closest available font
  // in the font map.
  PangoFont *ToPangoFont() const;
  // Font properties set automatically from parsing the font description name.
  std::string family_name_;
  int font_size_;
  FontTypeEnum font_type_;
  // The Pango description that was used to initialize the instance.
  PangoFontDescription *desc_;
  // Default output resolution to assume for GetSpacingProperties() and any
  // other methods that returns pixel values.
  int resolution_;
  // Fontconfig operates through an environment variable, so it intrinsically
  // cannot be thread-friendly, but you can serialize multiple independent
  // font configurations by calling HardInitFontConfig(fonts_dir, cache_dir).
  // These hold the last initialized values set by HardInitFontConfig or
  // the first call to SoftInitFontConfig.
  // Directory to be scanned for font files.
  static std::string fonts_dir_;
  // Directory to store the cache of font information. (Can be the same as
  // fonts_dir_)
  static std::string cache_dir_;

private:
  // Non-copyable.
  PangoFontInfo(const PangoFontInfo &) = delete;
  void operator=(const PangoFontInfo &) = delete;
};
// Static utility methods for querying font availability and font-selection
// based on codepoint coverage.
class TESS_PANGO_TRAINING_API FontUtils {
public:
  // Returns true if the font of the given description name is available in the
  // target directory specified by --fonts_dir
  static bool IsAvailableFont(const char *font_desc) {
    return IsAvailableFont(font_desc, nullptr);
  }
  // Returns true if the font of the given description name is available in the
  // target directory specified by --fonts_dir. If false is returned, and
  // best_match is not nullptr, the closest matching font is returned there.
  static bool IsAvailableFont(const char *font_desc, std::string *best_match);
  // Outputs description names of available fonts.
  static const std::vector<std::string> &ListAvailableFonts();
  // Picks font among available fonts that covers and can render the given word,
  // and returns the font description name and the decomposition of the word to
  // graphemes. Returns false if no suitable font was found.
  static bool SelectFont(const char *utf8_word, const int utf8_len, std::string *font_name,
                         std::vector<std::string> *graphemes);
  // Picks font among all_fonts that covers and can render the given word,
  // and returns the font description name and the decomposition of the word to
  // graphemes. Returns false if no suitable font was found.
  static bool SelectFont(const char *utf8_word, const int utf8_len,
                         const std::vector<std::string> &all_fonts, std::string *font_name,
                         std::vector<std::string> *graphemes);
  // NOTE: The following utilities were written to be backward compatible with
  // StringRender.
  // BestFonts returns a font name and a bit vector of the characters it
  // can render for the fonts that score within some fraction of the best
  // font on the characters in the given hash map.
  // In the flags vector, each flag is set according to whether the
  // corresponding character (in order of iterating ch_map) can be rendered.
  // The return string is a list of the acceptable fonts that were used.
  static std::string BestFonts(const std::unordered_map<char32, int64_t> &ch_map,
                               std::vector<std::pair<const char *, std::vector<bool>>> *font_flag);
  // FontScore returns the weighted renderability score of the given
  // hash map character table in the given font. The unweighted score
  // is also returned in raw_score.
  // The values in the bool vector ch_flags correspond to whether the
  // corresponding character (in order of iterating ch_map) can be rendered.
  static int FontScore(const std::unordered_map<char32, int64_t> &ch_map,
                       const std::string &fontname, int *raw_score, std::vector<bool> *ch_flags);
  // PangoFontInfo is reinitialized, so clear the static list of fonts.
  static void ReInit();
  // Prints which cairo backend pango-cairo is using.
  static void PangoFontTypeInfo();

private:
  // Lazily-built, sorted cache of available font description names.
  static std::vector<std::string> available_fonts_;
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_PANGO_FONT_INFO_H_
|
2301_81045437/tesseract
|
src/training/pango/pango_font_info.h
|
C++
|
apache-2.0
| 8,877
|
/**********************************************************************
* File: stringrenderer.cpp
* Description: Class for rendering UTF-8 text to an image, and retrieving
* bounding boxes around each grapheme cluster.
* Author: Ranjith Unnikrishnan
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "stringrenderer.h"
#include <allheaders.h> // from leptonica
#include "boxchar.h"
#include "helpers.h" // for TRand
#include "ligature_table.h"
#include "normstrngs.h"
#include "tlog.h"
#include <tesseract/unichar.h>
#include "pango/pango-font.h"
#include "pango/pango-glyph-item.h"
#include "unicode/uchar.h" // from libicu
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstring>
#include <map>
#include <utility>
#include <vector>
#define DISABLE_HEAP_LEAK_CHECK
namespace tesseract {
// Default output resolution in DPI, applied via set_resolution() and passed
// on to pango_cairo_context_set_resolution().
static const int kDefaultOutputResolution = 300;
// Word joiner (U+2060) inserted after letters in ngram mode, as per
// recommendation in http://unicode.org/reports/tr14/ to avoid line-breaks at
// hyphens and other non-alpha characters.
static const char *kWordJoinerUTF8 = "\u2060";
// Returns true if the code point is a Unicode combining mark (non-spacing,
// enclosing, or combining-spacing), per ICU's general category.
static bool IsCombiner(int ch) {
  switch (u_charType(ch)) {
    case U_NON_SPACING_MARK:
    case U_ENCLOSING_MARK:
    case U_COMBINING_SPACING_MARK:
      return true;
    default:
      return false;
  }
}
static std::string EncodeAsUTF8(const char32 ch32) {
UNICHAR uni_ch(ch32);
return std::string(uni_ch.utf8(), uni_ch.utf8_len());
}
// Returns true with probability 'prob'. The degenerate probabilities 1.0 and
// 0.0 are answered without consuming a random number, leaving the RNG state
// unchanged in those cases.
static bool RandBool(const double prob, TRand *rand) {
  if (prob == 1.0) {
    return true;
  }
  return prob != 0.0 && rand->UnsignedRand(1.0) < prob;
}
/* static */
// Converts a Cairo ARGB32 image surface into a 32 bpp Leptonica Pix of the
// same dimensions. Returns nullptr if the surface is not ARGB32.
static Image CairoARGB32ToPixFormat(cairo_surface_t *surface) {
  if (cairo_image_surface_get_format(surface) != CAIRO_FORMAT_ARGB32) {
    printf("Unexpected surface format %d\n", cairo_image_surface_get_format(surface));
    return nullptr;
  }
  const int width = cairo_image_surface_get_width(surface);
  const int height = cairo_image_surface_get_height(surface);
  Image pix = pixCreate(width, height, 32);
  int byte_stride = cairo_image_surface_get_stride(surface);
  // Copy each surface row into the Pix raster. The destination is offset by
  // one byte — this appears to reconcile Cairo's native-endian ARGB32 layout
  // with the byte order Leptonica expects (NOTE(review): confirm behavior on
  // big-endian platforms). The last row is copied one byte short so the
  // shifted write stays inside the Pix buffer.
  for (int i = 0; i < height; ++i) {
    memcpy(reinterpret_cast<unsigned char *>(pixGetData(pix) + i * pixGetWpl(pix)) + 1,
           cairo_image_surface_get_data(surface) + i * byte_stride,
           byte_stride - ((i == height - 1) ? 1 : 0));
  }
  return pix;
}
// Constructs a renderer for the given font description and page geometry:
// black pen, margins of 50, all optional effects (extra character spacing,
// leading, vertical text, underlining, ligatures, fullwidth Latin) disabled,
// uncovered-character dropping enabled. Then applies the default output
// resolution and parses the font description.
StringRenderer::StringRenderer(const std::string &font_desc, int page_width, int page_height)
    : font_(font_desc)
    , page_width_(page_width)
    , page_height_(page_height)
    , h_margin_(50)
    , v_margin_(50)
    , pen_color_{0.0, 0.0, 0.0}
    , char_spacing_(0)
    , leading_(0)
    , vertical_text_(false)
    , gravity_hint_strong_(false)
    , render_fullwidth_latin_(false)
    , underline_start_prob_(0)
    , underline_continuation_prob_(0)
    , underline_style_(PANGO_UNDERLINE_SINGLE)
    , drop_uncovered_chars_(true)
    , strip_unrenderable_words_(false)
    , add_ligatures_(false)
    , output_word_boxes_(false)
    , surface_(nullptr)
    , cr_(nullptr)
    , layout_(nullptr)
    , start_box_(0)
    , page_(0)
    , box_padding_(0)
    , page_boxes_(nullptr)
    , total_chars_(0)
    , font_index_(0)
    , last_offset_(0) {
  set_resolution(kDefaultOutputResolution);
  set_font(font_desc);
}
// Parses the font description name into font_ and propagates the current
// resolution to it. Returns whether the description parsed successfully.
bool StringRenderer::set_font(const std::string &desc) {
  const bool parsed_ok = font_.ParseFontDescriptionName(desc);
  font_.set_resolution(resolution_);
  return parsed_ok;
}
// Sets the output resolution and keeps the font's resolution in sync.
void StringRenderer::set_resolution(const int resolution) {
  resolution_ = resolution;
  font_.set_resolution(resolution);
}
// Sets the probability that a word starts an underline, clamped to [0, 1].
void StringRenderer::set_underline_start_prob(const double frac) {
  double prob = frac;
  if (prob < 0.0) {
    prob = 0.0;
  } else if (prob > 1.0) {
    prob = 1.0;
  }
  underline_start_prob_ = prob;
}
// Sets the probability that an underline continues onto the next word,
// clamped to [0, 1].
void StringRenderer::set_underline_continuation_prob(const double frac) {
  double prob = frac;
  if (prob < 0.0) {
    prob = 0.0;
  } else if (prob > 1.0) {
    prob = 1.0;
  }
  underline_continuation_prob_ = prob;
}
// Releases accumulated box data and the cairo/pango rendering objects.
StringRenderer::~StringRenderer() {
  ClearBoxes();
  FreePangoCairo();
}
// (Re)creates the cairo surface/context and the Pango layout for the current
// page geometry, configuring east gravity for vertical text when requested,
// then applies the layout properties.
void StringRenderer::InitPangoCairo() {
  FreePangoCairo();
  surface_ = cairo_image_surface_create(CAIRO_FORMAT_ARGB32, page_width_, page_height_);
  cr_ = cairo_create(surface_);
  {
    DISABLE_HEAP_LEAK_CHECK;
    layout_ = pango_cairo_create_layout(cr_);
  }
  if (vertical_text_) {
    PangoContext *context = pango_layout_get_context(layout_);
    pango_context_set_base_gravity(context, PANGO_GRAVITY_EAST);
    if (gravity_hint_strong_) {
      pango_context_set_gravity_hint(context, PANGO_GRAVITY_HINT_STRONG);
    }
    // Notify the layout that its context's properties changed.
    pango_layout_context_changed(layout_);
  }
  SetLayoutProperties();
}
// Applies the current font, resolution, wrap width, character spacing,
// ligature features and line spacing to the Pango layout.
void StringRenderer::SetLayoutProperties() {
  std::string font_desc = font_.DescriptionName();
  // Specify the font via a description name
  PangoFontDescription *desc = pango_font_description_from_string(font_desc.c_str());
  // Assign the font description to the layout
  pango_layout_set_font_description(layout_, desc);
  pango_font_description_free(desc); // free the description
  pango_cairo_context_set_resolution(pango_layout_get_context(layout_), resolution_);
  int max_width = page_width_ - 2 * h_margin_;
  int max_height = page_height_ - 2 * v_margin_;
  tlog(3, "max_width = %d, max_height = %d\n", max_width, max_height);
  // For vertical text the layout is rotated, so width and height swap roles.
  if (vertical_text_) {
    using std::swap;
    swap(max_width, max_height);
  }
  pango_layout_set_width(layout_, max_width * PANGO_SCALE);
  // Ultra-wide Thai strings need to wrap at char level.
  pango_layout_set_wrap(layout_, PANGO_WRAP_WORD_CHAR);
  // Adjust character spacing
  PangoAttrList *attr_list = pango_attr_list_new();
  if (char_spacing_) {
    // Letter-spacing attribute applied to the whole text range.
    PangoAttribute *spacing_attr = pango_attr_letter_spacing_new(char_spacing_ * PANGO_SCALE);
    spacing_attr->start_index = 0;
    spacing_attr->end_index = static_cast<guint>(-1);
    pango_attr_list_change(attr_list, spacing_attr);
  }
  if (add_ligatures_) {
    set_features("liga, clig, dlig, hlig");
    PangoAttribute *feature_attr = pango_attr_font_features_new(features_.c_str());
    pango_attr_list_change(attr_list, feature_attr);
  }
  // pango_attr_list_change took ownership of the attributes added above; the
  // list itself is unreffed after being handed to the layout.
  pango_layout_set_attributes(layout_, attr_list);
  pango_attr_list_unref(attr_list);
  // Adjust line spacing
  if (leading_) {
    pango_layout_set_spacing(layout_, leading_ * PANGO_SCALE);
  }
}
// Releases the layout, cairo context and surface in reverse order of
// creation, nulling each member so the teardown is idempotent.
void StringRenderer::FreePangoCairo() {
  if (layout_) {
    g_object_unref(layout_);
    layout_ = nullptr;
  }
  if (cr_) {
    cairo_destroy(cr_);
    cr_ = nullptr;
  }
  if (surface_) {
    cairo_surface_destroy(surface_);
    surface_ = nullptr;
  }
}
// Randomly starts underlines on whitespace-delimited words of page_text and
// randomly extends them across following words, adding the resulting
// underline attributes to the layout's attribute list. Controlled by
// underline_start_prob_ and underline_continuation_prob_.
void StringRenderer::SetWordUnderlineAttributes(const std::string &page_text) {
  if (underline_start_prob_ == 0) {
    return;
  }
  PangoAttrList *attr_list = pango_layout_get_attributes(layout_);
  const char *text = page_text.c_str();
  size_t offset = 0;
  TRand rand;
  bool started_underline = false;
  PangoAttribute *und_attr = nullptr;
  while (offset < page_text.length()) {
    // Advance past leading whitespace to the start of the next word.
    offset += SpanUTF8Whitespace(text + offset);
    if (offset == page_text.length()) {
      break;
    }
    int word_start = offset;
    int word_len = SpanUTF8NotWhitespace(text + offset);
    offset += word_len;
    if (started_underline) {
      // Should we continue the underline to the next word?
      if (RandBool(underline_continuation_prob_, &rand)) {
        // Continue the current underline to this word.
        und_attr->end_index = word_start + word_len;
      } else {
        // Otherwise end the current underline attribute at the end of the
        // previous word. (pango_attr_list_insert takes ownership of und_attr,
        // so it is not freed here.)
        pango_attr_list_insert(attr_list, und_attr);
        started_underline = false;
        und_attr = nullptr;
      }
    }
    if (!started_underline && RandBool(underline_start_prob_, &rand)) {
      // Start a new underline attribute
      und_attr = pango_attr_underline_new(underline_style_);
      und_attr->start_index = word_start;
      und_attr->end_index = word_start + word_len;
      started_underline = true;
    }
  }
  // Finish the current underline attribute at the end of the page.
  if (started_underline) {
    und_attr->end_index = page_text.length();
    pango_attr_list_insert(attr_list, und_attr);
  }
}
// Returns offset in utf8 bytes to first page, i.e. the prefix of text that
// fits within the page's usable layout height. As a side effect, the layout's
// text is set to (at most) the first kMaxUnicodeBufLength unicode characters.
int StringRenderer::FindFirstPageBreakOffset(const char *text, int text_length) {
  if (!text_length) {
    return 0;
  }
  const int max_height = (page_height_ - 2 * v_margin_);
  const int max_width = (page_width_ - 2 * h_margin_);
  // Vertical text is laid out horizontally and rotated afterwards, so line
  // stacking is bounded by the page width instead of its height.
  const int max_layout_height = vertical_text_ ? max_width : max_height;
  UNICHAR::const_iterator it = UNICHAR::begin(text, text_length);
  const UNICHAR::const_iterator it_end = UNICHAR::end(text, text_length);
  const int kMaxUnicodeBufLength = 15000;
  // Advance the iterator up to kMaxUnicodeBufLength unicode characters; the
  // body is intentionally empty -- only the final iterator position matters.
  for (int i = 0; i < kMaxUnicodeBufLength && it != it_end; ++it, ++i) {
    ;
  }
  // Byte length of the unicode-limited prefix.
  int buf_length = it.utf8_data() - text;
  tlog(1, "len = %d buf_len = %d\n", text_length, buf_length);
  pango_layout_set_text(layout_, text, buf_length);
  PangoLayoutIter *line_iter = nullptr;
  { // Fontconfig caches some info here that is not freed before exit.
    DISABLE_HEAP_LEAK_CHECK;
    line_iter = pango_layout_get_iter(layout_);
  }
  bool first_page = true;
  int page_top = 0;
  // If every line fits, the whole buffered prefix goes on this page.
  int offset = buf_length;
  do {
    // Get bounding box of the current line
    PangoRectangle line_ink_rect;
    pango_layout_iter_get_line_extents(line_iter, &line_ink_rect, nullptr);
    pango_extents_to_pixels(&line_ink_rect, nullptr);
    PangoLayoutLine *line = pango_layout_iter_get_line_readonly(line_iter);
    if (first_page) {
      // Anchor the page top at the first line's top edge.
      page_top = line_ink_rect.y;
      first_page = false;
    }
    int line_bottom = line_ink_rect.y + line_ink_rect.height;
    if (line_bottom - page_top > max_layout_height) {
      // This line overflows the page: break immediately before it.
      offset = line->start_index;
      tlog(1, "Found offset = %d\n", offset);
      break;
    }
  } while (pango_layout_iter_next_line(line_iter));
  pango_layout_iter_free(line_iter);
  return offset;
}
// Returns the boxchars accumulated across RenderToImage() calls since the
// last ClearBoxes(). The renderer retains ownership of the BoxChar objects.
const std::vector<BoxChar *> &StringRenderer::GetBoxes() const {
  return boxchars_;
}
// Returns one bounding box per page rendered since the last ClearBoxes().
// Ownership stays with the renderer (freed by ClearBoxes()).
Boxa *StringRenderer::GetPageBoxes() const {
  return page_boxes_;
}
// Rotates only the boxes added by the most recent RenderToImage() call
// (indices start_box_ .. boxchars_.size()) about the page center.
void StringRenderer::RotatePageBoxes(float rotation) {
  BoxChar::RotateBoxes(rotation, page_width_ / 2, page_height_ / 2, start_box_, boxchars_.size(),
                       &boxchars_);
}
void StringRenderer::ClearBoxes() {
for (auto &boxchar : boxchars_) {
delete boxchar;
}
boxchars_.clear();
boxaDestroy(&page_boxes_);
}
// Serializes the accumulated boxes in Tesseract box-file format.
// PrepareToWrite mutates boxchars_ in place (it takes a mutable pointer)
// before serialization.
std::string StringRenderer::GetBoxesStr() {
  BoxChar::PrepareToWrite(&boxchars_);
  return BoxChar::GetTesseractBoxStr(page_height_, boxchars_);
}
// Writes the accumulated boxes directly to a Tesseract box file, applying
// the same in-place PrepareToWrite normalization as GetBoxesStr().
void StringRenderer::WriteAllBoxes(const std::string &filename) {
  BoxChar::PrepareToWrite(&boxchars_);
  BoxChar::WriteTesseractBoxFile(filename, page_height_, boxchars_);
}
// Returns cluster strings in logical order.
bool StringRenderer::GetClusterStrings(std::vector<std::string> *cluster_text) {
std::map<int, std::string> start_byte_to_text;
PangoLayoutIter *run_iter = pango_layout_get_iter(layout_);
const char *full_text = pango_layout_get_text(layout_);
do {
PangoLayoutRun *run = pango_layout_iter_get_run_readonly(run_iter);
if (!run) {
// End of line nullptr run marker
tlog(2, "Found end of line marker\n");
continue;
}
PangoGlyphItemIter cluster_iter;
gboolean have_cluster;
for (have_cluster = pango_glyph_item_iter_init_start(&cluster_iter, run, full_text);
have_cluster; have_cluster = pango_glyph_item_iter_next_cluster(&cluster_iter)) {
const int start_byte_index = cluster_iter.start_index;
const int end_byte_index = cluster_iter.end_index;
std::string text =
std::string(full_text + start_byte_index, end_byte_index - start_byte_index);
if (IsUTF8Whitespace(text.c_str())) {
tlog(2, "Found whitespace\n");
text = " ";
}
tlog(2, "start_byte=%d end_byte=%d : '%s'\n", start_byte_index, end_byte_index, text.c_str());
if (add_ligatures_) {
// Make sure the output box files have ligatured text in case the font
// decided to use an unmapped glyph.
text = LigatureTable::Get()->AddLigatures(text, nullptr);
}
start_byte_to_text[start_byte_index] = std::move(text);
}
} while (pango_layout_iter_next_run(run_iter));
pango_layout_iter_free(run_iter);
cluster_text->clear();
for (auto it = start_byte_to_text.begin(); it != start_byte_to_text.end(); ++it) {
cluster_text->push_back(it->second);
}
return !cluster_text->empty();
}
// Merges an array of BoxChars into words based on the identification of
// BoxChars containing the space character as inter-word separators.
//
// Sometime two adjacent characters in the sequence may be detected as lying on
// different lines based on their spatial positions. This may be the result of a
// newline character at end of the last word on a line in the source text, or of
// a discretionary line-break created by Pango at intra-word locations like
// hyphens. When this is detected the word is split at that location into
// multiple BoxChars. Otherwise, each resulting BoxChar will contain a word and
// its bounding box.
//
// Takes ownership of the input BoxChars; consumed characters are deleted and
// the vector is replaced by the merged word-level BoxChars.
static void MergeBoxCharsToWords(std::vector<BoxChar *> *boxchars) {
  std::vector<BoxChar *> result;
  bool started_word = false;
  for (auto &boxchar : *boxchars) {
    if (boxchar->ch() == " " || boxchar->box() == nullptr) {
      // Separator (space or box-less character): pass it through unchanged
      // and terminate any in-progress word.
      result.push_back(boxchar);
      boxchar = nullptr;
      started_word = false;
      continue;
    }
    if (!started_word) {
      // Begin new word
      started_word = true;
      result.push_back(boxchar);
      boxchar = nullptr;
    } else {
      BoxChar *last_boxchar = result.back();
      // Compute bounding box union
      const Box *box = boxchar->box();
      Box *last_box = last_boxchar->mutable_box();
      int left = std::min(last_box->x, box->x);
      int right = std::max(last_box->x + last_box->w, box->x + box->w);
      int top = std::min(last_box->y, box->y);
      int bottom = std::max(last_box->y + last_box->h, box->y + box->h);
      // Conclude that the word was broken to span multiple lines based on the
      // size of the merged bounding box in relation to those of the individual
      // characters seen so far.
      if (right - left > last_box->w + 5 * box->w) {
        // Fixed: terminate the log line with '\n' like every other tlog call
        // in this file, so log lines do not run together.
        tlog(1, "Found line break after '%s'\n", last_boxchar->ch().c_str());
        // Insert a fake interword space and start a new word with the current
        // boxchar.
        result.push_back(new BoxChar(" ", 1));
        result.push_back(boxchar);
        boxchar = nullptr;
        continue;
      }
      // Append to last word
      last_boxchar->mutable_ch()->append(boxchar->ch());
      last_box->x = left;
      last_box->w = right - left;
      last_box->y = top;
      last_box->h = bottom - top;
      delete boxchar;
      boxchar = nullptr;
    }
  }
  boxchars->swap(result);
}
void StringRenderer::ComputeClusterBoxes() {
const char *text = pango_layout_get_text(layout_);
PangoLayoutIter *cluster_iter = pango_layout_get_iter(layout_);
// Do a first pass to store cluster start indexes.
std::vector<int> cluster_start_indices;
do {
cluster_start_indices.push_back(pango_layout_iter_get_index(cluster_iter));
tlog(3, "Added %d\n", cluster_start_indices.back());
} while (pango_layout_iter_next_cluster(cluster_iter));
pango_layout_iter_free(cluster_iter);
cluster_start_indices.push_back(strlen(text));
tlog(3, "Added last index %d\n", cluster_start_indices.back());
// Sort the indices and create a map from start to end indices.
std::sort(cluster_start_indices.begin(), cluster_start_indices.end());
std::map<int, int> cluster_start_to_end_index;
for (size_t i = 0; i + 1 < cluster_start_indices.size(); ++i) {
cluster_start_to_end_index[cluster_start_indices[i]] = cluster_start_indices[i + 1];
}
// Iterate again to compute cluster boxes and their text with the obtained
// cluster extent information.
cluster_iter = pango_layout_get_iter(layout_);
// Store BoxChars* sorted by their byte start positions
std::map<int, BoxChar *> start_byte_to_box;
do {
PangoRectangle cluster_rect;
pango_layout_iter_get_cluster_extents(cluster_iter, &cluster_rect, nullptr);
pango_extents_to_pixels(&cluster_rect, nullptr);
const int start_byte_index = pango_layout_iter_get_index(cluster_iter);
const int end_byte_index = cluster_start_to_end_index[start_byte_index];
std::string cluster_text =
std::string(text + start_byte_index, end_byte_index - start_byte_index);
if (!cluster_text.empty() && cluster_text[0] == '\n') {
tlog(2, "Skipping newlines at start of text.\n");
continue;
}
if (!cluster_rect.width || !cluster_rect.height || IsUTF8Whitespace(cluster_text.c_str())) {
tlog(2, "Skipping whitespace with boxdim (%d,%d) '%s'\n", cluster_rect.width,
cluster_rect.height, cluster_text.c_str());
auto *boxchar = new BoxChar(" ", 1);
boxchar->set_page(page_);
start_byte_to_box[start_byte_index] = boxchar;
continue;
}
// Prepare a boxchar for addition at this byte position.
tlog(2, "[%d %d], %d, %d : start_byte=%d end_byte=%d : '%s'\n", cluster_rect.x, cluster_rect.y,
cluster_rect.width, cluster_rect.height, start_byte_index, end_byte_index,
cluster_text.c_str());
ASSERT_HOST_MSG(cluster_rect.width, "cluster_text:%s start_byte_index:%d\n",
cluster_text.c_str(), start_byte_index);
ASSERT_HOST_MSG(cluster_rect.height, "cluster_text:%s start_byte_index:%d\n",
cluster_text.c_str(), start_byte_index);
if (box_padding_) {
cluster_rect.x = std::max(0, cluster_rect.x - box_padding_);
cluster_rect.width += 2 * box_padding_;
cluster_rect.y = std::max(0, cluster_rect.y - box_padding_);
cluster_rect.height += 2 * box_padding_;
}
if (add_ligatures_) {
// Make sure the output box files have ligatured text in case the font
// decided to use an unmapped glyph.
cluster_text = LigatureTable::Get()->AddLigatures(cluster_text, nullptr);
}
auto *boxchar = new BoxChar(cluster_text.c_str(), cluster_text.size());
boxchar->set_page(page_);
boxchar->AddBox(cluster_rect.x, cluster_rect.y, cluster_rect.width, cluster_rect.height);
start_byte_to_box[start_byte_index] = boxchar;
} while (pango_layout_iter_next_cluster(cluster_iter));
pango_layout_iter_free(cluster_iter);
// There is a subtle bug in the cluster text reported by the PangoLayoutIter
// on ligatured characters (eg. The word "Lam-Aliph" in arabic). To work
// around this, we use text reported using the PangoGlyphIter which is
// accurate.
// TODO(ranjith): Revisit whether this is still needed in newer versions of
// pango.
std::vector<std::string> cluster_text;
if (GetClusterStrings(&cluster_text)) {
ASSERT_HOST(cluster_text.size() == start_byte_to_box.size());
int ind = 0;
for (auto it = start_byte_to_box.begin(); it != start_byte_to_box.end(); ++it, ++ind) {
it->second->mutable_ch()->swap(cluster_text[ind]);
}
}
// Append to the boxchars list in byte order.
std::vector<BoxChar *> page_boxchars;
page_boxchars.reserve(start_byte_to_box.size());
std::string last_ch;
for (auto it = start_byte_to_box.begin(); it != start_byte_to_box.end(); ++it) {
if (it->second->ch() == kWordJoinerUTF8) {
// Skip zero-width joiner characters (ZWJs) here.
delete it->second;
} else {
page_boxchars.push_back(it->second);
}
}
CorrectBoxPositionsToLayout(&page_boxchars);
if (render_fullwidth_latin_) {
for (auto &it : start_byte_to_box) {
// Convert fullwidth Latin characters to their halfwidth forms.
std::string half(ConvertFullwidthLatinToBasicLatin(it.second->ch()));
it.second->mutable_ch()->swap(half);
}
}
// Merge the character boxes into word boxes if we are rendering n-grams.
if (output_word_boxes_) {
MergeBoxCharsToWords(&page_boxchars);
}
boxchars_.insert(boxchars_.end(), page_boxchars.begin(), page_boxchars.end());
// Compute the page bounding box
Box *page_box = nullptr;
Boxa *all_boxes = nullptr;
for (auto &page_boxchar : page_boxchars) {
if (page_boxchar->box() == nullptr) {
continue;
}
if (all_boxes == nullptr) {
all_boxes = boxaCreate(0);
}
boxaAddBox(all_boxes, page_boxchar->mutable_box(), L_CLONE);
}
if (all_boxes != nullptr) {
boxaGetExtent(all_boxes, nullptr, nullptr, &page_box);
boxaDestroy(&all_boxes);
if (page_boxes_ == nullptr) {
page_boxes_ = boxaCreate(0);
}
boxaAddBox(page_boxes_, page_box, L_INSERT);
}
}
// Shifts the Pango-layout-relative boxes into page coordinates, and for
// vertical text additionally rotates them to match the rotated rendering.
void StringRenderer::CorrectBoxPositionsToLayout(std::vector<BoxChar *> *boxchars) {
  if (!vertical_text_) {
    // Horizontal text only needs the margin offset.
    BoxChar::TranslateBoxes(h_margin_, v_margin_, boxchars);
    return;
  }
  // Mirror the translate+rotate applied to the cairo context for vertical
  // rendering (see RenderToImage()).
  const double rotation = -pango_gravity_to_rotation(
      pango_context_get_base_gravity(pango_layout_get_context(layout_)));
  BoxChar::TranslateBoxes(page_width_ - h_margin_, v_margin_, boxchars);
  BoxChar::RotateBoxes(rotation, page_width_ - h_margin_, v_margin_, 0, boxchars->size(),
                       boxchars);
}
// Removes space-delimited words that the current font cannot render,
// preserving the original whitespace. Returns the number of words dropped.
int StringRenderer::StripUnrenderableWords(std::string *utf8_text) const {
  std::string kept;
  std::string dropped_words;
  int num_dropped = 0;
  const char *text = utf8_text->c_str();
  const size_t length = utf8_text->length();
  size_t pos = 0;
  while (pos < length) {
    // Whitespace is always copied through verbatim.
    const int space_len = SpanUTF8Whitespace(text + pos);
    kept.append(text + pos, space_len);
    pos += space_len;
    if (pos == length) {
      break;
    }
    const int word_len = SpanUTF8NotWhitespace(text + pos);
    if (font_.CanRenderString(text + pos, word_len)) {
      kept.append(text + pos, word_len);
    } else {
      ++num_dropped;
      dropped_words.append(text + pos, word_len);
      dropped_words.append(" ");
    }
    pos += word_len;
  }
  utf8_text->swap(kept);
  if (num_dropped > 0) {
    tprintf("Stripped %d unrenderable word(s): '%s'\n", num_dropped, dropped_words.c_str());
  }
  return num_dropped;
}
int StringRenderer::RenderToGrayscaleImage(const char *text, int text_length, Image *pix) {
Image orig_pix = nullptr;
int offset = RenderToImage(text, text_length, &orig_pix);
if (orig_pix) {
*pix = pixConvertTo8(orig_pix, false);
orig_pix.destroy();
}
return offset;
}
int StringRenderer::RenderToBinaryImage(const char *text, int text_length, int threshold,
Image *pix) {
Image orig_pix = nullptr;
int offset = RenderToImage(text, text_length, &orig_pix);
if (orig_pix) {
Image gray_pix = pixConvertTo8(orig_pix, false);
orig_pix.destroy();
*pix = pixThresholdToBinary(gray_pix, threshold);
gray_pix.destroy();
} else {
*pix = orig_pix;
}
return offset;
}
// Add word joiner (WJ) characters between adjacent non-space characters except
// immediately before a combiner.
/* static */
std::string StringRenderer::InsertWordJoiners(const std::string &text) {
std::string out_str;
const UNICHAR::const_iterator it_end = UNICHAR::end(text.c_str(), text.length());
for (UNICHAR::const_iterator it = UNICHAR::begin(text.c_str(), text.length()); it != it_end;
++it) {
// Add the symbol to the output string.
out_str.append(it.utf8_data(), it.utf8_len());
// Check the next symbol.
UNICHAR::const_iterator next_it = it;
++next_it;
bool next_char_is_boundary = (next_it == it_end || *next_it == ' ');
bool next_char_is_combiner = (next_it == it_end) ? false : IsCombiner(*next_it);
if (*it != ' ' && *it != '\n' && !next_char_is_boundary && !next_char_is_combiner) {
out_str += kWordJoinerUTF8;
}
}
return out_str;
}
// Convert halfwidth Basic Latin characters to their fullwidth forms.
std::string StringRenderer::ConvertBasicLatinToFullwidthLatin(const std::string &str) {
std::string full_str;
const UNICHAR::const_iterator it_end = UNICHAR::end(str.c_str(), str.length());
for (UNICHAR::const_iterator it = UNICHAR::begin(str.c_str(), str.length()); it != it_end; ++it) {
// Convert printable and non-space 7-bit ASCII characters to
// their fullwidth forms.
if (IsInterchangeValid7BitAscii(*it) && isprint(*it) && !isspace(*it)) {
// Convert by adding 0xFEE0 to the codepoint of 7-bit ASCII.
char32 full_char = *it + 0xFEE0;
full_str.append(EncodeAsUTF8(full_char));
} else {
full_str.append(it.utf8_data(), it.utf8_len());
}
}
return full_str;
}
// Convert fullwidth Latin characters to their halfwidth forms.
std::string StringRenderer::ConvertFullwidthLatinToBasicLatin(const std::string &str) {
std::string half_str;
UNICHAR::const_iterator it_end = UNICHAR::end(str.c_str(), str.length());
for (UNICHAR::const_iterator it = UNICHAR::begin(str.c_str(), str.length()); it != it_end; ++it) {
char32 half_char = FullwidthToHalfwidth(*it);
// Convert fullwidth Latin characters to their halfwidth forms
// only if halfwidth forms are printable and non-space 7-bit ASCII.
if (IsInterchangeValid7BitAscii(half_char) && isprint(half_char) && !isspace(half_char)) {
half_str.append(EncodeAsUTF8(half_char));
} else {
half_str.append(it.utf8_data(), it.utf8_len());
}
}
return half_str;
}
// Returns offset to end of text substring rendered in this method.
// Renders one page of text onto a fresh pango/cairo surface, computes the
// cluster bounding boxes, and tears the surface down again. If pix is
// non-null it receives the rendered page.
int StringRenderer::RenderToImage(const char *text, int text_length, Image *pix) {
  if (pix && *pix) {
    pix->destroy();
  }
  InitPangoCairo();
  // Determine how much of the text fits on one page. (This also sets a
  // prefix of the text on the layout as a side effect.)
  const int page_offset = FindFirstPageBreakOffset(text, text_length);
  if (!page_offset) {
    return 0;
  }
  // Remember where this page's boxes begin so RotatePageBoxes() can operate
  // on the most recent page only.
  start_box_ = boxchars_.size();
  if (!vertical_text_) {
    // Translate by the specified margin
    cairo_translate(cr_, h_margin_, v_margin_);
  } else {
    // Vertical text rendering is achieved by a two-step process of first
    // performing regular horizontal layout with character orientation set to
    // EAST, and then translating and rotating the layout before rendering onto
    // the desired image surface. The settings required for the former step are
    // done within InitPangoCairo().
    //
    // Translate to the top-right margin of page
    cairo_translate(cr_, page_width_ - h_margin_, v_margin_);
    // Rotate the layout
    double rotation = -pango_gravity_to_rotation(
        pango_context_get_base_gravity(pango_layout_get_context(layout_)));
    tlog(2, "Rotating by %f radians\n", rotation);
    cairo_rotate(cr_, rotation);
    pango_cairo_update_layout(cr_, layout_);
  }
  std::string page_text(text, page_offset);
  if (render_fullwidth_latin_) {
    // Convert Basic Latin to their fullwidth forms.
    page_text = ConvertBasicLatinToFullwidthLatin(page_text);
  }
  if (strip_unrenderable_words_) {
    StripUnrenderableWords(&page_text);
  }
  if (drop_uncovered_chars_ && !font_.CoversUTF8Text(page_text.c_str(), page_text.length())) {
    int num_dropped = font_.DropUncoveredChars(&page_text);
    if (num_dropped) {
      tprintf("WARNING: Dropped %d uncovered characters\n", num_dropped);
    }
  }
  if (add_ligatures_) {
    // Add ligatures wherever possible, including custom ligatures.
    page_text = LigatureTable::Get()->AddLigatures(page_text, &font_);
  }
  if (underline_start_prob_ > 0) {
    SetWordUnderlineAttributes(page_text);
  }
  // Install the (possibly filtered/converted) page text on the layout.
  pango_layout_set_text(layout_, page_text.c_str(), page_text.length());
  if (pix) {
    // Set a white background for the target image surface.
    cairo_set_source_rgb(cr_, 1.0, 1.0, 1.0); // sets drawing colour to white
    // Fill the surface with the active colour (if you don't do this, you will
    // be given a surface with a transparent background to draw on)
    cairo_paint(cr_);
    // Set the ink color to the configured pen color.
    cairo_set_source_rgb(cr_, pen_color_[0], pen_color_[1], pen_color_[2]);
    // If the target surface or transformation properties of the cairo instance
    // have changed, update the pango layout to reflect this
    pango_cairo_update_layout(cr_, layout_);
    {
      DISABLE_HEAP_LEAK_CHECK; // for Fontconfig
      // Draw the pango layout onto the cairo surface
      pango_cairo_show_layout(cr_, layout_);
    }
    *pix = CairoARGB32ToPixFormat(surface_);
  }
  ComputeClusterBoxes();
  FreePangoCairo();
  // Update internal state variables.
  ++page_;
  return page_offset;
}
// Render a string to an image, returning it as an 8 bit pix. Behaves as
// RenderString, except that it ignores the font set at construction and works
// through all the fonts, returning 0 until they are exhausted, at which point
// it returns the value it should have returned all along, but no pix this time.
// Fonts that don't contain a given proportion of the characters in the string
// get skipped.
// Fonts that work each get rendered and the font name gets added
// to the image.
// NOTE that no boxes are produced by this function.
//
// Example usage: To render a null terminated char-array "txt"
//
// int offset = 0;
// do {
// Image pix;
// offset += renderer.RenderAllFontsToImage(min_proportion, txt + offset,
// strlen(txt + offset), nullptr,
// &pix);
// ...
// } while (offset < strlen(text));
//
int StringRenderer::RenderAllFontsToImage(double min_coverage, const char *text, int text_length,
                                          std::string *font_used, Image *image) {
  *image = nullptr;
  // Select a suitable font to render the title with.
  const char kTitleTemplate[] = "%s : %d hits = %.2f%%, raw = %d = %.2f%%";
  std::string title_font;
  if (!FontUtils::SelectFont(kTitleTemplate, strlen(kTitleTemplate), &title_font, nullptr)) {
    tprintf("WARNING: Could not find a font to render image title with!\n");
    title_font = "Arial";
  }
  title_font += " 8";
  tlog(1, "Selected title font: %s\n", title_font.c_str());
  if (font_used) {
    font_used->clear();
  }
  std::string orig_font = font_.DescriptionName();
  // char_map_ caches the character histogram across successive calls; it is
  // only rebuilt after a full pass over the font list (cleared at the bottom).
  if (char_map_.empty()) {
    total_chars_ = 0;
    // Fill the hash table and use that for computing which fonts to use.
    for (UNICHAR::const_iterator it = UNICHAR::begin(text, text_length);
         it != UNICHAR::end(text, text_length); ++it) {
      ++total_chars_;
      ++char_map_[*it];
    }
    tprintf("Total chars = %d\n", total_chars_);
  }
  const std::vector<std::string> &all_fonts = FontUtils::ListAvailableFonts();
  // Resume from font_index_ so successive calls walk the whole font list.
  for (size_t i = font_index_; i < all_fonts.size(); ++i) {
    ++font_index_;
    int raw_score = 0;
    int ok_chars = FontUtils::FontScore(char_map_, all_fonts[i], &raw_score, nullptr);
    // NOTE(review): if text is empty, total_chars_ is 0 and the percentage
    // computations below divide by zero -- confirm callers never pass empty
    // text.
    if (ok_chars > 0 && ok_chars >= total_chars_ * min_coverage) {
      set_font(all_fonts[i]);
      int offset = RenderToBinaryImage(text, text_length, 128, image);
      ClearBoxes(); // Get rid of them as they are garbage.
      const int kMaxTitleLength = 1024;
      char title[kMaxTitleLength];
      snprintf(title, kMaxTitleLength, kTitleTemplate, all_fonts[i].c_str(), ok_chars,
               100.0 * ok_chars / total_chars_, raw_score, 100.0 * raw_score / char_map_.size());
      tprintf("%s\n", title);
      // This is a good font! Store the offset to return once we've tried all
      // the fonts.
      if (offset) {
        last_offset_ = offset;
        if (font_used) {
          *font_used = all_fonts[i];
        }
      }
      // Add the font to the image.
      set_font(title_font);
      // Temporarily shrink the vertical margin so the title renders near the
      // top edge, then restore it.
      v_margin_ /= 8;
      Image title_image = nullptr;
      RenderToBinaryImage(title, strlen(title), 128, &title_image);
      *image |= title_image;
      title_image.destroy();
      v_margin_ *= 8;
      set_font(orig_font);
      // We return the real offset only after cycling through the list of fonts.
      return 0;
    } else {
      tprintf("Font %s failed with %d hits = %.2f%%\n", all_fonts[i].c_str(), ok_chars,
              100.0 * ok_chars / total_chars_);
    }
  }
  // Font list exhausted: reset the cached state and report the offset saved
  // from the last successful rendering (-1 if no font worked at all).
  font_index_ = 0;
  char_map_.clear();
  return last_offset_ == 0 ? -1 : last_offset_;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/pango/stringrenderer.cpp
|
C++
|
apache-2.0
| 32,792
|
/**********************************************************************
* File: stringrenderer.h
* Description: Class for rendering UTF-8 text to an image, and retrieving
* bounding boxes around each grapheme cluster.
*
* Instances are created using a font description string
* (eg. "Arial Italic 12"; see pango_font_info.h for the format)
* and the page dimensions. Other renderer properties such as
* spacing, ligaturization, as well a preprocessing behavior such
* as removal of unrenderable words and a special n-gram mode may
* be set using respective set_* methods.
*
* Author: Ranjith Unnikrishnan
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_STRINGRENDERER_H_
#define TESSERACT_TRAINING_STRINGRENDERER_H_
#include "export.h"
#include "pango/pango-layout.h"
#include "pango/pangocairo.h"
#include "pango_font_info.h"
#include "image.h"
#include <string>
#include <unordered_map>
#include <vector>
struct Boxa;
struct Pix;
namespace tesseract {
class BoxChar;
class TESS_PANGO_TRAINING_API StringRenderer {
public:
  StringRenderer(const std::string &font_desc, int page_width, int page_height);
  ~StringRenderer();
  // Renders the text with the chosen font and returns the byte offset up to
  // which the text could be rendered so as to fit the specified page
  // dimensions.
  int RenderToImage(const char *text, int text_length, Image *pix);
  int RenderToGrayscaleImage(const char *text, int text_length, Image *pix);
  int RenderToBinaryImage(const char *text, int text_length, int threshold, Image *pix);
  // Renders a line of text with all available fonts that were able to render
  // at least min_coverage fraction of the input text. Use 1.0 to require that
  // a font be able to render all the text.
  int RenderAllFontsToImage(double min_coverage, const char *text, int text_length,
                            std::string *font_used, Image *pix);
  // Sets the font (pango description string) used for subsequent rendering.
  bool set_font(const std::string &desc);
  // Char spacing is in PIXELS!!!!.
  void set_char_spacing(int char_spacing) {
    char_spacing_ = char_spacing;
  }
  // Extra inter-line spacing (leading), in pixels.
  void set_leading(int leading) {
    leading_ = leading;
  }
  void set_resolution(const int resolution);
  void set_vertical_text(bool vertical_text) {
    vertical_text_ = vertical_text;
  }
  void set_gravity_hint_strong(bool gravity_hint_strong) {
    gravity_hint_strong_ = gravity_hint_strong;
  }
  void set_render_fullwidth_latin(bool render_fullwidth_latin) {
    render_fullwidth_latin_ = render_fullwidth_latin;
  }
  // Sets the probability (value in [0, 1]) of starting to render a word with an
  // underline. This implementation consider words to be space-delimited
  // sequences of characters.
  void set_underline_start_prob(const double frac);
  // Set the probability (value in [0, 1]) of continuing a started underline to
  // the next word.
  void set_underline_continuation_prob(const double frac);
  void set_underline_style(const PangoUnderline style) {
    underline_style_ = style;
  }
  // OpenType font-feature string passed to Pango when laying out the text.
  void set_features(const char *features) {
    features_ = features;
  }
  void set_page(int page) {
    page_ = page;
  }
  // Padding, in pixels, added on all sides of each output box.
  void set_box_padding(int val) {
    box_padding_ = val;
  }
  void set_drop_uncovered_chars(bool val) {
    drop_uncovered_chars_ = val;
  }
  void set_strip_unrenderable_words(bool val) {
    strip_unrenderable_words_ = val;
  }
  void set_output_word_boxes(bool val) {
    output_word_boxes_ = val;
  }
  // Before rendering the string, replace latin characters with their optional
  // ligatured forms (such as "fi", "ffi" etc.) if the font_ covers those
  // unicodes.
  void set_add_ligatures(bool add_ligatures) {
    add_ligatures_ = add_ligatures;
  }
  // Set the rgb value of the text ink. Values range in [0, 1.0]
  void set_pen_color(double r, double g, double b) {
    pen_color_[0] = r;
    pen_color_[1] = g;
    pen_color_[2] = b;
  }
  void set_h_margin(const int h_margin) {
    h_margin_ = h_margin;
  }
  void set_v_margin(const int v_margin) {
    v_margin_ = v_margin;
  }
  const PangoFontInfo &font() const {
    return font_;
  }
  int h_margin() const {
    return h_margin_;
  }
  int v_margin() const {
    return v_margin_;
  }
  // Get the boxchars of all clusters rendered thus far (or since the last call
  // to ClearBoxes()).
  const std::vector<BoxChar *> &GetBoxes() const;
  // Get the rendered page bounding boxes of all pages created thus far (or
  // since last call to ClearBoxes()).
  Boxa *GetPageBoxes() const;
  // Rotate the boxes on the most recent page by the given rotation.
  void RotatePageBoxes(float rotation);
  // Delete all boxes.
  void ClearBoxes();
  // Returns the boxes in a boxfile string.
  std::string GetBoxesStr();
  // Writes the boxes to a boxfile.
  void WriteAllBoxes(const std::string &filename);
  // Removes space-delimited words from the string that are not renderable by
  // the current font and returns the count of such words.
  int StripUnrenderableWords(std::string *utf8_text) const;
  // Insert a Word Joiner symbol (U+2060) between adjacent characters, excluding
  // spaces and combining types, in each word before rendering to ensure words
  // are not broken across lines. The output boxchars will not contain the
  // joiner.
  static std::string InsertWordJoiners(const std::string &text);
  // Helper functions to convert fullwidth Latin and halfwidth Basic Latin.
  static std::string ConvertBasicLatinToFullwidthLatin(const std::string &text);
  static std::string ConvertFullwidthLatinToBasicLatin(const std::string &text);
protected:
  // Init and free local renderer objects.
  void InitPangoCairo();
  void FreePangoCairo();
  // Set rendering properties.
  void SetLayoutProperties();
  void SetWordUnderlineAttributes(const std::string &page_text);
  // Compute bounding boxes around grapheme clusters.
  void ComputeClusterBoxes();
  void CorrectBoxPositionsToLayout(std::vector<BoxChar *> *boxchars);
  bool GetClusterStrings(std::vector<std::string> *cluster_text);
  int FindFirstPageBreakOffset(const char *text, int text_length);
  PangoFontInfo font_;
  // Page properties
  int page_width_, page_height_, h_margin_, v_margin_;
  // Text rendering properties
  double pen_color_[3];
  int char_spacing_;
  int leading_, resolution_;
  bool vertical_text_;
  bool gravity_hint_strong_;
  bool render_fullwidth_latin_;
  double underline_start_prob_;
  double underline_continuation_prob_;
  PangoUnderline underline_style_;
  // OpenType feature string applied to the layout (see set_features()).
  std::string features_;
  // Text filtering options
  bool drop_uncovered_chars_;
  bool strip_unrenderable_words_;
  bool add_ligatures_;
  bool output_word_boxes_;
  // Pango and cairo specific objects
  cairo_surface_t *surface_;
  cairo_t *cr_;
  PangoLayout *layout_;
  // Internal state of current page number, updated on successive calls to
  // RenderToImage()
  int start_box_; // Index into boxchars_ of the first box of the current page.
  int page_;
  // Boxes and associated text for all pages rendered with RenderToImage() since
  // the last call to ClearBoxes().
  std::vector<BoxChar *> boxchars_;
  int box_padding_;
  // Bounding boxes for pages since the last call to ClearBoxes().
  Boxa *page_boxes_;
  // Objects cached for subsequent calls to RenderAllFontsToImage()
  std::unordered_map<char32, int64_t> char_map_; // Time-saving char histogram.
  int total_chars_;         // Number in the string to be rendered.
  unsigned int font_index_; // Index of next font to use in font list.
  int last_offset_;         // Offset returned from last successful rendering
private:
  StringRenderer(const StringRenderer &) = delete;
  void operator=(const StringRenderer &) = delete;
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_STRINGRENDERER_H_
|
2301_81045437/tesseract
|
src/training/pango/stringrenderer.h
|
C++
|
apache-2.0
| 8,462
|
/**********************************************************************
* File: tlog.cpp
* Description: Variant of printf with logging level controllable by a
* commandline flag.
* Author: Ranjith Unnikrishnan
* Created: Wed Nov 20 2013
*
* (C) Copyright 2013, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "tlog.h"
using namespace tesseract;
// Storage for the --tlog_level flag declared in tlog.h: the minimum level a
// tlog() call must satisfy to produce output (default 0).
INT_PARAM_FLAG(tlog_level, 0, "Minimum logging level for tlog() output");
|
2301_81045437/tesseract
|
src/training/pango/tlog.cpp
|
C++
|
apache-2.0
| 1,061
|
/**********************************************************************
* File: tlog.h
* Description: Variant of printf with logging level controllable by a
* commandline flag.
* Author: Ranjith Unnikrishnan
* Created: Wed Nov 20 2013
*
* (C) Copyright 2013, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_TLOG_H_
#define TESSERACT_TRAINING_TLOG_H_
#include "export.h"
#include "commandlineflags.h"
#include "errcode.h"
#include "tprintf.h"
TESS_PANGO_TRAINING_API
DECLARE_INT_PARAM_FLAG(tlog_level);
// Variant guarded by the numeric logging level parameter FLAGS_tlog_level
// (default 0). Code using ParseCommandLineFlags() can control its value using
// the --tlog_level commandline argument. Otherwise it must be specified in a
// config file like other params.
// Wrapped in do { } while (0) so that tlog(...) ; behaves as exactly one
// statement: the bare-brace version breaks when used unbraced in an
// if/else ("else without if" after the stray semicolon). (level) is
// parenthesized so expression arguments keep the intended precedence.
#define tlog(level, ...)               \
  do {                                 \
    if (FLAGS_tlog_level >= (level)) { \
      tprintf(__VA_ARGS__);            \
    }                                  \
  } while (0)
#define TLOG_IS_ON(level) (FLAGS_tlog_level >= (level))
#endif // TESSERACT_TRAINING_TLOG_H_
|
2301_81045437/tesseract
|
src/training/pango/tlog.h
|
C
|
apache-2.0
| 1,722
|
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This program reads a unicharset file, puts the result in a UNICHARSET
// object, fills it with properties about the unichars it contains and writes
// the result back to a file.
#include "commandlineflags.h"
#include "commontraining.h" // CheckSharedLibraryVersion
#include "tprintf.h"
#include "unicharset_training_utils.h"
using namespace tesseract;
// The directory that is searched for universal script unicharsets.
static STRING_PARAM_FLAG(script_dir, "", "Directory name for input script unicharsets/xheights");
// Entry point: validates the required commandline flags, then delegates to
// SetPropertiesForInputFile() to fill in unichar properties and write the
// result. Returns EXIT_FAILURE if any mandatory flag is missing.
int main(int argc, char **argv) {
  tesseract::CheckSharedLibraryVersion();
  tesseract::ParseCommandLineFlags(argv[0], &argc, &argv, true);
  // Check validity of input flags.
  // Both the input (-U) and output (-O) unicharset paths are mandatory.
  if (FLAGS_U.empty() || FLAGS_O.empty()) {
    tprintf("Specify both input and output unicharsets!\n");
    return EXIT_FAILURE;
  }
  // The script directory is needed to locate universal script unicharsets
  // and xheights (see the script_dir flag declaration above).
  if (FLAGS_script_dir.empty()) {
    tprintf("Must specify a script_dir!\n");
    return EXIT_FAILURE;
  }
  // FLAGS_X (xheights file) may legitimately be empty; the callee handles it.
  tesseract::SetPropertiesForInputFile(FLAGS_script_dir.c_str(), FLAGS_U.c_str(), FLAGS_O.c_str(),
                                       FLAGS_X.c_str());
  return EXIT_SUCCESS;
}
|
2301_81045437/tesseract
|
src/training/set_unicharset_properties.cpp
|
C++
|
apache-2.0
| 1,673
|
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Filename: shapeclustering.cpp
// Purpose: Generates a master shape table to merge similarly-shaped
// training data of whole, partial or multiple characters.
// Author: Ray Smith
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "commontraining.h"
#include "mastertrainer.h"
#include "params.h"
using namespace tesseract;
static INT_PARAM_FLAG(display_cloud_font, -1, "Display cloud of this font, canonical_class1");
static INT_PARAM_FLAG(display_canonical_font, -1,
"Display canonical sample of this font, canonical_class2");
static STRING_PARAM_FLAG(canonical_class1, "", "Class to show ambigs for");
static STRING_PARAM_FLAG(canonical_class2, "", "Class to show ambigs for");
// Loads training data, if requested displays debug information, otherwise
// creates the master shape table by shape clustering and writes it to a file.
// If FLAGS_display_cloud_font is set, then the cloud features of
// FLAGS_canonical_class1/FLAGS_display_cloud_font are shown in green ON TOP
// OF the red canonical features of FLAGS_canonical_class2/
// FLAGS_display_canonical_font, so as to show which canonical features are
// NOT in the cloud.
// Otherwise, if FLAGS_canonical_class1 is set, prints a table of font-wise
// cluster distances between FLAGS_canonical_class1 and FLAGS_canonical_class2.
// Entry point: loads the training data, then either runs one of the two
// debug modes (sample display / canonical distance table) or performs the
// normal shape clustering and writes the master shape table.
int main(int argc, char **argv) {
  tesseract::CheckSharedLibraryVersion();
  ParseArguments(&argc, &argv);
  std::string file_prefix;
  auto master_trainer = tesseract::LoadTrainingData(argv + 1, false, nullptr, file_prefix);
  if (master_trainer == nullptr) {
    return EXIT_FAILURE;
  }
  // Debug mode 1: overlay cloud features on canonical features.
  if (FLAGS_display_cloud_font >= 0) {
#ifndef GRAPHICS_DISABLED
    master_trainer->DisplaySamples(FLAGS_canonical_class1.c_str(), FLAGS_display_cloud_font,
                                   FLAGS_canonical_class2.c_str(), FLAGS_display_canonical_font);
#endif  // !GRAPHICS_DISABLED
    return EXIT_SUCCESS;
  }
  // Debug mode 2: print font-wise cluster distances between the two classes.
  if (!FLAGS_canonical_class1.empty()) {
    master_trainer->DebugCanonical(FLAGS_canonical_class1.c_str(), FLAGS_canonical_class2.c_str());
    return EXIT_SUCCESS;
  }
  // Normal operation: cluster shapes and emit the master shape table.
  master_trainer->SetupMasterShapes();
  WriteShapeTable(file_prefix, master_trainer->master_shapes());
  return EXIT_SUCCESS;
} /* main */
|
2301_81045437/tesseract
|
src/training/shapeclustering.cpp
|
C++
|
apache-2.0
| 2,851
|
/**********************************************************************
* File: text2image.cpp
* Description: Program to generate OCR training pages. Given a text file it
* outputs an image with a given font and degradation.
*
* Note that since the results depend on the fonts available on
* your system, running the code on a different machine, or
* different OS, or even at a different time on the same machine,
* may produce different fonts even if --font is given explicitly.
* To see names of available fonts, use --list_available_fonts with
* the appropriate --fonts_dir path.
* Specifying --use_only_legacy_fonts will restrict the available
* fonts to those listed in legacy_fonts.h
* Authors: Ranjith Unnikrishnan, Ray Smith
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "boxchar.h"
#include "commandlineflags.h"
#include "commontraining.h" // CheckSharedLibraryVersion
#include "degradeimage.h"
#include "errcode.h"
#include "fileio.h"
#include "helpers.h"
#include "normstrngs.h"
#include "stringrenderer.h"
#include "tlog.h"
#include "unicharset.h"
#include <allheaders.h> // from leptonica
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <map>
#include <random>
#include <string>
#include <utility>
#include <vector>
#ifdef _MSC_VER
# define putenv(s) _putenv(s)
#endif
using namespace tesseract;
// A number with which to initialize the random number generator.
const int kRandomSeed = 0x18273645;
// The text input file.
static STRING_PARAM_FLAG(text, "", "File name of text input to process");
// The text output file.
static STRING_PARAM_FLAG(outputbase, "", "Basename for output image/box file");
// Degrade the rendered image to mimic scanner quality.
static BOOL_PARAM_FLAG(degrade_image, true,
"Degrade rendered image with speckle noise, dilation/erosion "
"and rotation");
// Rotate the rendered image to have more realistic glyph borders
static BOOL_PARAM_FLAG(rotate_image, true, "Rotate the image in a random way.");
// Degradation to apply to the image.
static INT_PARAM_FLAG(exposure, 0, "Exposure level in photocopier");
// Distort the rendered image by various means according to the bool flags.
static BOOL_PARAM_FLAG(distort_image, false, "Degrade rendered image with noise, blur, invert.");
// Distortion to apply to the image.
static BOOL_PARAM_FLAG(invert, true, "Invert the image");
// Distortion to apply to the image.
static BOOL_PARAM_FLAG(white_noise, true, "Add Gaussian Noise");
// Distortion to apply to the image.
static BOOL_PARAM_FLAG(smooth_noise, true, "Smoothen Noise");
// Distortion to apply to the image.
static BOOL_PARAM_FLAG(blur, true, "Blur the image");
#if 0
// Distortion to apply to the image.
static BOOL_PARAM_FLAG(perspective, false, "Generate Perspective Distortion");
// Distortion to apply to the image.
static INT_PARAM_FLAG(box_reduction, 0, "Integer reduction factor box_scale");
#endif
// Output image resolution.
static INT_PARAM_FLAG(resolution, 300, "Pixels per inch");
// Width of output image (in pixels).
static INT_PARAM_FLAG(xsize, 3600, "Width of output image");
// Max height of output image (in pixels).
static INT_PARAM_FLAG(ysize, 4800, "Height of output image");
// Max number of pages to produce.
static INT_PARAM_FLAG(max_pages, 0, "Maximum number of pages to output (0=unlimited)");
// Margin around text (in pixels).
static INT_PARAM_FLAG(margin, 100, "Margin round edges of image");
// Size of text (in points).
static INT_PARAM_FLAG(ptsize, 12, "Size of printed text");
// Inter-character space (in ems).
static DOUBLE_PARAM_FLAG(char_spacing, 0, "Inter-character space in ems");
// Sets the probability (value in [0, 1]) of starting to render a word with an
// underline. Words are assumed to be space-delimited.
static DOUBLE_PARAM_FLAG(underline_start_prob, 0,
"Fraction of words to underline (value in [0,1])");
// Set the probability (value in [0, 1]) of continuing a started underline to
// the next word.
static DOUBLE_PARAM_FLAG(underline_continuation_prob, 0,
"Fraction of words to underline (value in [0,1])");
// Inter-line space (in pixels).
static INT_PARAM_FLAG(leading, 12, "Inter-line space (in pixels)");
// Layout and glyph orientation on rendering.
static STRING_PARAM_FLAG(writing_mode, "horizontal",
"Specify one of the following writing"
" modes.\n"
"'horizontal' : Render regular horizontal text. (default)\n"
"'vertical' : Render vertical text. Glyph orientation is"
" selected by Pango.\n"
"'vertical-upright' : Render vertical text. Glyph "
" orientation is set to be upright.");
static INT_PARAM_FLAG(box_padding, 0, "Padding around produced bounding boxes");
static BOOL_PARAM_FLAG(strip_unrenderable_words, true,
"Remove unrenderable words from source text");
// Font name.
static STRING_PARAM_FLAG(font, "Arial", "Font description name to use");
static BOOL_PARAM_FLAG(ligatures, false, "Rebuild and render ligatures");
static BOOL_PARAM_FLAG(find_fonts, false, "Search for all fonts that can render the text");
static BOOL_PARAM_FLAG(render_per_font, true,
"If find_fonts==true, render each font to its own image. "
"Image filenames are of the form output_name.font_name.tif");
static DOUBLE_PARAM_FLAG(min_coverage, 1.0,
"If find_fonts==true, the minimum coverage the font has of "
"the characters in the text file to include it, between "
"0 and 1.");
static BOOL_PARAM_FLAG(list_available_fonts, false, "List available fonts and quit.");
static BOOL_PARAM_FLAG(render_ngrams, false,
"Put each space-separated entity from the"
" input file into one bounding box. The ngrams in the input"
" file will be randomly permuted before rendering (so that"
" there is sufficient variety of characters on each line).");
static BOOL_PARAM_FLAG(output_word_boxes, false,
"Output word bounding boxes instead of character boxes. "
"This is used for Cube training, and implied by "
"--render_ngrams.");
static STRING_PARAM_FLAG(unicharset_file, "",
"File with characters in the unicharset. If --render_ngrams"
" is true and --unicharset_file is specified, ngrams with"
" characters that are not in unicharset will be omitted");
static BOOL_PARAM_FLAG(bidirectional_rotation, false, "Rotate the generated characters both ways.");
static BOOL_PARAM_FLAG(only_extract_font_properties, false,
"Assumes that the input file contains a list of ngrams. Renders"
" each ngram, extracts spacing properties and records them in"
" output_base/[font_name].fontinfo file.");
// Use these flags to output zero-padded, square individual character images
static BOOL_PARAM_FLAG(output_individual_glyph_images, false,
"If true also outputs individual character images");
static INT_PARAM_FLAG(glyph_resized_size, 0,
"Each glyph is square with this side length in pixels");
static INT_PARAM_FLAG(glyph_num_border_pixels_to_pad, 0,
"Final_size=glyph_resized_size+2*glyph_num_border_pixels_to_pad");
namespace tesseract {
// Per-unichar horizontal spacing metrics, plus kerning deltas against
// specific following unichars.
struct SpacingProperties {
  SpacingProperties() = default;
  SpacingProperties(int b, int a) : x_gap_before(b), x_gap_after(a) {}
  // These values are obtained from FT_Glyph_Metrics struct
  // used by the FreeType font engine.
  int x_gap_before = 0;  // horizontal x bearing
  int x_gap_after = 0;   // horizontal advance - x_gap_before - width
  // Maps a following unichar to the observed kerned gap with it.
  std::map<std::string, int> kerned_x_gaps;
};
// Returns true if the box is degenerate (it has no bounding box at all) or
// its UTF-8 text consists entirely of whitespace.
static bool IsWhitespaceBox(const BoxChar *boxchar) {
  return (boxchar->box() == nullptr || SpanUTF8Whitespace(boxchar->ch().c_str()));
}
// Returns a copy of |in| with every non-overlapping occurrence of |oldsub|
// replaced by |newsub|. An empty |oldsub| is treated as "nothing to replace":
// without the guard, find("") matches at every position without advancing
// start_pos, so the loop would never terminate.
static std::string StringReplace(const std::string &in, const std::string &oldsub,
                                 const std::string &newsub) {
  if (oldsub.empty()) {
    return in;
  }
  std::string out;
  out.reserve(in.length());  // at least the unreplaced length is needed
  size_t start_pos = 0, pos;
  while ((pos = in.find(oldsub, start_pos)) != std::string::npos) {
    // Copy the unmatched prefix, then the replacement, then continue the
    // search after the matched substring.
    out.append(in.data() + start_pos, pos - start_pos);
    out.append(newsub.data(), newsub.length());
    start_pos = pos + oldsub.length();
  }
  out.append(in.data() + start_pos, in.length() - start_pos);
  return out;
}
// Assumes that each word (whitespace-separated entity) in text is a bigram.
// Renders the bigrams and calls FontInfo::GetSpacingProperties() to
// obtain spacing information. Produces the output .fontinfo file with a line
// per unichar of the form:
// unichar space_before space_after kerned1 kerned_space1 kerned2 ...
// Fox example, if unichar "A" has spacing of 0 pixels before and -1 pixels
// after, is kerned with "V" resulting in spacing of "AV" to be -7 and kerned
// with "T", such that "AT" has spacing of -5, the entry/line for unichar "A"
// in .fontinfo file will be:
// A 0 -1 T -5 V -7
// Renders |utf8_text| (assumed to be whitespace-separated bigrams) page by
// page, measures the on-image gap between the two glyphs of each bigram, and
// records per-unichar bearing/advance metrics plus kerning exceptions.
// The result is written to |output_base|.fontinfo in the format documented
// in the comment block above.
static void ExtractFontProperties(const std::string &utf8_text, StringRenderer *render,
                                  const std::string &output_base) {
  // Maps a unichar to its spacing metrics; iterators below cache lookups for
  // the first and second character of the current bigram.
  std::map<std::string, SpacingProperties> spacing_map;
  std::map<std::string, SpacingProperties>::iterator spacing_map_it0;
  std::map<std::string, SpacingProperties>::iterator spacing_map_it1;
  int x_bearing, x_advance;
  int len = utf8_text.length();
  int offset = 0;
  const char *text = utf8_text.c_str();
  // Render one page at a time; RenderToImage returns how many input bytes
  // were consumed by this page.
  while (offset < len) {
    offset += render->RenderToImage(text + offset, strlen(text + offset), nullptr);
    const std::vector<BoxChar *> &boxes = render->GetBoxes();
    // If the page break split a bigram, correct the offset so we try the bigram
    // on the next iteration.
    if (boxes.size() > 2 && !IsWhitespaceBox(boxes[boxes.size() - 1]) &&
        IsWhitespaceBox(boxes[boxes.size() - 2])) {
      if (boxes.size() > 3) {
        tprintf("WARNING: Adjusting to bad page break after '%s%s'\n",
                boxes[boxes.size() - 4]->ch().c_str(), boxes[boxes.size() - 3]->ch().c_str());
      }
      offset -= boxes[boxes.size() - 1]->ch().size();
    }
    // Walk the boxes two at a time: each (b, b+1) pair is one bigram.
    for (size_t b = 0; b < boxes.size(); b += 2) {
      while (b < boxes.size() && IsWhitespaceBox(boxes[b])) {
        ++b;
      }
      if (b + 1 >= boxes.size()) {
        break;
      }
      const std::string &ch0 = boxes[b]->ch();
      // We encountered a ligature. This happens in at least two scenarios:
      // One is when the rendered bigram forms a grapheme cluster (eg. the
      // second character in the bigram is a combining vowel), in which case we
      // correctly output only one bounding box.
      // A second far less frequent case is when caused some fonts like 'DejaVu
      // Sans Ultra-Light' force Pango to render a ligatured character even if
      // the input consists of the separated characters. NOTE(ranjith): As per
      // behdad@ this is not currently controllable at the level of the Pango
      // API.
      // The most frequent of all is a single character "word" made by the CJK
      // segmenter.
      // Safeguard against these cases here by just skipping the bigram.
      if (IsWhitespaceBox(boxes[b + 1])) {
        continue;
      }
      // Measured pixel gap between the two rendered glyphs of this bigram.
      int xgap = (boxes[b + 1]->box()->x - (boxes[b]->box()->x + boxes[b]->box()->w));
      spacing_map_it0 = spacing_map.find(ch0);
      int ok_count = 0;
      // First sighting of ch0: record its bearing and trailing gap.
      if (spacing_map_it0 == spacing_map.end() &&
          render->font().GetSpacingProperties(ch0, &x_bearing, &x_advance)) {
        spacing_map[ch0] = SpacingProperties(x_bearing, x_advance - x_bearing - boxes[b]->box()->w);
        spacing_map_it0 = spacing_map.find(ch0);
        ++ok_count;
      }
      const std::string &ch1 = boxes[b + 1]->ch();
      tlog(3, "%s%s\n", ch0.c_str(), ch1.c_str());
      spacing_map_it1 = spacing_map.find(ch1);
      // First sighting of ch1: same treatment.
      if (spacing_map_it1 == spacing_map.end() &&
          render->font().GetSpacingProperties(ch1, &x_bearing, &x_advance)) {
        spacing_map[ch1] =
            SpacingProperties(x_bearing, x_advance - x_bearing - boxes[b + 1]->box()->w);
        spacing_map_it1 = spacing_map.find(ch1);
        ++ok_count;
      }
      // If both glyphs yielded metrics on this very bigram and the measured
      // gap differs from the metric prediction, record it as a kerning pair.
      if (ok_count == 2 &&
          xgap != (spacing_map_it0->second.x_gap_after + spacing_map_it1->second.x_gap_before)) {
        spacing_map_it0->second.kerned_x_gaps[ch1] = xgap;
      }
    }
    render->ClearBoxes();
  }
  // Serialize: first line is the entry count, then one line per unichar:
  //   unichar gap_before gap_after num_kerned [kerned_char kerned_gap]...
  std::string output_string;
  const int kBufSize = 1024;
  char buf[kBufSize];
  snprintf(buf, kBufSize, "%d\n", static_cast<int>(spacing_map.size()));
  output_string.append(buf);
  std::map<std::string, SpacingProperties>::const_iterator spacing_map_it;
  for (spacing_map_it = spacing_map.begin(); spacing_map_it != spacing_map.end();
       ++spacing_map_it) {
    snprintf(buf, kBufSize, "%s %d %d %d", spacing_map_it->first.c_str(),
             spacing_map_it->second.x_gap_before, spacing_map_it->second.x_gap_after,
             static_cast<int>(spacing_map_it->second.kerned_x_gaps.size()));
    output_string.append(buf);
    std::map<std::string, int>::const_iterator kern_it;
    for (kern_it = spacing_map_it->second.kerned_x_gaps.begin();
         kern_it != spacing_map_it->second.kerned_x_gaps.end(); ++kern_it) {
      snprintf(buf, kBufSize, " %s %d", kern_it->first.c_str(), kern_it->second);
      output_string.append(buf);
    }
    output_string.append("\n");
  }
  File::WriteStringToFileOrDie(output_string, output_base + ".fontinfo");
}
// Crops each boxed character on the given page image belonging to
// |input_tiff_page|, resizes it to a FLAGS_glyph_resized_size square,
// zero-pads it by FLAGS_glyph_num_border_pixels_to_pad, and writes it out as
// <outputbase>_<N>.jpg. Returns true iff at least one glyph was saved.
// Fix over the original: intermediate Pix images are now destroyed on every
// error path (resize/pad/JPEG-write failure previously leaked them), and the
// misplaced '\n' inside the out-of-range message is moved after the ')'.
static bool MakeIndividualGlyphs(Image pix, const std::vector<BoxChar *> &vbox,
                                 const int input_tiff_page) {
  // If checks fail, return false without exiting text2image
  if (!pix) {
    tprintf("ERROR: MakeIndividualGlyphs(): Input Pix* is nullptr\n");
    return false;
  } else if (FLAGS_glyph_resized_size <= 0) {
    tprintf("ERROR: --glyph_resized_size must be positive\n");
    return false;
  } else if (FLAGS_glyph_num_border_pixels_to_pad < 0) {
    tprintf("ERROR: --glyph_num_border_pixels_to_pad must be 0 or positive\n");
    return false;
  }
  const int n_boxes = vbox.size();
  int n_boxes_saved = 0;
  int current_tiff_page = 0;
  int y_previous = 0;
  // static so glyph files are numbered consecutively across all pages of a
  // multi-page document (this function is called once per page).
  static int glyph_count = 0;
  for (int i = 0; i < n_boxes; i++) {
    // Get one bounding box
    Box *b = vbox[i]->mutable_box();
    if (!b) {
      continue;
    }
    const int x = b->x;
    const int y = b->y;
    const int w = b->w;
    const int h = b->h;
    // Check present tiff page (for multipage tiff): a large upward jump of y
    // relative to the previous box means we wrapped to the next page.
    if (y < y_previous - pixGetHeight(pix) / 10) {
      tprintf("ERROR: Wrap-around encountered, at i=%d\n", i);
      current_tiff_page++;
    }
    if (current_tiff_page < input_tiff_page) {
      continue;
    } else if (current_tiff_page > input_tiff_page) {
      break;
    }
    // Check box validity
    if (x < 0 || y < 0 || (x + w - 1) >= pixGetWidth(pix) || (y + h - 1) >= pixGetHeight(pix)) {
      tprintf(
          "ERROR: MakeIndividualGlyphs(): Index out of range, at i=%d"
          " (x=%d, y=%d, w=%d, h=%d)\n",
          i, x, y, w, h);
      continue;
    } else if (w < FLAGS_glyph_num_border_pixels_to_pad &&
               h < FLAGS_glyph_num_border_pixels_to_pad) {
      tprintf("ERROR: Input image too small to be a character, at i=%d\n", i);
      continue;
    }
    // Crop the boxed character
    Image pix_glyph = pixClipRectangle(pix, b, nullptr);
    if (!pix_glyph) {
      tprintf("ERROR: MakeIndividualGlyphs(): Failed to clip, at i=%d\n", i);
      continue;
    }
    // Resize to square
    Image pix_glyph_sq =
        pixScaleToSize(pix_glyph, FLAGS_glyph_resized_size, FLAGS_glyph_resized_size);
    if (!pix_glyph_sq) {
      tprintf("ERROR: MakeIndividualGlyphs(): Failed to resize, at i=%d\n", i);
      pix_glyph.destroy();  // don't leak the clipped glyph
      continue;
    }
    // Zero-pad
    Image pix_glyph_sq_pad = pixAddBorder(pix_glyph_sq, FLAGS_glyph_num_border_pixels_to_pad, 0);
    if (!pix_glyph_sq_pad) {
      tprintf("ERROR: MakeIndividualGlyphs(): Failed to zero-pad, at i=%d\n", i);
      pix_glyph.destroy();
      pix_glyph_sq.destroy();
      continue;
    }
    // Write out
    Image pix_glyph_sq_pad_8 = pixConvertTo8(pix_glyph_sq_pad, false);
    char filename[1024];
    snprintf(filename, 1024, "%s_%d.jpg", FLAGS_outputbase.c_str(), glyph_count++);
    if (pixWriteJpeg(filename, pix_glyph_sq_pad_8, 100, 0)) {
      tprintf(
          "ERROR: MakeIndividualGlyphs(): Failed to write JPEG to %s,"
          " at i=%d\n",
          filename, i);
      pix_glyph.destroy();
      pix_glyph_sq.destroy();
      pix_glyph_sq_pad.destroy();
      pix_glyph_sq_pad_8.destroy();
      continue;
    }
    pix_glyph.destroy();
    pix_glyph_sq.destroy();
    pix_glyph_sq_pad.destroy();
    pix_glyph_sq_pad_8.destroy();
    n_boxes_saved++;
    y_previous = y;
  }
  if (n_boxes_saved == 0) {
    return false;
  } else {
    tprintf("Total number of characters saved = %d\n", n_boxes_saved);
    return true;
  }
}
} // namespace tesseract
using tesseract::DegradeImage;
using tesseract::ExtractFontProperties;
using tesseract::File;
using tesseract::FontUtils;
using tesseract::SpanUTF8NotWhitespace;
using tesseract::SpanUTF8Whitespace;
using tesseract::StringRenderer;
// Top-level driver: validates flags, configures the StringRenderer, reads and
// optionally shuffles the input text, then renders page images (with optional
// degradation/distortion) and writes the .tif, .box and auxiliary outputs.
// Fix over the original: the unicharset_file validity check was inverted
// (it rejected --unicharset_file exactly when --render_ngrams WAS set, while
// the error message and the ngram-filtering code below only use the
// unicharset when --render_ngrams is set).
static int Main() {
  if (FLAGS_list_available_fonts) {
    const std::vector<std::string> &all_fonts = FontUtils::ListAvailableFonts();
    for (unsigned int i = 0; i < all_fonts.size(); ++i) {
      // Remove trailing comma: pango-font-description-to-string adds a comma
      // to some fonts.
      // See https://github.com/tesseract-ocr/tesseract/issues/408
      std::string font_name(all_fonts[i].c_str());
      if (font_name.back() == ',') {
        font_name.pop_back();
      }
      printf("%3u: %s\n", i, font_name.c_str());
      ASSERT_HOST_MSG(FontUtils::IsAvailableFont(all_fonts[i].c_str()),
                      "Font %s is unrecognized.\n", all_fonts[i].c_str());
    }
    return EXIT_SUCCESS;
  }
  // Check validity of input flags.
  if (FLAGS_text.empty()) {
    tprintf("'--text' option is missing!\n");
    return EXIT_FAILURE;
  }
  if (FLAGS_outputbase.empty()) {
    tprintf("'--outputbase' option is missing!\n");
    return EXIT_FAILURE;
  }
  // --unicharset_file is only consumed by the ngram-rendering path below, so
  // reject it when --render_ngrams is NOT set (the original check had the
  // condition inverted).
  if (!FLAGS_unicharset_file.empty() && !FLAGS_render_ngrams) {
    tprintf("Use '--unicharset_file' only if '--render_ngrams' is set.\n");
    return EXIT_FAILURE;
  }
  std::string font_name = FLAGS_font.c_str();
  if (!FLAGS_find_fonts && !FontUtils::IsAvailableFont(font_name.c_str())) {
    // Retry with a trailing comma: some Pango font descriptions only resolve
    // in that form (see the trailing-comma note in the listing branch above).
    font_name += ',';
    std::string pango_name;
    if (!FontUtils::IsAvailableFont(font_name.c_str(), &pango_name)) {
      tprintf("Could not find font named '%s'.\n", FLAGS_font.c_str());
      if (!pango_name.empty()) {
        tprintf("Pango suggested font '%s'.\n", pango_name.c_str());
      }
      tprintf("Please correct --font arg.\n");
      return EXIT_FAILURE;
    }
  }
  if (FLAGS_render_ngrams) {
    FLAGS_output_word_boxes = true;
  }
  char font_desc_name[1024];
  snprintf(font_desc_name, 1024, "%s %d", font_name.c_str(), static_cast<int>(FLAGS_ptsize));
  StringRenderer render(font_desc_name, FLAGS_xsize, FLAGS_ysize);
  render.set_add_ligatures(FLAGS_ligatures);
  render.set_leading(FLAGS_leading);
  render.set_resolution(FLAGS_resolution);
  render.set_char_spacing(FLAGS_char_spacing * FLAGS_ptsize);
  render.set_h_margin(FLAGS_margin);
  render.set_v_margin(FLAGS_margin);
  render.set_output_word_boxes(FLAGS_output_word_boxes);
  render.set_box_padding(FLAGS_box_padding);
  render.set_strip_unrenderable_words(FLAGS_strip_unrenderable_words);
  render.set_underline_start_prob(FLAGS_underline_start_prob);
  render.set_underline_continuation_prob(FLAGS_underline_continuation_prob);
  // Set text rendering orientation and their forms.
  if (FLAGS_writing_mode == "horizontal") {
    // Render regular horizontal text (default).
    render.set_vertical_text(false);
    render.set_gravity_hint_strong(false);
    render.set_render_fullwidth_latin(false);
  } else if (FLAGS_writing_mode == "vertical") {
    // Render vertical text. Glyph orientation is selected by Pango.
    render.set_vertical_text(true);
    render.set_gravity_hint_strong(false);
    render.set_render_fullwidth_latin(false);
  } else if (FLAGS_writing_mode == "vertical-upright") {
    // Render vertical text. Glyph orientation is set to be upright.
    // Also Basic Latin characters are converted to their fullwidth forms
    // on rendering, since fullwidth Latin characters are well designed to fit
    // vertical text lines, while .box files store halfwidth Basic Latin
    // unichars.
    render.set_vertical_text(true);
    render.set_gravity_hint_strong(true);
    render.set_render_fullwidth_latin(true);
  } else {
    tprintf("Invalid writing mode: %s\n", FLAGS_writing_mode.c_str());
    return EXIT_FAILURE;
  }
  std::string src_utf8;
  // This c_str is NOT redundant!
  if (!File::ReadFileToString(FLAGS_text.c_str(), &src_utf8)) {
    tprintf("Failed to read file: %s\n", FLAGS_text.c_str());
    return EXIT_FAILURE;
  }
  // Remove the unicode mark if present.
  if (strncmp(src_utf8.c_str(), "\xef\xbb\xbf", 3) == 0) {
    src_utf8.erase(0, 3);
  }
  tlog(1, "Render string of size %zu\n", src_utf8.length());
  if (FLAGS_render_ngrams || FLAGS_only_extract_font_properties) {
    // Try to preserve behavior of old text2image by expanding inter-word
    // spaces by a factor of 4.
    const std::string kSeparator = FLAGS_render_ngrams ? " " : "    ";
    // Also restrict the number of characters per line to try and avoid
    // line-breaking in the middle of words like "-A", "R$" etc. which are
    // otherwise allowed by the standard unicode line-breaking rules.
    const unsigned int kCharsPerLine = (FLAGS_ptsize > 20) ? 50 : 100;
    std::string rand_utf8;
    UNICHARSET unicharset;
    if (FLAGS_render_ngrams && !FLAGS_unicharset_file.empty() &&
        !unicharset.load_from_file(FLAGS_unicharset_file.c_str())) {
      tprintf("Failed to load unicharset from file %s\n", FLAGS_unicharset_file.c_str());
      return EXIT_FAILURE;
    }
    // If we are rendering ngrams that will be OCRed later, shuffle them so that
    // tesseract does not have difficulties finding correct baseline, word
    // spaces, etc.
    const char *str8 = src_utf8.c_str();
    int len = src_utf8.length();
    int step;
    // (offset, length) byte spans of each whitespace-separated entity.
    std::vector<std::pair<int, int>> offsets;
    int offset = SpanUTF8Whitespace(str8);
    while (offset < len) {
      step = SpanUTF8NotWhitespace(str8 + offset);
      offsets.emplace_back(offset, step);
      offset += step;
      offset += SpanUTF8Whitespace(str8 + offset);
    }
    if (FLAGS_render_ngrams) {
      // Fixed seed keeps output reproducible across runs.
      std::seed_seq seed{kRandomSeed};
      std::mt19937 random_gen(seed);
      std::shuffle(offsets.begin(), offsets.end(), random_gen);
    }
    for (size_t i = 0, line = 1; i < offsets.size(); ++i) {
      const char *curr_pos = str8 + offsets[i].first;
      int ngram_len = offsets[i].second;
      // Skip words that contain characters not in found in unicharset.
      std::string cleaned = UNICHARSET::CleanupString(curr_pos, ngram_len);
      if (!FLAGS_unicharset_file.empty() &&
          !unicharset.encodable_string(cleaned.c_str(), nullptr)) {
        continue;
      }
      rand_utf8.append(curr_pos, ngram_len);
      if (rand_utf8.length() > line * kCharsPerLine) {
        rand_utf8.append(" \n");
        ++line;
        if (line & 0x1) {
          rand_utf8.append(kSeparator);
        }
      } else {
        rand_utf8.append(kSeparator);
      }
    }
    tlog(1, "Rendered ngram string of size %zu\n", rand_utf8.length());
    src_utf8.swap(rand_utf8);
  }
  if (FLAGS_only_extract_font_properties) {
    tprintf("Extracting font properties only\n");
    ExtractFontProperties(src_utf8, &render, FLAGS_outputbase.c_str());
    tprintf("Done!\n");
    return EXIT_SUCCESS;
  }
  int im = 0;
  // Rotation applied to each page in pass 0, replayed mirrored in pass 1.
  std::vector<float> page_rotation;
  const char *to_render_utf8 = src_utf8.c_str();
  tesseract::TRand randomizer;
  randomizer.set_seed(kRandomSeed);
  std::vector<std::string> font_names;
  // We use a two pass mechanism to rotate images in both direction.
  // The first pass(0) will rotate the images in random directions and
  // the second pass(1) will mirror those rotations.
  int num_pass = FLAGS_bidirectional_rotation ? 2 : 1;
  for (int pass = 0; pass < num_pass; ++pass) {
    int page_num = 0;
    std::string font_used;
    for (size_t offset = 0;
         offset < strlen(to_render_utf8) && (FLAGS_max_pages == 0 || page_num < FLAGS_max_pages);
         ++im, ++page_num) {
      tlog(1, "Starting page %d\n", im);
      Image pix = nullptr;
      if (FLAGS_find_fonts) {
        offset += render.RenderAllFontsToImage(FLAGS_min_coverage, to_render_utf8 + offset,
                                               strlen(to_render_utf8 + offset), &font_used, &pix);
      } else {
        offset +=
            render.RenderToImage(to_render_utf8 + offset, strlen(to_render_utf8 + offset), &pix);
      }
      if (pix != nullptr) {
        float rotation = 0;
        if (pass == 1) {
          // Pass 2, do mirror rotation.
          rotation = -1 * page_rotation[page_num];
        }
        if (FLAGS_degrade_image) {
          pix = DegradeImage(pix, FLAGS_exposure, &randomizer,
                             FLAGS_rotate_image ? &rotation : nullptr);
        }
        if (FLAGS_distort_image) {
          // TODO: perspective is set to false and box_reduction to 1.
          pix = PrepareDistortedPix(pix, false, FLAGS_invert, FLAGS_white_noise, FLAGS_smooth_noise,
                                    FLAGS_blur, 1, &randomizer, nullptr);
        }
        // Keep the .box coordinates in sync with whatever rotation was applied.
        render.RotatePageBoxes(rotation);
        if (pass == 0) {
          // Pass 1, rotate randomly and store the rotation..
          page_rotation.push_back(rotation);
        }
        Image gray_pix = pixConvertTo8(pix, false);
        pix.destroy();
        Image binary = pixThresholdToBinary(gray_pix, 128);
        gray_pix.destroy();
        char tiff_name[1024];
        if (FLAGS_find_fonts) {
          if (FLAGS_render_per_font) {
            std::string fontname_for_file = tesseract::StringReplace(font_used, " ", "_");
            snprintf(tiff_name, 1024, "%s.%s.tif", FLAGS_outputbase.c_str(),
                     fontname_for_file.c_str());
            pixWriteTiff(tiff_name, binary, IFF_TIFF_G4, "w");
            tprintf("Rendered page %d to file %s\n", im, tiff_name);
          } else {
            font_names.push_back(font_used);
          }
        } else {
          // Single multi-page tiff: write mode for the first page, append after.
          snprintf(tiff_name, 1024, "%s.tif", FLAGS_outputbase.c_str());
          pixWriteTiff(tiff_name, binary, IFF_TIFF_G4, im == 0 ? "w" : "a");
          tprintf("Rendered page %d to file %s\n", im, tiff_name);
        }
        // Make individual glyphs
        if (FLAGS_output_individual_glyph_images) {
          if (!MakeIndividualGlyphs(binary, render.GetBoxes(), im)) {
            tprintf("ERROR: Individual glyphs not saved\n");
          }
        }
        binary.destroy();
      }
      if (FLAGS_find_fonts && offset != 0) {
        // We just want a list of names, or some sample images so we don't need
        // to render more than the first page of the text.
        break;
      }
    }
  }
  if (!FLAGS_find_fonts) {
    std::string box_name = FLAGS_outputbase.c_str();
    box_name += ".box";
    render.WriteAllBoxes(box_name);
  } else if (!FLAGS_render_per_font && !font_names.empty()) {
    std::string filename = FLAGS_outputbase.c_str();
    filename += ".fontlist.txt";
    FILE *fp = fopen(filename.c_str(), "wb");
    if (fp == nullptr) {
      tprintf("Failed to create output font list %s\n", filename.c_str());
    } else {
      for (auto &font_name : font_names) {
        fprintf(fp, "%s\n", font_name.c_str());
      }
      fclose(fp);
    }
  }
  return EXIT_SUCCESS;
}
// Process entry point: selects the pango-cairo backend, handles -v/--version,
// parses flags and delegates to Main().
int main(int argc, char **argv) {
  // Respect environment variable. could be:
  // fc (fontconfig), win32, and coretext
  // If not set force fontconfig for Mac OS.
  // See https://github.com/tesseract-ocr/tesseract/issues/736
  char *backend;
  backend = getenv("PANGOCAIRO_BACKEND");
  if (backend == nullptr) {
    // putenv() keeps a pointer to its argument, so the string must have
    // static storage duration.
    static char envstring[] = "PANGOCAIRO_BACKEND=fc";
    putenv(envstring);
  } else {
    printf(
        "Using '%s' as pango cairo backend based on environment "
        "variable.\n",
        backend);
  }
  tesseract::CheckSharedLibraryVersion();
  // Print version info before flag parsing, which would reject -v/--version.
  if (argc > 1) {
    if ((strcmp(argv[1], "-v") == 0) || (strcmp(argv[1], "--version") == 0)) {
      FontUtils::PangoFontTypeInfo();
      printf("Pango version: %s\n", pango_version_string());
    }
  }
  tesseract::ParseCommandLineFlags(argv[0], &argc, &argv, true);
  return Main();
}
|
2301_81045437/tesseract
|
src/training/text2image.cpp
|
C++
|
apache-2.0
| 29,584
|
#pragma once
#ifdef CMAKE_BUILD
# include <unicharset_training_export.h>
#endif
|
2301_81045437/tesseract
|
src/training/unicharset/export.h
|
C
|
apache-2.0
| 82
|
/**********************************************************************
* File: fileio.cpp
* Description: File I/O utilities.
* Author: Samuel Charron
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
* by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
* OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
**********************************************************************/
#ifdef _WIN32
# ifndef unlink
# include <io.h>
# endif
#else
# include <glob.h>
# include <unistd.h>
#endif
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <string>
#include "errcode.h"
#include "fileio.h"
#include "host.h" // includes windows.h for BOOL, ...
#include "tprintf.h"
namespace tesseract {
///////////////////////////////////////////////////////////////////////////////
// File::
///////////////////////////////////////////////////////////////////////////////
// Opens 'filename' with the given stdio 'mode'. Returns nullptr on failure;
// callers own the returned stream and must fclose it.
FILE *File::Open(const std::string &filename, const std::string &mode) {
  FILE *stream = fopen(filename.c_str(), mode.c_str());
  return stream;
}
// Opens 'filename' in 'mode'.
// NOTE(review): despite the "OrDie" name, this does not abort: it logs the
// failure and returns nullptr, so callers must still check the result.
FILE *File::OpenOrDie(const std::string &filename, const std::string &mode) {
  FILE *stream = fopen(filename.c_str(), mode.c_str());
  if (stream == nullptr) {
    tprintf("Unable to open '%s' in mode '%s': %s\n", filename.c_str(), mode.c_str(),
            strerror(errno));
  }
  return stream;
}
// Writes 'str' to 'filename', replacing any existing content.
// NOTE(review): despite the "OrDie" name, an open failure only logs and
// returns; only a failed fclose (lost buffered data) aborts via ASSERT_HOST.
void File::WriteStringToFileOrDie(const std::string &str, const std::string &filename) {
  FILE *stream = fopen(filename.c_str(), "wb");
  if (stream == nullptr) {
    tprintf("Unable to open '%s' for writing: %s\n", filename.c_str(), strerror(errno));
    return;
  }
  // fputs stops at the first NUL byte, so embedded NULs in 'str' are dropped.
  fputs(str.c_str(), stream);
  ASSERT_HOST(fclose(stream) == 0);
}
// Returns true if 'filename' exists and can be opened for binary reading.
bool File::Readable(const std::string &filename) {
  if (FILE *stream = fopen(filename.c_str(), "rb")) {
    fclose(stream);
    return true;
  }
  return false;
}
// Reads the whole of 'filename' into '*out'. Returns false if the file
// could not be opened or closing it failed.
bool File::ReadFileToString(const std::string &filename, std::string *out) {
  FILE *stream = File::Open(filename, "rb");
  if (stream == nullptr) {
    return false;
  }
  // InputBuffer takes ownership of 'stream'; CloseFile() closes it.
  InputBuffer in(stream);
  *out = "";
  // NOTE(review): the result of Read() is ignored, so a mid-file read error
  // is only surfaced if the close also fails — consider checking it.
  in.Read(out);
  return in.CloseFile();
}
// Concatenates 'prefix' and 'suffix' with a single '/' separator. No
// separator is added when 'prefix' is empty or already ends in '/'.
std::string File::JoinPath(const std::string &prefix, const std::string &suffix) {
  if (prefix.empty() || prefix.back() == '/') {
    return prefix + suffix;
  }
  return prefix + "/" + suffix;
}
bool File::Delete(const char *pathname) {
#if !defined(_WIN32) || defined(__MINGW32__)
const int status = unlink(pathname);
#else
const int status = _unlink(pathname);
#endif
if (status != 0) {
tprintf("ERROR: Unable to delete file '%s$: %s\n", pathname, strerror(errno));
return false;
}
return true;
}
#ifdef _WIN32
// Deletes all files matching the glob 'pattern' (Windows implementation).
// Returns true only if every matched file was deleted; a pattern matching
// nothing returns true.
bool File::DeleteMatchingFiles(const char *pattern) {
  WIN32_FIND_DATA data;
  BOOL result = TRUE;
  HANDLE handle = FindFirstFile(pattern, &data);
  bool all_deleted = true;
  if (handle != INVALID_HANDLE_VALUE) {
    for (; result; result = FindNextFile(handle, &data)) {
      // NOTE(review): cFileName carries no directory component, so a pattern
      // containing a path only deletes correctly in the current directory.
      all_deleted &= File::Delete(data.cFileName);
    }
    FindClose(handle);
  }
  return all_deleted;
}
#else
// Deletes all files matching the glob 'pattern' (POSIX implementation).
// Returns true only if every matched file was deleted; a pattern matching
// nothing returns true.
bool File::DeleteMatchingFiles(const char *pattern) {
  glob_t pglob;
  char **paths;
  bool all_deleted = true;
  if (glob(pattern, 0, nullptr, &pglob) == 0) {
    // gl_pathv holds full matched paths and is nullptr-terminated.
    for (paths = pglob.gl_pathv; *paths != nullptr; paths++) {
      all_deleted &= File::Delete(*paths);
    }
    globfree(&pglob);
  }
  return all_deleted;
}
#endif
///////////////////////////////////////////////////////////////////////////////
// InputBuffer::
///////////////////////////////////////////////////////////////////////////////
// Wraps 'stream'; the InputBuffer owns it and closes it on destruction
// unless CloseFile() was called first.
InputBuffer::InputBuffer(FILE *stream) : stream_(stream) {}
// The size hint is ignored; kept for interface compatibility.
InputBuffer::InputBuffer(FILE *stream, size_t) : stream_(stream) {}
InputBuffer::~InputBuffer() {
  // Close only if the caller did not already do so via CloseFile().
  if (stream_ != nullptr) {
    fclose(stream_);
  }
}
// Reads the stream to end-of-file, appending all data to '*out'.
// Returns false if a read error occurred, true otherwise.
bool InputBuffer::Read(std::string *out) {
  char buf[BUFSIZ];
  size_t len;  // fread returns size_t; the old int could mis-compare.
  while ((len = fread(buf, 1, BUFSIZ, stream_)) > 0) {
    // Append exactly 'len' bytes. The previous append(buf) after
    // NUL-termination truncated at any embedded NUL byte, corrupting
    // binary data read via "rb" streams.
    out->append(buf, len);
  }
  // fread returns 0 both at end-of-file and on error; distinguish here so
  // an error on the final read is not silently treated as success.
  if (ferror(stream_)) {
    clearerr(stream_);
    return false;
  }
  return true;
}
bool InputBuffer::CloseFile() {
int ret = fclose(stream_);
stream_ = nullptr;
return ret == 0;
}
///////////////////////////////////////////////////////////////////////////////
// OutputBuffer::
///////////////////////////////////////////////////////////////////////////////
// Wraps 'stream'; the OutputBuffer owns it and closes it on destruction
// unless CloseFile() was called first.
OutputBuffer::OutputBuffer(FILE *stream) : stream_(stream) {}
// The size hint is ignored; kept for interface compatibility.
OutputBuffer::OutputBuffer(FILE *stream, size_t) : stream_(stream) {}
OutputBuffer::~OutputBuffer() {
  // Close only if the caller did not already do so via CloseFile().
  if (stream_ != nullptr) {
    fclose(stream_);
  }
}
// Appends 'str' to the wrapped stream. Write errors are not reported here;
// they surface (at best) via the return value of CloseFile().
// Note fputs stops at the first NUL byte, so embedded NULs are dropped.
void OutputBuffer::WriteString(const std::string &str) {
  fputs(str.c_str(), stream_);
}
bool OutputBuffer::CloseFile() {
int ret = fclose(stream_);
stream_ = nullptr;
return ret == 0;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/unicharset/fileio.cpp
|
C++
|
apache-2.0
| 5,219
|
/**********************************************************************
* File: fileio.h
* Description: File I/O utilities.
* Author: Samuel Charron
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
* by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
* OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_FILEIO_H_
#define TESSERACT_TRAINING_FILEIO_H_
#include "export.h"
#include "helpers.h" // for split
#include "serialis.h" // for LoadDataFromFile
#include <tesseract/export.h>
#include <cstddef>
#include <cstdio>
#include <string>
namespace tesseract {
// Reads a file as a vector of string.
// Reads a file and splits its content into one string per '\n'-separated
// line, stored in '*lines'. Returns false if the file could not be loaded.
inline bool LoadFileLinesToStrings(const char *filename, std::vector<std::string> *lines) {
  std::vector<char> data;
  if (!LoadDataFromFile(filename, &data)) {
    return false;
  }
  // data.data() is valid even for an empty vector; the previous &data[0]
  // was undefined behavior when the file was empty.
  // TODO: optimize.
  std::string lines_str(data.data(), data.size());
  *lines = split(lines_str, '\n');
  return true;
}
// A class to manipulate FILE*s.
// A collection of static helpers for working with FILE*s and paths.
class TESS_UNICHARSET_TRAINING_API File {
public:
  // Try to open the file 'filename' in mode 'mode'.
  // NOTE(review): per the implementation, this logs and returns nullptr on
  // failure rather than stopping the program — callers must check.
  static FILE *OpenOrDie(const std::string &filename, const std::string &mode);
  // Opens 'filename' in 'mode'; returns nullptr on failure without logging.
  static FILE *Open(const std::string &filename, const std::string &mode);
  // Try to open the file 'filename' and to write 'str' in it.
  // A failed open only logs; a failed close aborts.
  static void WriteStringToFileOrDie(const std::string &str, const std::string &filename);
  // Return true if the file 'filename' is readable.
  static bool Readable(const std::string &filename);
  // Reads the whole file into '*out'; returns false on failure.
  static bool ReadFileToString(const std::string &filename, std::string *out);
  // Helper methods
  // Concatenate file paths removing any extra intervening '/' symbols.
  static std::string JoinPath(const std::string &prefix, const std::string &suffix);
  // Delete a filename or all filenames matching a glob pattern.
  static bool Delete(const char *pathname);
  static bool DeleteMatchingFiles(const char *pattern);
};
// A class to manipulate Files for reading.
// A class to manipulate Files for reading. Owns the wrapped FILE*: it is
// closed by CloseFile() or, failing that, by the destructor.
class TESS_UNICHARSET_TRAINING_API InputBuffer {
public:
  explicit InputBuffer(FILE *stream);
  // 'size' is ignored; kept for interface compatibility.
  InputBuffer(FILE *stream, size_t size);
  ~InputBuffer();
  // Read data until end-of-file.
  // The data is stored in '*out'.
  // Return false if an error occurs, true otherwise.
  bool Read(std::string *out);
  // Close the FILE* used by InputBuffer.
  // Return false if an error occurs, true otherwise.
  bool CloseFile();
private:
  // Owned stream; nullptr after CloseFile().
  FILE *stream_;
};
// A class to manipulate Files for writing.
// A class to manipulate Files for writing. Owns the wrapped FILE*: it is
// closed by CloseFile() or, failing that, by the destructor.
class TESS_UNICHARSET_TRAINING_API OutputBuffer {
public:
  explicit OutputBuffer(FILE *stream);
  // 'size' is ignored; kept for interface compatibility.
  OutputBuffer(FILE *stream, size_t size);
  ~OutputBuffer();
  // Write string 'str' to the open FILE*.
  void WriteString(const std::string &str);
  // Close the FILE* used by OutputBuffer.
  // Return false if an error occurs, true otherwise.
  bool CloseFile();
private:
  // Owned stream; nullptr after CloseFile().
  FILE *stream_;
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_FILEIO_H_
|
2301_81045437/tesseract
|
src/training/unicharset/fileio.h
|
C++
|
apache-2.0
| 3,566
|
///////////////////////////////////////////////////////////////////////
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "icuerrorcode.h"
namespace tesseract {
// Destructor.
// It is defined here, so the compiler can create a single vtable
// instead of weak vtables in every compilation unit.
IcuErrorCode::~IcuErrorCode() {
  // Any ICU failure that was never checked is reported (and, per
  // handleFailure in the header, terminates the process) here.
  if (isFailure()) {
    handleFailure();
  }
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/training/unicharset/icuerrorcode.cpp
|
C++
|
apache-2.0
| 988
|
/**********************************************************************
* File: icuerrorcode.h
* Description: Wrapper class for UErrorCode, with conversion operators for
* direct use in ICU C and C++ APIs.
* Author: Fredrik Roubert
* Created: Thu July 4 2013
*
* Features:
* - The constructor initializes the internal UErrorCode to U_ZERO_ERROR,
* removing one common source of errors.
* - Same use in C APIs taking a UErrorCode* (pointer) and C++ taking
* UErrorCode& (reference), via conversion operators.
* - Automatic checking for success when it goes out of scope. On failure,
* the destructor will log an error message and exit.
*
* Most of ICU will handle errors gracefully and provide sensible fallbacks.
* Using IcuErrorCode, it is therefore possible to write very compact code
* that does sensible things on failure and provides logging for debugging.
*
* Example:
* IcuErrorCode icuerrorcode;
* return collator.compareUTF8(a, b, icuerrorcode) == UCOL_EQUAL;
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_CCUTIL_ICUERRORCODE_H_
#define TESSERACT_CCUTIL_ICUERRORCODE_H_
#include <cstdlib> // for exit
#include "tprintf.h"
#include "unicode/errorcode.h" // From libicu
namespace tesseract {
// RAII wrapper for UErrorCode: starts at U_ZERO_ERROR and, if a failure is
// still pending at destruction, logs the ICU error name and exits.
class IcuErrorCode : public icu::ErrorCode {
public:
  IcuErrorCode() = default;
  // Defined out-of-line (icuerrorcode.cpp) to anchor the vtable.
  ~IcuErrorCode() override;
protected:
  // Logs the ICU error name and terminates with the raw UErrorCode value
  // as the process exit status.
  void handleFailure() const override {
    tprintf("ICU ERROR: %s\n", errorName());
    exit(errorCode);
  }
private:
  // Disallow implicit copying of object.
  IcuErrorCode(const IcuErrorCode &) = delete;
  void operator=(const IcuErrorCode &) = delete;
};
} // namespace tesseract
#endif // TESSERACT_CCUTIL_ICUERRORCODE_H_
|
2301_81045437/tesseract
|
src/training/unicharset/icuerrorcode.h
|
C++
|
apache-2.0
| 2,340
|
// Copyright 2017 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
// Purpose: Collection of convenience functions to simplify creation of the
// unicharset, recoder, and dawgs for an LSTM model.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lang_model_helpers.h"
#include "dawg.h"
#include "fileio.h"
#include "tessdatamanager.h"
#include "trie.h"
#include "unicharcompress.h"
#include <cstdlib>
#include <sys/stat.h>
#include <sys/types.h>
#if defined(_WIN32)
# include <direct.h>
#endif
namespace tesseract {
// Helper makes a filename (<output_dir>/<lang>/<lang><suffix>) and writes data
// to the file, using writer if not null, otherwise, a default writer.
// Default writer will overwrite any existing file, but a supplied writer
// can do its own thing. If lang is empty, returns true but does nothing.
// NOTE that suffix should contain any required . for the filename.
bool WriteFile(const std::string &output_dir, const std::string &lang, const std::string &suffix,
               const std::vector<char> &data, FileWriter writer) {
  if (lang.empty()) {
    return true;
  }
  std::string dirname = output_dir + "/" + lang;
  // Attempt to make the directory, but ignore errors, as it may not be a
  // standard filesystem, and the writer will complain if not successful.
#if defined(_WIN32)
  _mkdir(dirname.c_str());
#else
  mkdir(dirname.c_str(), S_IRWXU | S_IRWXG);
#endif
  std::string filename = dirname + "/" + lang + suffix;
  // A null writer falls back to the default file writer, which overwrites.
  if (writer == nullptr) {
    return SaveDataToFile(data, filename.c_str());
  } else {
    return (*writer)(data, filename.c_str());
  }
}
// Helper reads a file with optional reader and returns a string.
// On failure emits a warning message and returns an empty string.
std::string ReadFile(const std::string &filename, FileReader reader) {
  if (filename.empty()) {
    return std::string();
  }
  std::vector<char> data;
  bool read_result;
  if (reader == nullptr) {
    read_result = LoadDataFromFile(filename.c_str(), &data);
  } else {
    read_result = (*reader)(filename.c_str(), &data);
  }
  if (read_result) {
    // data.data() is valid even when the file was empty; the previous
    // &data[0] was undefined behavior on an empty vector.
    return std::string(data.data(), data.size());
  }
  tprintf("Failed to read data from: %s\n", filename.c_str());
  return std::string();
}
// Helper writes the unicharset both into the traineddata (as the
// TESSDATA_LSTM_UNICHARSET entry) and to <output_dir>/<lang>/<lang>.unicharset.
// Returns false if serialization or the file write fails.
bool WriteUnicharset(const UNICHARSET &unicharset, const std::string &output_dir,
                     const std::string &lang, FileWriter writer, TessdataManager *traineddata) {
  std::vector<char> unicharset_data;
  TFile fp;
  // Serialize to an in-memory buffer so the same bytes can go to both sinks.
  fp.OpenWrite(&unicharset_data);
  if (!unicharset.save_to_file(&fp)) {
    return false;
  }
  traineddata->OverwriteEntry(TESSDATA_LSTM_UNICHARSET, &unicharset_data[0],
                              unicharset_data.size());
  return WriteFile(output_dir, lang, ".unicharset", unicharset_data, writer);
}
// Helper creates the recoder, writes its binary form to the traineddata
// (TESSDATA_LSTM_RECODER entry), and a human-readable form to
// <output_dir>/<lang>/<lang>.charset_size=<code_range>.txt.
// Returns false on encoding or serialization failure.
bool WriteRecoder(const UNICHARSET &unicharset, bool pass_through, const std::string &output_dir,
                  const std::string &lang, FileWriter writer, std::string *radical_table_data,
                  TessdataManager *traineddata) {
  UnicharCompress recoder;
  // Where the unicharset is carefully setup already to contain a good
  // compact encoding, use a pass-through recoder that does nothing.
  // For scripts that have a large number of unicodes (Han, Hangul) we want
  // to use the recoder to compress the symbol space by re-encoding each
  // unicode as multiple codes from a smaller 'alphabet' that are related to the
  // shapes in the character. Hangul Jamo is a perfect example of this.
  // See the Hangul Syllables section, sub-section "Equivalence" in:
  // http://www.unicode.org/versions/Unicode10.0.0/ch18.pdf
  if (pass_through) {
    recoder.SetupPassThrough(unicharset);
  } else {
    // The null (space-filler) code must be a real unused code: UNICHAR_BROKEN
    // when the special codes exist, otherwise one past the end of the set.
    int null_char = unicharset.has_special_codes() ? UNICHAR_BROKEN : unicharset.size();
    tprintf("Null char=%d\n", null_char);
    if (!recoder.ComputeEncoding(unicharset, null_char, radical_table_data)) {
      tprintf("Creation of encoded unicharset failed!!\n");
      return false;
    }
  }
  TFile fp;
  std::vector<char> recoder_data;
  fp.OpenWrite(&recoder_data);
  if (!recoder.Serialize(&fp)) {
    return false;
  }
  traineddata->OverwriteEntry(TESSDATA_LSTM_RECODER, &recoder_data[0], recoder_data.size());
  // Reuse recoder_data as the buffer for the human-readable dump: resize to
  // the exact encoding length, then overwrite it entirely with the text.
  // NOTE(review): assumes GetEncodingAsString never returns empty — &x[0] on
  // an empty string/vector would be questionable; confirm if that can happen.
  std::string encoding = recoder.GetEncodingAsString(unicharset);
  recoder_data.resize(encoding.length(), 0);
  memcpy(&recoder_data[0], &encoding[0], encoding.length());
  std::string suffix;
  suffix += ".charset_size=" + std::to_string(recoder.code_range());
  suffix += ".txt";
  return WriteFile(output_dir, lang, suffix, recoder_data, writer);
}
// Helper builds a dawg from the given words, using the unicharset as coding,
// and reverse_policy for LTR/RTL, and overwrites file_type in the traineddata.
// Returns false if the resulting dawg is empty or serialization fails.
static bool WriteDawg(const std::vector<std::string> &words, const UNICHARSET &unicharset,
                      Trie::RTLReversePolicy reverse_policy, TessdataType file_type,
                      TessdataManager *traineddata) {
  // The first 3 arguments are not used in this case.
  Trie trie(DAWG_TYPE_WORD, "", SYSTEM_DAWG_PERM, unicharset.size(), 0);
  trie.add_word_list(words, unicharset, reverse_policy);
  tprintf("Reducing Trie to SquishedDawg\n");
  std::unique_ptr<SquishedDawg> dawg(trie.trie_to_dawg());
  // An empty dawg (no edges) means no word was encodable; treat as failure.
  if (dawg == nullptr || dawg->NumEdges() == 0) {
    return false;
  }
  TFile fp;
  std::vector<char> dawg_data;
  fp.OpenWrite(&dawg_data);
  if (!dawg->write_squished_dawg(&fp)) {
    return false;
  }
  traineddata->OverwriteEntry(file_type, &dawg_data[0], dawg_data.size());
  return true;
}
// Builds and writes the dawgs, given a set of words, punctuation
// patterns, number patterns, to the traineddata. Encoding uses the given
// unicharset, and the punc dawgs is reversed if lang_is_rtl.
// Returns false if puncs is empty or any dawg build fails.
static bool WriteDawgs(const std::vector<std::string> &words, const std::vector<std::string> &puncs,
                       const std::vector<std::string> &numbers, bool lang_is_rtl,
                       const UNICHARSET &unicharset, TessdataManager *traineddata) {
  if (puncs.empty()) {
    tprintf("Must have non-empty puncs list to use language models!!\n");
    return false;
  }
  // For each of the dawg types, make the dawg, and write to traineddata.
  // Dawgs are reversed as follows:
  // Words: According to the word content.
  // Puncs: According to lang_is_rtl.
  // Numbers: Never.
  // System dawg (main wordlist).
  if (!words.empty() && !WriteDawg(words, unicharset, Trie::RRP_REVERSE_IF_HAS_RTL,
                                   TESSDATA_LSTM_SYSTEM_DAWG, traineddata)) {
    return false;
  }
  // punc/punc-dawg. Unlike words/numbers, the punc dawg is mandatory.
  Trie::RTLReversePolicy reverse_policy =
      lang_is_rtl ? Trie::RRP_FORCE_REVERSE : Trie::RRP_DO_NO_REVERSE;
  if (!WriteDawg(puncs, unicharset, reverse_policy, TESSDATA_LSTM_PUNC_DAWG, traineddata)) {
    return false;
  }
  // numbers/number-dawg.
  if (!numbers.empty() && !WriteDawg(numbers, unicharset, Trie::RRP_DO_NO_REVERSE,
                                     TESSDATA_LSTM_NUMBER_DAWG, traineddata)) {
    return false;
  }
  return true;
}
// The main function for combine_lang_model.cpp.
// Combines the unicharset, recoder, optional config and dawgs into a single
// traineddata file under <output_dir>/<lang>/<lang>.traineddata.
// Returns EXIT_SUCCESS or EXIT_FAILURE for error.
int CombineLangModel(const UNICHARSET &unicharset, const std::string &script_dir,
                     const std::string &version_str, const std::string &output_dir,
                     const std::string &lang, bool pass_through_recoder,
                     const std::vector<std::string> &words, const std::vector<std::string> &puncs,
                     const std::vector<std::string> &numbers, bool lang_is_rtl, FileReader reader,
                     FileWriter writer) {
  // Build the traineddata file.
  TessdataManager traineddata;
  if (!version_str.empty()) {
    traineddata.SetVersionString(traineddata.VersionString() + ":" + version_str);
  }
  // Unicharset and recoder.
  if (!WriteUnicharset(unicharset, output_dir, lang, writer, &traineddata)) {
    tprintf("Error writing unicharset!!\n");
    return EXIT_FAILURE;
  }
  // If there is a config file, read it and add to traineddata.
  std::string config_filename = script_dir + "/" + lang + "/" + lang + ".config";
  std::string config_file = ReadFile(config_filename, reader);
  if (config_file.length() > 0) {
    traineddata.OverwriteEntry(TESSDATA_LANG_CONFIG, &config_file[0], config_file.length());
  } else {
    // The config file is the only optional input. This message used to be
    // printed (unconditionally) after the unicharset write, which was
    // misleading; it belongs here, when the config is actually absent.
    tprintf("Config file is optional, continuing...\n");
  }
  // The radical-stroke table is required to build the recoder.
  std::string radical_filename = script_dir + "/radical-stroke.txt";
  std::string radical_data = ReadFile(radical_filename, reader);
  if (radical_data.empty()) {
    tprintf("Error reading radical code table %s\n", radical_filename.c_str());
    return EXIT_FAILURE;
  }
  if (!WriteRecoder(unicharset, pass_through_recoder, output_dir, lang, writer, &radical_data,
                    &traineddata)) {
    tprintf("Error writing recoder!!\n");
    // A traineddata without a recoder is unusable for LSTM recognition, so
    // this failure is now fatal (it was previously only logged).
    return EXIT_FAILURE;
  }
  // Dawgs are built only if at least one wordlist was supplied.
  if (!words.empty() || !puncs.empty() || !numbers.empty()) {
    if (!WriteDawgs(words, puncs, numbers, lang_is_rtl, unicharset, &traineddata)) {
      tprintf("Error during conversion of wordlists to DAWGs!!\n");
      return EXIT_FAILURE;
    }
  }
  // Traineddata file.
  std::vector<char> traineddata_data;
  traineddata.Serialize(&traineddata_data);
  if (!WriteFile(output_dir, lang, ".traineddata", traineddata_data, writer)) {
    tprintf("Error writing output traineddata file!!\n");
    return EXIT_FAILURE;
  }
  // Trailing newline added: the message previously ran into the next output.
  tprintf("Created %s/%s/%s.traineddata\n", output_dir.c_str(), lang.c_str(), lang.c_str());
  return EXIT_SUCCESS;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/unicharset/lang_model_helpers.cpp
|
C++
|
apache-2.0
| 10,200
|
// Copyright 2017 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
// Purpose: Collection of convenience functions to simplify creation of the
// unicharset, recoder, and dawgs for an LSTM model.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef TESSERACT_TRAINING_LANG_MODEL_HELPERS_H_
#define TESSERACT_TRAINING_LANG_MODEL_HELPERS_H_
#include "export.h"
#include "serialis.h"
#include "tessdatamanager.h"
#include "unicharset.h"
#include <string>
namespace tesseract {
// Helper makes a filename (<output_dir>/<lang>/<lang><suffix>) and writes data
// to the file, using writer if not null, otherwise, a default writer.
// Default writer will overwrite any existing file, but a supplied writer
// can do its own thing. If lang is empty, returns true but does nothing.
// NOTE that suffix should contain any required . for the filename.
TESS_UNICHARSET_TRAINING_API
bool WriteFile(const std::string &output_dir, const std::string &lang, const std::string &suffix,
const std::vector<char> &data, FileWriter writer);
// Helper reads a file with optional reader and returns a string.
// On failure emits a warning message and returns an empty string.
TESS_UNICHARSET_TRAINING_API
std::string ReadFile(const std::string &filename, FileReader reader = nullptr);
// Helper writes the unicharset to file and to the traineddata.
bool WriteUnicharset(const UNICHARSET &unicharset, const std::string &output_dir,
const std::string &lang, FileWriter writer, TessdataManager *traineddata);
// Helper creates the recoder from the unicharset and writes it to the
// traineddata, with a human-readable form to file at:
// <output_dir>/<lang>/<lang>.charset_size=<num> for some num being the size
// of the re-encoded character set. The charset_size file is written using
// writer if not null, or using a default file writer otherwise, overwriting
// any existing content.
// If pass_through is true, then the recoder will be a no-op, passing the
// unicharset codes through unchanged. Otherwise, the recoder will "compress"
// the unicharset by encoding Hangul in Jamos, decomposing multi-unicode
// symbols into sequences of unicodes, and encoding Han using the data in the
// radical_table_data, which must be the content of the file:
// langdata/radical-stroke.txt.
bool WriteRecoder(const UNICHARSET &unicharset, bool pass_through, const std::string &output_dir,
const std::string &lang, FileWriter writer, std::string *radical_table_data,
TessdataManager *traineddata);
// The main function for combine_lang_model.cpp.
// Returns EXIT_SUCCESS or EXIT_FAILURE for error.
// unicharset: can be a hand-created file with incomplete fields. Its basic
// and script properties will be set before it is used.
// script_dir: should point to the langdata (github repo) directory.
// version_str: arbitrary version label.
// Output files will be written to <output_dir>/<lang>/<lang>.*
// If pass_through_recoder is true, the unicharset will be used unchanged as
// labels in the classifier, otherwise, the unicharset will be "compressed" to
// make the recognition task simpler and faster.
// The words/puncs/numbers lists may be all empty. If any are non-empty then
// puncs must be non-empty.
// lang_is_rtl indicates that the language is generally written from right
// to left (eg Arabic/Hebrew).
TESS_UNICHARSET_TRAINING_API
int CombineLangModel(const UNICHARSET &unicharset, const std::string &script_dir,
const std::string &version_str, const std::string &output_dir,
const std::string &lang, bool pass_through_recoder,
const std::vector<std::string> &words, const std::vector<std::string> &puncs,
const std::vector<std::string> &numbers, bool lang_is_rtl, FileReader reader,
FileWriter writer);
} // namespace tesseract
#endif // TESSERACT_TRAINING_LANG_MODEL_HELPERS_H_
|
2301_81045437/tesseract
|
src/training/unicharset/lang_model_helpers.h
|
C++
|
apache-2.0
| 4,497
|
///////////////////////////////////////////////////////////////////////
// File: lstmtester.cpp
// Description: Top-level line evaluation class for LSTM-based networks.
// Author: Ray Smith
//
// (C) Copyright 2016, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#include "lstmtester.h"
#include <iomanip> // for std::setprecision
#include <thread> // for std::thread
#include "fileio.h" // for LoadFileLinesToStrings
namespace tesseract {
LSTMTester::LSTMTester(int64_t max_memory) : test_data_(max_memory) {}
// Loads the lstmf files named (one per line) in 'filenames_file' into memory
// ready for testing. Returns false if the list file could not be read or
// nothing was loaded.
bool LSTMTester::LoadAllEvalData(const char *filenames_file) {
  std::vector<std::string> filenames;
  if (LoadFileLinesToStrings(filenames_file, &filenames)) {
    return LoadAllEvalData(filenames);
  }
  tprintf("Failed to load list of eval filenames from %s\n", filenames_file);
  return false;
}
// Loads a set of lstmf files that were created using the lstm.train config to
// tesseract into memory ready for testing. Returns false if nothing was
// loaded. Any previously loaded data is discarded first.
bool LSTMTester::LoadAllEvalData(const std::vector<std::string> &filenames) {
  test_data_.Clear();
  bool result = test_data_.LoadDocuments(filenames, CS_SEQUENTIAL, nullptr);
  // Cache the page count; it is the loop bound for RunEvalSync.
  total_pages_ = test_data_.TotalPages();
  return result;
}
// Runs an evaluation asynchronously on the stored data and returns a string
// describing the results of the previous test. A null training_errors means
// the caller is only polling for the previous result.
std::string LSTMTester::RunEvalAsync(int iteration, const double *training_errors,
                                     const TessdataManager &model_mgr, int training_stage) {
  std::string result;
  if (total_pages_ == 0) {
    result += "No test data at iteration " + std::to_string(iteration);
    return result;
  }
  // Only one async evaluation may run at a time; skip if one is in flight.
  if (!LockIfNotRunning()) {
    result += "Previous test incomplete, skipping test at iteration " + std::to_string(iteration);
    return result;
  }
  // Save the args in members so the detached thread can read them; the
  // members outlive the thread because the lock stays held until it is done.
  std::string prev_result = test_result_;
  test_result_ = "";
  if (training_errors != nullptr) {
    test_iteration_ = iteration;
    test_training_errors_ = training_errors;
    test_model_mgr_ = model_mgr;
    test_training_stage_ = training_stage;
    std::thread t(&LSTMTester::ThreadFunc, this);
    // Detached: ThreadFunc releases the lock via UnlockRunning when done.
    t.detach();
  } else {
    // Polling only: nothing to run, release the lock immediately.
    UnlockRunning();
  }
  return prev_result;
}
// Runs an evaluation synchronously on the stored data and returns a string
// describing the results (BCER/BWER percentages).
// 'training_errors' matches the TestCallback signature but is not used here.
// NOTE(review): if total_pages_ is 0 the final divisions are by zero; callers
// are expected to guard (RunEvalAsync does) — confirm for direct callers.
std::string LSTMTester::RunEvalSync(int iteration, const double *training_errors,
                                    const TessdataManager &model_mgr, int training_stage,
                                    int verbosity) {
  LSTMTrainer trainer;
  trainer.InitCharSet(model_mgr);
  TFile fp;
  if (!model_mgr.GetComponent(TESSDATA_LSTM, &fp) || !trainer.DeSerialize(&model_mgr, &fp)) {
    return "Deserialize failed";
  }
  int eval_iteration = 0;
  double char_error = 0.0;
  double word_error = 0.0;
  int error_count = 0;
  // Iterate until total_pages_ pages have been successfully evaluated;
  // unencodable pages are skipped and do not count.
  while (error_count < total_pages_) {
    const ImageData *trainingdata = test_data_.GetPageBySerial(eval_iteration);
    trainer.SetIteration(++eval_iteration);
    NetworkIO fwd_outputs, targets;
    Trainability result = trainer.PrepareForBackward(trainingdata, &fwd_outputs, &targets);
    if (result != UNENCODABLE) {
      char_error += trainer.NewSingleError(tesseract::ET_CHAR_ERROR);
      word_error += trainer.NewSingleError(tesseract::ET_WORD_RECERR);
      ++error_count;
      // Verbosity 1 logs imperfect lines; 2 logs all lines; 3 adds per-line
      // error rates for every line.
      if (verbosity > 1 || (verbosity > 0 && result != PERFECT)) {
        tprintf("Truth:%s\n", trainingdata->transcription().c_str());
        std::vector<int> ocr_labels;
        std::vector<int> xcoords;
        trainer.LabelsFromOutputs(fwd_outputs, &ocr_labels, &xcoords);
        std::string ocr_text = trainer.DecodeLabels(ocr_labels);
        tprintf("OCR :%s\n", ocr_text.c_str());
        if (verbosity > 2 || (verbosity > 1 && result != PERFECT)) {
          tprintf("Line BCER=%f, BWER=%f\n\n",
                  trainer.NewSingleError(tesseract::ET_CHAR_ERROR),
                  trainer.NewSingleError(tesseract::ET_WORD_RECERR));
        }
      }
    }
  }
  // Convert summed per-page errors to percentages.
  char_error *= 100.0 / total_pages_;
  word_error *= 100.0 / total_pages_;
  // Locale-independent, fixed-precision formatting of the summary line.
  std::stringstream result;
  result.imbue(std::locale::classic());
  result << std::fixed << std::setprecision(3);
  if (iteration != 0 || training_stage != 0) {
    result << "At iteration " << iteration
           << ", stage " << training_stage << ", ";
  }
  result << "BCER eval=" << char_error << ", BWER eval=" << word_error;
  return result.str();
}
// Helper thread function for RunEvalAsync.
// LockIfNotRunning must have returned true before calling ThreadFunc, and
// it will call UnlockRunning to release the lock after RunEvalSync completes.
void LSTMTester::ThreadFunc() {
  // Runs quietly (verbosity 0); the result is picked up by the next call
  // to RunEvalAsync as 'prev_result'.
  test_result_ =
      RunEvalSync(test_iteration_, test_training_errors_, test_model_mgr_, test_training_stage_,
                  /*verbosity*/ 0);
  UnlockRunning();
}
// Atomically takes the running lock if no asynchronous evaluation is in
// progress. Returns false, leaving the lock untaken, when one already is.
bool LSTMTester::LockIfNotRunning() {
  std::lock_guard<std::mutex> lock(running_mutex_);
  if (!async_running_) {
    async_running_ = true;
    return true;
  }
  return false;
}
// Releases the running lock taken by LockIfNotRunning, allowing the next
// asynchronous evaluation to start.
void LSTMTester::UnlockRunning() {
  std::lock_guard<std::mutex> lock(running_mutex_);
  async_running_ = false;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/unicharset/lstmtester.cpp
|
C++
|
apache-2.0
| 6,121
|
///////////////////////////////////////////////////////////////////////
// File: lstmtester.h
// Description: Top-level line evaluation class for LSTM-based networks.
// Author: Ray Smith
//
// (C) Copyright 2016, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TRAINING_LSTMTESTER_H_
#define TESSERACT_TRAINING_LSTMTESTER_H_
#include "export.h"
#include "lstmtrainer.h"
#include <mutex>
#include <string>
#include <vector>
namespace tesseract {
class TESS_UNICHARSET_TRAINING_API LSTMTester {
public:
  // max_memory bounds the eval-data cache (forwarded to test_data_);
  // presumably a byte count — confirm against DocumentCache.
  LSTMTester(int64_t max_memory);
  // Loads a set of lstmf files that were created using the lstm.train config to
  // tesseract into memory ready for testing. Returns false if nothing was
  // loaded. The arg is a filename of a file that lists the filenames, with one
  // name per line. Conveniently, tesstrain.py generates such a file, along
  // with the files themselves.
  bool LoadAllEvalData(const char *filenames_file);
  // Loads a set of lstmf files that were created using the lstm.train config to
  // tesseract into memory ready for testing. Returns false if nothing was
  // loaded.
  bool LoadAllEvalData(const std::vector<std::string> &filenames);
  // Runs an evaluation asynchronously on the stored eval data and returns a
  // string describing the results of the previous test. Args match TestCallback
  // declared in lstmtrainer.h:
  // iteration: Current learning iteration number.
  // training_errors: If not null, is an array of size ET_COUNT, indexed by
  //   the ErrorTypes enum and indicates the current errors measured by the
  //   trainer, and this is a serious request to run an evaluation. If null,
  //   then the caller is just polling for the results of the previous eval.
  // model_data: is the model to evaluate, which should be a serialized
  //   LSTMTrainer.
  // training_stage: an arbitrary number on the progress of training.
  std::string RunEvalAsync(int iteration, const double *training_errors,
                           const TessdataManager &model_mgr, int training_stage);
  // Runs an evaluation synchronously on the stored eval data and returns a
  // string describing the results. Args as RunEvalAsync, except verbosity,
  // which outputs errors, if 1, or all results if 2.
  std::string RunEvalSync(int iteration, const double *training_errors, const TessdataManager &model_mgr,
                          int training_stage, int verbosity);
private:
  // Helper thread function for RunEvalAsync.
  // LockIfNotRunning must have returned true before calling ThreadFunc, and
  // it will call UnlockRunning to release the lock after RunEvalSync completes.
  void ThreadFunc();
  // Returns true if there is currently nothing running, and takes the lock
  // if there is nothing running.
  bool LockIfNotRunning();
  // Releases the running lock.
  void UnlockRunning();
  // The data to test with.
  DocumentCache test_data_;
  // Page count of the loaded eval data (presumably set by LoadAllEvalData —
  // confirm against the .cpp).
  int total_pages_ = 0;
  // Flag that indicates an asynchronous test is currently running.
  // Protected by running_mutex_.
  bool async_running_ = false;
  std::mutex running_mutex_;
  // Stored copies of the args for use while running asynchronously.
  int test_iteration_ = 0;
  const double *test_training_errors_ = nullptr;
  TessdataManager test_model_mgr_;
  int test_training_stage_ = 0;
  // Result text of the most recent evaluation, per the RunEvalAsync contract
  // of returning the previous test's results.
  std::string test_result_;
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_LSTMTESTER_H_
|
2301_81045437/tesseract
|
src/training/unicharset/lstmtester.h
|
C++
|
apache-2.0
| 4,009
|
///////////////////////////////////////////////////////////////////////
// File: lstmtrainer.cpp
// Description: Top-level line trainer class for LSTM-based networks.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#define _USE_MATH_DEFINES // needed to get definition of M_SQRT1_2
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include <cmath>
#include <cstring> // for strcmp
#include <iomanip> // for std::setprecision
#include <locale>  // for std::locale::classic
#include <string>
#include "lstmtrainer.h"
#include <allheaders.h>
#include "boxread.h"
#include "ctc.h"
#include "imagedata.h"
#include "input.h"
#include "networkbuilder.h"
#include "ratngs.h"
#include "recodebeam.h"
#ifdef INCLUDE_TENSORFLOW
#  include "tfnetwork.h"
#endif
#include "tprintf.h"
namespace tesseract {
// Min actual error rate increase to constitute divergence. Error rates in
// this file are percentages (cf. kMinStartedErrorRate), so this is 50 points.
const double kMinDivergenceRate = 50.0;
// Min iterations since last best before acting on a stall.
const int kMinStallIterations = 10000;
// Fraction of current char error rate that sub_trainer_ has to be ahead
// before we declare the sub_trainer_ a success and switch to it.
const double kSubTrainerMarginFraction = 3.0 / 128;
// Factor to reduce learning rate on divergence (1/sqrt(2)).
const double kLearningRateDecay = M_SQRT1_2;
// LR adjustment iterations (samples used by ReduceLayerLearningRates).
const int kNumAdjustmentIterations = 100;
// How often to add data to the error_graph_.
const int kErrorGraphInterval = 1000;
// Number of training images to train between calls to MaintainCheckpoints.
const int kNumPagesPerBatch = 100;
// Min percent error rate to consider start-up phase over.
const int kMinStartedErrorRate = 75;
// Error rate at which to transition to stage 1.
const double kStageTransitionThreshold = 10.0;
// Confidence beyond which the truth is more likely wrong than the recognizer.
const double kHighConfidence = 0.9375; // 15/16.
// Fraction of weight sign-changing total to constitute a definite improvement.
const double kImprovementFraction = 15.0 / 16.0;
// Fraction of last written best to make it worth writing another.
const double kBestCheckpointFraction = 31.0 / 32.0;
#ifndef GRAPHICS_DISABLED
// Scale factor for display of target activations of CTC.
const int kTargetXScale = 5;
const int kTargetYScale = 100;
#endif // !GRAPHICS_DISABLED
// Default constructor: empty training-data cache, no sub-trainer, and
// debug display disabled (debug_interval_ = 0).
LSTMTrainer::LSTMTrainer()
    : randomly_rotate_(false), training_data_(0), sub_trainer_(nullptr) {
  EmptyConstructor();
  // Assigned after EmptyConstructor(), which presumably resets shared
  // trainer state — keep this ordering.
  debug_interval_ = 0;
}
// Constructs a trainer that writes checkpoints to checkpoint_name.
// model_base is stored for later use when naming model dumps (presumably
// consumed by DumpFilename — confirm). max_memory bounds the training-data
// cache (forwarded to training_data_).
LSTMTrainer::LSTMTrainer(const std::string &model_base, const std::string &checkpoint_name,
                         int debug_interval, int64_t max_memory)
    : randomly_rotate_(false),
      training_data_(max_memory),
      sub_trainer_(nullptr) {
  EmptyConstructor();
  debug_interval_ = debug_interval;
  model_base_ = model_base;
  checkpoint_name_ = checkpoint_name;
}
// Frees the debug display windows. Safe when the windows were never
// created, since delete on a null pointer is a no-op.
LSTMTrainer::~LSTMTrainer() {
#ifndef GRAPHICS_DISABLED
  delete align_win_;
  delete target_win_;
  delete ctc_win_;
  delete recon_win_;
#endif
}
// Tries to deserialize a trainer from the given file and silently returns
// false in case of failure.
// If old_traineddata names a different traineddata whose codec differs from
// the checkpoint's, the network outputs are remapped to the new codec.
bool LSTMTrainer::TryLoadingCheckpoint(const char *filename,
                                       const char *old_traineddata) {
  std::vector<char> data;
  if (!LoadDataFromFile(filename, &data)) {
    return false;
  }
  tprintf("Loaded file %s, unpacking...\n", filename);
  if (!ReadTrainingDump(data, *this)) {
    return false;
  }
  if (IsIntMode()) {
    tprintf("Error, %s is an integer (fast) model, cannot continue training\n",
            filename);
    return false;
  }
  // No conversion is needed when there is no old traineddata and the code
  // range already matches, or when the "old" traineddata is the checkpoint
  // file itself. BUG FIX: the original compared filename == old_traineddata
  // by pointer, which only matched if the caller passed the identical
  // pointer; compare the string contents instead.
  if (((old_traineddata == nullptr || *old_traineddata == '\0') &&
       network_->NumOutputs() == recoder_.code_range()) ||
      (old_traineddata != nullptr &&
       strcmp(filename, old_traineddata) == 0)) {
    return true; // Normal checkpoint load complete.
  }
  tprintf("Code range changed from %d to %d!\n", network_->NumOutputs(),
          recoder_.code_range());
  if (old_traineddata == nullptr || *old_traineddata == '\0') {
    tprintf("Must supply the old traineddata for code conversion!\n");
    return false;
  }
  // Load the old charset and recoder so the old codes can be mapped to new.
  TessdataManager old_mgr;
  ASSERT_HOST(old_mgr.Init(old_traineddata));
  TFile fp;
  if (!old_mgr.GetComponent(TESSDATA_LSTM_UNICHARSET, &fp)) {
    return false;
  }
  UNICHARSET old_chset;
  if (!old_chset.load_from_file(&fp, false)) {
    return false;
  }
  if (!old_mgr.GetComponent(TESSDATA_LSTM_RECODER, &fp)) {
    return false;
  }
  UnicharCompress old_recoder;
  if (!old_recoder.DeSerialize(&fp)) {
    return false;
  }
  std::vector<int> code_map = MapRecoder(old_chset, old_recoder);
  // Set the null_char_ to the new value.
  int old_null_char = null_char_;
  SetNullChar();
  // Map the softmax(s) in the network.
  network_->RemapOutputs(old_recoder.code_range(), code_map);
  tprintf("Previous null char=%d mapped to %d\n", old_null_char, null_char_);
  return true;
}
// Initializes the trainer with a network_spec in the network description
// net_flags control network behavior according to the NetworkFlags enum.
// There isn't really much difference between them - only where the effects
// are implemented.
// For other args see NetworkBuilder::InitNetwork.
// Note: Be sure to call InitCharSet before InitNetwork!
bool LSTMTrainer::InitNetwork(const char *network_spec, int append_index,
                              int net_flags, float weight_range,
                              float learning_rate, float momentum,
                              float adam_beta) {
  // Record the spec in the traineddata version string for provenance.
  mgr_.SetVersionString(mgr_.VersionString() + ":" + network_spec);
  adam_beta_ = adam_beta;
  learning_rate_ = learning_rate;
  momentum_ = momentum;
  SetNullChar();
  // The output size is the recoder's code range, hence the InitCharSet
  // precondition above.
  if (!NetworkBuilder::InitNetwork(recoder_.code_range(), network_spec,
                                   append_index, net_flags, weight_range,
                                   &randomizer_, &network_)) {
    return false;
  }
  network_str_ += network_spec;
  tprintf("Built network:%s from request %s\n", network_->spec().c_str(),
          network_spec);
  tprintf(
      "Training parameters:\n Debug interval = %d,"
      " weights = %g, learning rate = %g, momentum=%g\n",
      debug_interval_, weight_range, learning_rate_, momentum_);
  tprintf("null char=%d\n", null_char_);
  return true;
}
// Initializes a trainer from a serialized TFNetworkModel proto.
// Returns the global step of TensorFlow graph or 0 if failed.
#ifdef INCLUDE_TENSORFLOW
int LSTMTrainer::InitTensorFlowNetwork(const std::string &tf_proto) {
  delete network_;
  network_ = nullptr; // Avoid a dangling pointer if initialization fails.
  TFNetwork *tf_net = new TFNetwork("TensorFlow");
  training_iteration_ = tf_net->InitFromProtoStr(tf_proto);
  if (training_iteration_ == 0) {
    tprintf("InitFromProtoStr failed!!\n");
    delete tf_net; // BUG FIX: tf_net was leaked on this path before.
    return 0;
  }
  network_ = tf_net;
  ASSERT_HOST(recoder_.code_range() == tf_net->num_classes());
  return training_iteration_;
}
#endif
// Resets all the iteration counters for fine tuning or training a head,
// where we want the error reporting to reset.
void LSTMTrainer::InitIterations() {
  // Iteration counters.
  sample_iteration_ = 0;
  prev_sample_iteration_ = 0;
  training_iteration_ = 0;
  learning_iteration_ = 0;
  // Best/worst tracking starts from the extremes: 100% best (anything is an
  // improvement) and 0% worst.
  best_error_rate_ = 100.0;
  best_iteration_ = 0;
  worst_error_rate_ = 0.0;
  worst_iteration_ = 0;
  best_error_history_.clear();
  best_error_iterations_.clear();
  // Stall/improvement bookkeeping.
  stall_iteration_ = kMinStallIterations;
  improvement_steps_ = kMinStallIterations;
  perfect_delay_ = 0;
  last_perfect_training_iteration_ = 0;
  // Per-error-type rolling buffers and rates.
  for (int i = 0; i < ET_COUNT; ++i) {
    best_error_rates_[i] = 100.0;
    worst_error_rates_[i] = 0.0;
    error_buffers_[i].clear();
    error_buffers_[i].resize(kRollingBufferSize_);
    error_rates_[i] = 100.0;
  }
  error_rate_of_last_saved_best_ = kMinStartedErrorRate;
}
// If the training sample is usable, grid searches for the optimal
// dict_ratio/cert_offset, and returns the results in a string of space-
// separated triplets of ratio,offset=worderr.
// The first triplet is always the no-dictionary baseline "0,0=worderr".
Trainability LSTMTrainer::GridSearchDictParams(
    const ImageData *trainingdata, int iteration, double min_dict_ratio,
    double dict_ratio_step, double max_dict_ratio, double min_cert_offset,
    double cert_offset_step, double max_cert_offset, std::string &results) {
  sample_iteration_ = iteration;
  NetworkIO fwd_outputs, targets;
  // Run the forward pass once; each grid point re-decodes the same outputs.
  Trainability result =
      PrepareForBackward(trainingdata, &fwd_outputs, &targets);
  if (result == UNENCODABLE || result == HI_PRECISION_ERR || dict_ == nullptr) {
    return result;
  }
  // Encode/decode the truth to get the normalization.
  std::vector<int> truth_labels, ocr_labels, xcoords;
  ASSERT_HOST(EncodeString(trainingdata->transcription(), &truth_labels));
  // NO-dict error.
  RecodeBeamSearch base_search(recoder_, null_char_, SimpleTextOutput(),
                               nullptr);
  base_search.Decode(fwd_outputs, 1.0, 0.0, RecodeBeamSearch::kMinCertainty,
                     nullptr);
  base_search.ExtractBestPathAsLabels(&ocr_labels, &xcoords);
  std::string truth_text = DecodeLabels(truth_labels);
  std::string ocr_text = DecodeLabels(ocr_labels);
  double baseline_error = ComputeWordError(&truth_text, &ocr_text);
  results += "0,0=" + std::to_string(baseline_error);
  // Dictionary-assisted search over the (ratio, certainty-offset) grid.
  RecodeBeamSearch search(recoder_, null_char_, SimpleTextOutput(), dict_);
  for (double r = min_dict_ratio; r < max_dict_ratio; r += dict_ratio_step) {
    for (double c = min_cert_offset; c < max_cert_offset;
         c += cert_offset_step) {
      search.Decode(fwd_outputs, r, c, RecodeBeamSearch::kMinCertainty,
                    nullptr);
      search.ExtractBestPathAsLabels(&ocr_labels, &xcoords);
      truth_text = DecodeLabels(truth_labels);
      ocr_text = DecodeLabels(ocr_labels);
      // This is destructive on both strings.
      double word_error = ComputeWordError(&truth_text, &ocr_text);
      // Log the first grid point, and any non-finite error, for diagnosis.
      if ((r == min_dict_ratio && c == min_cert_offset) ||
          !std::isfinite(word_error)) {
        std::string t = DecodeLabels(truth_labels);
        std::string o = DecodeLabels(ocr_labels);
        tprintf("r=%g, c=%g, truth=%s, ocr=%s, wderr=%g, truth[0]=%d\n", r, c,
                t.c_str(), o.c_str(), word_error, truth_labels[0]);
      }
      results += " " + std::to_string(r);
      results += "," + std::to_string(c);
      results += "=" + std::to_string(word_error);
    }
  }
  return result;
}
// Provides output on the distribution of weight values.
// Delegates entirely to the network's own weight-debugging dump.
void LSTMTrainer::DebugNetwork() {
  network_->DebugWeights();
}
// Loads a set of lstmf files that were created using the lstm.train config to
// tesseract into memory ready for training. Returns false if nothing was
// loaded. Any previously loaded documents are discarded first.
bool LSTMTrainer::LoadAllTrainingData(const std::vector<std::string> &filenames,
                                      CachingStrategy cache_strategy,
                                      bool randomly_rotate) {
  // Drop any previously cached documents before loading the new set.
  training_data_.Clear();
  randomly_rotate_ = randomly_rotate;
  return training_data_.LoadDocuments(filenames, cache_strategy,
                                      LoadDataFromFile);
}
// Keeps track of best and locally worst char error_rate and launches tests
// using tester, when a new min or max is reached.
// Writes checkpoints at appropriate times and builds and returns a log message
// to indicate progress. Returns false if nothing interesting happened.
bool LSTMTrainer::MaintainCheckpoints(const TestCallback &tester,
                                      std::stringstream &log_msg) {
  PrepareLogMsg(log_msg);
  double error_rate = CharError();
  int iteration = learning_iteration();
  // Stall detection: no new best for a long time AND measurably worse than
  // the best -> try a sub-trainer from the best model at a lower LR.
  if (iteration >= stall_iteration_ &&
      error_rate > best_error_rate_ * (1.0 + kSubTrainerMarginFraction) &&
      best_error_rate_ < kMinStartedErrorRate && !best_trainer_.empty()) {
    // It hasn't got any better in a long while, and is a margin worse than the
    // best, so go back to the best model and try a different learning rate.
    StartSubtrainer(log_msg);
  }
  SubTrainerResult sub_trainer_result = STR_NONE;
  if (sub_trainer_ != nullptr) {
    sub_trainer_result = UpdateSubtrainer(log_msg);
    if (sub_trainer_result == STR_REPLACED) {
      // Reset the inputs, as we have overwritten *this.
      error_rate = CharError();
      iteration = learning_iteration();
      PrepareLogMsg(log_msg);
    }
  }
  bool result = true; // Something interesting happened.
  std::vector<char> rec_model_data;
  if (error_rate < best_error_rate_) {
    // New best: dump the recognizer, update the graph, and maybe write a
    // best-model file if the improvement is big enough.
    SaveRecognitionDump(&rec_model_data);
    log_msg << " New best BCER = " << error_rate;
    log_msg << UpdateErrorGraph(iteration, error_rate, rec_model_data, tester);
    // If sub_trainer_ is not nullptr, either *this beat it to a new best, or it
    // just overwrote *this. In either case, we have finished with it.
    sub_trainer_.reset();
    stall_iteration_ = learning_iteration() + kMinStallIterations;
    if (TransitionTrainingStage(kStageTransitionThreshold)) {
      log_msg << " Transitioned to stage " << CurrentTrainingStage();
    }
    SaveTrainingDump(NO_BEST_TRAINER, *this, &best_trainer_);
    if (error_rate < error_rate_of_last_saved_best_ * kBestCheckpointFraction) {
      std::string best_model_name = DumpFilename();
      if (!SaveDataToFile(best_trainer_, best_model_name.c_str())) {
        log_msg << " failed to write best model:";
      } else {
        log_msg << " wrote best model:";
        error_rate_of_last_saved_best_ = best_error_rate_;
      }
      log_msg << best_model_name;
    }
  } else if (error_rate > worst_error_rate_) {
    // New local worst: report it, and revert to the best model if the error
    // has diverged far beyond the best.
    SaveRecognitionDump(&rec_model_data);
    log_msg << " New worst BCER = " << error_rate;
    log_msg << UpdateErrorGraph(iteration, error_rate, rec_model_data, tester);
    if (worst_error_rate_ > best_error_rate_ + kMinDivergenceRate &&
        best_error_rate_ < kMinStartedErrorRate && !best_trainer_.empty()) {
      // Error rate has ballooned. Go back to the best model.
      log_msg << "\nDivergence! ";
      // Copy best_trainer_ before reading it, as it will get overwritten.
      std::vector<char> revert_data(best_trainer_);
      if (ReadTrainingDump(revert_data, *this)) {
        LogIterations("Reverted to", log_msg);
        ReduceLearningRates(this, log_msg);
      } else {
        LogIterations("Failed to Revert at", log_msg);
      }
      // If it fails again, we will wait twice as long before reverting again.
      stall_iteration_ = iteration + 2 * (iteration - learning_iteration());
      // Re-save the best trainer with the new learning rates and stall
      // iteration.
      SaveTrainingDump(NO_BEST_TRAINER, *this, &best_trainer_);
    }
  } else {
    // Something interesting happened only if the sub_trainer_ was trained.
    result = sub_trainer_result != STR_NONE;
  }
  if (checkpoint_name_.length() > 0) {
    // Write a current checkpoint.
    std::vector<char> checkpoint;
    if (!SaveTrainingDump(FULL, *this, &checkpoint) ||
        !SaveDataToFile(checkpoint, checkpoint_name_.c_str())) {
      log_msg << " failed to write checkpoint.";
    } else {
      log_msg << " wrote checkpoint.";
    }
  }
  return result;
}
// Appends a progress message with the current rolling error rates to log_msg,
// starting with the "At iteration ..." prefix from LogIterations.
void LSTMTrainer::PrepareLogMsg(std::stringstream &log_msg) const {
  LogIterations("At", log_msg);
  // Fixed-point, 3 decimal places for all following rates.
  log_msg << std::fixed << std::setprecision(3);
  log_msg << ", mean rms=" << error_rates_[ET_RMS];
  log_msg << "%, delta=" << error_rates_[ET_DELTA];
  log_msg << "%, BCER train=" << error_rates_[ET_CHAR_ERROR];
  log_msg << "%, BWER train=" << error_rates_[ET_WORD_RECERR];
  log_msg << "%, skip ratio=" << error_rates_[ET_SKIP_RATIO] << "%,";
}
// Appends "<intro_str> iteration L/T/S" to log_msg, where L, T, S are the
// learning, training and sample iteration counters respectively.
void LSTMTrainer::LogIterations(const char *intro_str,
                                std::stringstream &log_msg) const {
  log_msg << intro_str << " iteration " << learning_iteration() << "/"
          << training_iteration() << "/" << sample_iteration();
}
// Returns true and increments the training_stage_ if the error rate has just
// passed through the given threshold for the first time.
bool LSTMTrainer::TransitionTrainingStage(float error_threshold) {
  // Guard clause: nothing to do if we haven't beaten the threshold yet, or
  // there is no further stage to advance to.
  if (best_error_rate_ >= error_threshold ||
      training_stage_ + 1 >= num_training_stages_) {
    return false;
  }
  ++training_stage_;
  return true;
}
// Writes to the given file. Returns false in case of error.
// NOTE: the field order here must be kept exactly in sync with DeSerialize.
// serialize_amount controls how much state is written: LIGHT stops after the
// amount byte; NO_BEST_TRAINER additionally omits best_trainer_.
bool LSTMTrainer::Serialize(SerializeAmount serialize_amount,
                            const TessdataManager *mgr, TFile *fp) const {
  // Base recognizer state first.
  if (!LSTMRecognizer::Serialize(mgr, fp)) {
    return false;
  }
  if (!fp->Serialize(&learning_iteration_)) {
    return false;
  }
  if (!fp->Serialize(&prev_sample_iteration_)) {
    return false;
  }
  if (!fp->Serialize(&perfect_delay_)) {
    return false;
  }
  if (!fp->Serialize(&last_perfect_training_iteration_)) {
    return false;
  }
  if (!fp->Serialize(&error_rates_[0], countof(error_rates_))) {
    return false;
  }
  if (!fp->Serialize(&training_stage_)) {
    return false;
  }
  // The amount byte tells DeSerialize whether more fields follow.
  uint8_t amount = serialize_amount;
  if (!fp->Serialize(&amount)) {
    return false;
  }
  if (serialize_amount == LIGHT) {
    return true; // We are done.
  }
  if (!fp->Serialize(&best_error_rate_)) {
    return false;
  }
  if (!fp->Serialize(&best_error_rates_[0], countof(best_error_rates_))) {
    return false;
  }
  if (!fp->Serialize(&best_iteration_)) {
    return false;
  }
  if (!fp->Serialize(&worst_error_rate_)) {
    return false;
  }
  if (!fp->Serialize(&worst_error_rates_[0], countof(worst_error_rates_))) {
    return false;
  }
  if (!fp->Serialize(&worst_iteration_)) {
    return false;
  }
  if (!fp->Serialize(&stall_iteration_)) {
    return false;
  }
  if (!fp->Serialize(best_model_data_)) {
    return false;
  }
  if (!fp->Serialize(worst_model_data_)) {
    return false;
  }
  if (serialize_amount != NO_BEST_TRAINER && !fp->Serialize(best_trainer_)) {
    return false;
  }
  // The sub-trainer (if any) is nested as a LIGHT dump; an empty vector
  // means there is no sub-trainer.
  std::vector<char> sub_data;
  if (sub_trainer_ != nullptr &&
      !SaveTrainingDump(LIGHT, *sub_trainer_, &sub_data)) {
    return false;
  }
  if (!fp->Serialize(sub_data)) {
    return false;
  }
  if (!fp->Serialize(best_error_history_)) {
    return false;
  }
  if (!fp->Serialize(best_error_iterations_)) {
    return false;
  }
  return fp->Serialize(&improvement_steps_);
}
// Reads from the given file. Returns false in case of error.
// NOTE: It is assumed that the trainer is never read cross-endian.
// Field order must match Serialize exactly.
bool LSTMTrainer::DeSerialize(const TessdataManager *mgr, TFile *fp) {
  if (!LSTMRecognizer::DeSerialize(mgr, fp)) {
    return false;
  }
  if (!fp->DeSerialize(&learning_iteration_)) {
    // Special case. If we successfully decoded the recognizer, but fail here
    // then it means we were just given a recognizer, so issue a warning and
    // allow it.
    tprintf("Warning: LSTMTrainer deserialized an LSTMRecognizer!\n");
    learning_iteration_ = 0;
    network_->SetEnableTraining(TS_ENABLED);
    return true;
  }
  if (!fp->DeSerialize(&prev_sample_iteration_)) {
    return false;
  }
  if (!fp->DeSerialize(&perfect_delay_)) {
    return false;
  }
  if (!fp->DeSerialize(&last_perfect_training_iteration_)) {
    return false;
  }
  for (auto &error_buffer : error_buffers_) {
    if (!fp->DeSerialize(error_buffer)) {
      return false;
    }
  }
  if (!fp->DeSerialize(&error_rates_[0], countof(error_rates_))) {
    return false;
  }
  if (!fp->DeSerialize(&training_stage_)) {
    return false;
  }
  // The amount byte written by Serialize tells us whether more fields follow.
  uint8_t amount;
  if (!fp->DeSerialize(&amount)) {
    return false;
  }
  if (amount == LIGHT) {
    return true; // Don't read the rest.
  }
  if (!fp->DeSerialize(&best_error_rate_)) {
    return false;
  }
  if (!fp->DeSerialize(&best_error_rates_[0], countof(best_error_rates_))) {
    return false;
  }
  if (!fp->DeSerialize(&best_iteration_)) {
    return false;
  }
  if (!fp->DeSerialize(&worst_error_rate_)) {
    return false;
  }
  if (!fp->DeSerialize(&worst_error_rates_[0], countof(worst_error_rates_))) {
    return false;
  }
  if (!fp->DeSerialize(&worst_iteration_)) {
    return false;
  }
  if (!fp->DeSerialize(&stall_iteration_)) {
    return false;
  }
  if (!fp->DeSerialize(best_model_data_)) {
    return false;
  }
  if (!fp->DeSerialize(worst_model_data_)) {
    return false;
  }
  if (amount != NO_BEST_TRAINER && !fp->DeSerialize(best_trainer_)) {
    return false;
  }
  // Nested sub-trainer dump: empty vector means no sub-trainer was saved.
  std::vector<char> sub_data;
  if (!fp->DeSerialize(sub_data)) {
    return false;
  }
  if (sub_data.empty()) {
    sub_trainer_ = nullptr;
  } else {
    sub_trainer_ = std::make_unique<LSTMTrainer>();
    if (!ReadTrainingDump(sub_data, *sub_trainer_)) {
      return false;
    }
  }
  if (!fp->DeSerialize(best_error_history_)) {
    return false;
  }
  if (!fp->DeSerialize(best_error_iterations_)) {
    return false;
  }
  return fp->DeSerialize(&improvement_steps_);
}
// De-serializes the saved best_trainer_ into sub_trainer_, and adjusts the
// learning rates (by scaling reduction, or layer specific, according to
// NF_LAYER_SPECIFIC_LR).
void LSTMTrainer::StartSubtrainer(std::stringstream &log_msg) {
  sub_trainer_ = std::make_unique<LSTMTrainer>();
  if (!ReadTrainingDump(best_trainer_, *sub_trainer_)) {
    log_msg << " Failed to revert to previous best for trial!";
    sub_trainer_.reset();
    return;
  }
  log_msg << " Trial sub_trainer_ from iteration "
          << sub_trainer_->training_iteration();
  // Reduce learning rate so it doesn't diverge this time.
  sub_trainer_->ReduceLearningRates(this, log_msg);
  // If it fails again, we will wait twice as long before reverting again.
  const int stall_offset =
      learning_iteration() - sub_trainer_->learning_iteration();
  stall_iteration_ = learning_iteration() + 2 * stall_offset;
  sub_trainer_->stall_iteration_ = stall_iteration_;
  // Re-save the best trainer with the new learning rates and stall iteration.
  SaveTrainingDump(NO_BEST_TRAINER, *sub_trainer_, &best_trainer_);
}
// While the sub_trainer_ is behind the current training iteration and its
// training error is at least kSubTrainerMarginFraction better than the
// current training error, trains the sub_trainer_, and returns STR_UPDATED if
// it did anything. If it catches up, and has a better error rate than the
// current best, as well as a margin over the current error rate, then the
// trainer in *this is replaced with sub_trainer_, and STR_REPLACED is
// returned. STR_NONE is returned if the subtrainer wasn't good enough to
// receive any training iterations.
SubTrainerResult LSTMTrainer::UpdateSubtrainer(std::stringstream &log_msg) {
  double training_error = CharError();
  double sub_error = sub_trainer_->CharError();
  // Relative margin of the sub-trainer over the current trainer.
  double sub_margin = (training_error - sub_error) / sub_error;
  if (sub_margin >= kSubTrainerMarginFraction) {
    log_msg << " sub_trainer=" << sub_error
            << " margin=" << 100.0 * sub_margin << "\n";
    // Catch up to current iteration, one batch at a time, re-checking the
    // margin after each batch so we stop as soon as it is lost.
    int end_iteration = training_iteration();
    while (sub_trainer_->training_iteration() < end_iteration &&
           sub_margin >= kSubTrainerMarginFraction) {
      int target_iteration =
          sub_trainer_->training_iteration() + kNumPagesPerBatch;
      while (sub_trainer_->training_iteration() < target_iteration) {
        sub_trainer_->TrainOnLine(this, false);
      }
      std::stringstream batch_log("Sub:");
      batch_log.imbue(std::locale::classic());
      sub_trainer_->PrepareLogMsg(batch_log);
      batch_log << "\n";
      tprintf("UpdateSubtrainer:%s", batch_log.str().c_str());
      log_msg << batch_log.str();
      sub_error = sub_trainer_->CharError();
      sub_margin = (training_error - sub_error) / sub_error;
    }
    // Caught up (or margin lost): replace *this only if the sub-trainer both
    // beats the all-time best and still holds the margin.
    if (sub_error < best_error_rate_ &&
        sub_margin >= kSubTrainerMarginFraction) {
      // The sub_trainer_ has won the race to a new best. Switch to it.
      std::vector<char> updated_trainer;
      SaveTrainingDump(LIGHT, *sub_trainer_, &updated_trainer);
      ReadTrainingDump(updated_trainer, *this);
      log_msg << " Sub trainer wins at iteration "
              << training_iteration() << "\n";
      return STR_REPLACED;
    }
    return STR_UPDATED;
  }
  return STR_NONE;
}
// Reduces network learning rates, either for everything, or for layers
// independently, according to NF_LAYER_SPECIFIC_LR.
void LSTMTrainer::ReduceLearningRates(LSTMTrainer *samples_trainer,
                                      std::stringstream &log_msg) {
  if (!network_->TestFlag(NF_LAYER_SPECIFIC_LR)) {
    // Single global learning rate: just scale it down.
    ScaleLearningRate(kLearningRateDecay);
    log_msg << "\nReduced learning rate to :" << learning_rate_;
  } else {
    // Per-layer rates: let the layer-level search decide which to reduce.
    const int num_reduced = ReduceLayerLearningRates(
        kLearningRateDecay, kNumAdjustmentIterations, samples_trainer);
    log_msg << "\nReduced learning rate on layers: " << num_reduced;
  }
  log_msg << "\n";
}
// Considers reducing the learning rate independently for each layer down by
// factor(<1), or leaving it the same, by double-training the given number of
// samples and minimizing the amount of changing of sign of weight updates.
// Even if it looks like all weights should remain the same, an adjustment
// will be made to guarantee a different result when reverting to an old best.
// Returns the number of layer learning rates that were reduced.
int LSTMTrainer::ReduceLayerLearningRates(TFloat factor, int num_samples,
                                          LSTMTrainer *samples_trainer) {
  enum WhichWay {
    LR_DOWN, // Learning rate will go down by factor.
    LR_SAME, // Learning rate will stay the same.
    LR_COUNT // Size of arrays.
  };
  std::vector<std::string> layers = EnumerateLayers();
  int num_layers = layers.size();
  std::vector<int> num_weights(num_layers);
  // Accumulated sign-change ("bad") and sign-keep ("ok") scores per
  // candidate rate (LR_DOWN/LR_SAME) per layer.
  std::vector<TFloat> bad_sums[LR_COUNT];
  std::vector<TFloat> ok_sums[LR_COUNT];
  for (int i = 0; i < LR_COUNT; ++i) {
    bad_sums[i].resize(num_layers, 0.0);
    ok_sums[i].resize(num_layers, 0.0);
  }
  // Transfers the effect of momentum into an equivalent one-step rate.
  auto momentum_factor = 1 / (1 - momentum_);
  // Snapshot of *this so each trial starts from identical state.
  std::vector<char> orig_trainer;
  samples_trainer->SaveTrainingDump(LIGHT, *this, &orig_trainer);
  for (int i = 0; i < num_layers; ++i) {
    Network *layer = GetLayer(layers[i]);
    num_weights[i] = layer->IsTraining() ? layer->num_weights() : 0;
  }
  int iteration = sample_iteration();
  for (int s = 0; s < num_samples; ++s) {
    // Which way will we modify the learning rate?
    for (int ww = 0; ww < LR_COUNT; ++ww) {
      // Transfer momentum to learning rate and adjust by the ww factor.
      auto ww_factor = momentum_factor;
      if (ww == LR_DOWN) {
        ww_factor *= factor;
      }
      // Make a copy of *this, so we can mess about without damaging anything.
      LSTMTrainer copy_trainer;
      samples_trainer->ReadTrainingDump(orig_trainer, copy_trainer);
      // Clear the updates, doing nothing else.
      copy_trainer.network_->Update(0.0, 0.0, 0.0, 0);
      // Adjust the learning rate in each layer.
      for (int i = 0; i < num_layers; ++i) {
        if (num_weights[i] == 0) {
          continue;
        }
        copy_trainer.ScaleLayerLearningRate(layers[i], ww_factor);
      }
      copy_trainer.SetIteration(iteration);
      // Train on the sample, but keep the update in updates_ instead of
      // applying to the weights.
      const ImageData *trainingdata =
          copy_trainer.TrainOnLine(samples_trainer, true);
      if (trainingdata == nullptr) {
        continue;
      }
      // We'll now use this trainer again for each layer.
      std::vector<char> updated_trainer;
      samples_trainer->SaveTrainingDump(LIGHT, copy_trainer, &updated_trainer);
      for (int i = 0; i < num_layers; ++i) {
        if (num_weights[i] == 0) {
          continue;
        }
        LSTMTrainer layer_trainer;
        samples_trainer->ReadTrainingDump(updated_trainer, layer_trainer);
        Network *layer = layer_trainer.GetLayer(layers[i]);
        // Update the weights in just the layer, using Adam if enabled.
        layer->Update(0.0, momentum_, adam_beta_,
                      layer_trainer.training_iteration_ + 1);
        // Zero the updates matrix again.
        layer->Update(0.0, 0.0, 0.0, 0);
        // Train again on the same sample, again holding back the updates.
        layer_trainer.TrainOnLine(trainingdata, true);
        // Count the sign changes in the updates in layer vs in copy_trainer.
        float before_bad = bad_sums[ww][i];
        float before_ok = ok_sums[ww][i];
        layer->CountAlternators(*copy_trainer.GetLayer(layers[i]),
                                &ok_sums[ww][i], &bad_sums[ww][i]);
        // NOTE(review): bad_frac below is computed but never used after this
        // loop body — it appears to be leftover debug/diagnostic code.
        float bad_frac =
            bad_sums[ww][i] + ok_sums[ww][i] - before_bad - before_ok;
        if (bad_frac > 0.0f) {
          bad_frac = (bad_sums[ww][i] - before_bad) / bad_frac;
        }
      }
    }
    ++iteration;
  }
  // Decide per layer: reduce the rate where reducing produced a clearly
  // lower fraction of sign-changing updates.
  int num_lowered = 0;
  for (int i = 0; i < num_layers; ++i) {
    if (num_weights[i] == 0) {
      continue;
    }
    Network *layer = GetLayer(layers[i]);
    float lr = GetLayerLearningRate(layers[i]);
    TFloat total_down = bad_sums[LR_DOWN][i] + ok_sums[LR_DOWN][i];
    TFloat total_same = bad_sums[LR_SAME][i] + ok_sums[LR_SAME][i];
    TFloat frac_down = bad_sums[LR_DOWN][i] / total_down;
    TFloat frac_same = bad_sums[LR_SAME][i] / total_same;
    tprintf("Layer %d=%s: lr %g->%g%%, lr %g->%g%%", i, layer->name().c_str(),
            lr * factor, 100.0 * frac_down, lr, 100.0 * frac_same);
    if (frac_down < frac_same * kImprovementFraction) {
      tprintf(" REDUCED\n");
      ScaleLayerLearningRate(layers[i], factor);
      ++num_lowered;
    } else {
      tprintf(" SAME\n");
    }
  }
  if (num_lowered == 0) {
    // Just lower everything to make sure.
    for (int i = 0; i < num_layers; ++i) {
      if (num_weights[i] > 0) {
        ScaleLayerLearningRate(layers[i], factor);
        ++num_lowered;
      }
    }
  }
  return num_lowered;
}
// Converts the string to integer class labels, with appropriate null_char_s
// in between if not in SimpleTextOutput mode. Returns false on failure.
// str: truth text to encode.
// unicharset: cleans and encodes the text to internal labels.
// recoder: if non-null, re-encodes each internal label to compressed codes.
// simple_text: if true, no null_char separators are inserted.
// null_char: the label used as separator between codes.
// labels: output label sequence (cleared first).
/* static */
bool LSTMTrainer::EncodeString(const std::string &str,
                               const UNICHARSET &unicharset,
                               const UnicharCompress *recoder, bool simple_text,
                               int null_char, std::vector<int> *labels) {
  // std::string::c_str() can never return nullptr, so the previous
  // str.c_str() == nullptr test was dead code; empty() is the real check.
  if (str.empty()) {
    tprintf("Empty truth string!\n");
    return false;
  }
  // Initialized defensively in case encode_string fails without setting it.
  unsigned err_index = 0;
  std::vector<int> internal_labels;
  labels->clear();
  if (!simple_text) {
    labels->push_back(null_char);
  }
  std::string cleaned = unicharset.CleanupString(str.c_str());
  if (unicharset.encode_string(cleaned.c_str(), true, &internal_labels, nullptr,
                               &err_index)) {
    bool success = true;
    for (auto internal_label : internal_labels) {
      if (recoder != nullptr) {
        // Re-encode labels via recoder.
        RecodedCharID code;
        int len = recoder->EncodeUnichar(internal_label, &code);
        if (len > 0) {
          for (int j = 0; j < len; ++j) {
            labels->push_back(code(j));
            if (!simple_text) {
              labels->push_back(null_char);
            }
          }
        } else {
          // Unichar has no code in the recoder: report failure at offset 0.
          success = false;
          err_index = 0;
          break;
        }
      } else {
        labels->push_back(internal_label);
        if (!simple_text) {
          labels->push_back(null_char);
        }
      }
    }
    if (success) {
      return true;
    }
  }
  // Dump the raw bytes from the failing position to aid debugging.
  tprintf("Encoding of string failed! Failure bytes:");
  while (err_index < cleaned.size()) {
    tprintf(" %x", cleaned[err_index++] & 0xff);
  }
  tprintf("\n");
  return false;
}
// Performs forward-backward on the given trainingdata.
// Returns a Trainability enum to indicate the suitability of the sample.
Trainability LSTMTrainer::TrainOnLine(const ImageData *trainingdata,
                                      bool batch) {
  NetworkIO fwd_outputs, targets;
  Trainability trainable =
      PrepareForBackward(trainingdata, &fwd_outputs, &targets);
  // The sample counts as consumed even when unusable.
  ++sample_iteration_;
  if (trainable == UNENCODABLE || trainable == NOT_BOXED) {
    return trainable; // Sample was unusable.
  }
  bool debug =
      debug_interval_ > 0 && training_iteration() % debug_interval_ == 0;
  // Run backprop on the output. PERFECT samples are throttled: they only
  // contribute if enough iterations have passed since the last one.
  NetworkIO bp_deltas;
  if (network_->IsTraining() &&
      (trainable != PERFECT ||
       training_iteration() >
           last_perfect_training_iteration_ + perfect_delay_)) {
    network_->Backward(debug, targets, &scratch_space_, &bp_deltas);
    // In batch mode a momentum of -1.0f is passed (presumably a signal to
    // accumulate updates instead of applying them — confirm in Network).
    network_->Update(learning_rate_, batch ? -1.0f : momentum_, adam_beta_,
                     training_iteration_ + 1);
  }
#ifndef GRAPHICS_DISABLED
  if (debug_interval_ == 1 && debug_win_ != nullptr) {
    debug_win_->AwaitEvent(SVET_CLICK);
  }
#endif // !GRAPHICS_DISABLED
  // Roll the memory of past means.
  RollErrorBuffers();
  return trainable;
}
// Prepares the ground truth, runs forward, and prepares the targets.
// Returns a Trainability enum to indicate the suitability of the sample.
// trainingdata: the image/transcription pair to train on.
// fwd_outputs: receives the raw network outputs.
// targets: receives the ideal outputs; on return holds targets - outputs.
Trainability LSTMTrainer::PrepareForBackward(const ImageData *trainingdata,
                                             NetworkIO *fwd_outputs,
                                             NetworkIO *targets) {
  if (trainingdata == nullptr) {
    tprintf("Null trainingdata.\n");
    return UNENCODABLE;
  }
  // Ensure repeatability of random elements even across checkpoints.
  bool debug =
      debug_interval_ > 0 && training_iteration() % debug_interval_ == 0;
  std::vector<int> truth_labels;
  if (!EncodeString(trainingdata->transcription(), &truth_labels)) {
    tprintf("Can't encode transcription: '%s' in language '%s'\n",
            trainingdata->transcription().c_str(),
            trainingdata->language().c_str());
    return UNENCODABLE;
  }
  bool upside_down = false;
  if (randomly_rotate_) {
    // This ensures consistent training results.
    SetRandomSeed();
    upside_down = randomizer_.SignedRand(1.0) > 0.0;
    if (upside_down) {
      // Modify the truth labels to match the rotation:
      // Apart from space and null, increment the label. This changes the
      // script-id to the same script-id but upside-down.
      // The labels need to be reversed in order, as the first is now the last.
      // BUG FIX: iterate by reference - the previous by-value loop
      // incremented a copy, leaving the stored labels unchanged.
      for (auto &truth_label : truth_labels) {
        if (truth_label != UNICHAR_SPACE && truth_label != null_char_) {
          ++truth_label;
        }
      }
      std::reverse(truth_labels.begin(), truth_labels.end());
    }
  }
  // Skip leading spaces/nulls; a transcription of only those is unusable.
  unsigned w = 0;
  while (w < truth_labels.size() &&
         (truth_labels[w] == UNICHAR_SPACE || truth_labels[w] == null_char_)) {
    ++w;
  }
  if (w == truth_labels.size()) {
    tprintf("Blank transcription: %s\n", trainingdata->transcription().c_str());
    return UNENCODABLE;
  }
  float image_scale;
  NetworkIO inputs;
  // Without boxes, allow inversion with 0.5 threshold during recognition.
  bool invert = trainingdata->boxes().empty();
  if (!RecognizeLine(*trainingdata, invert ? 0.5f : 0.0f, debug, invert, upside_down,
                     &image_scale, &inputs, fwd_outputs)) {
    tprintf("Image %s not trainable\n", trainingdata->imagefilename().c_str());
    return UNENCODABLE;
  }
  targets->Resize(*fwd_outputs, network_->NumOutputs());
  // Build targets according to the network's loss type.
  LossType loss_type = OutputLossType();
  if (loss_type == LT_SOFTMAX) {
    if (!ComputeTextTargets(*fwd_outputs, truth_labels, targets)) {
      tprintf("Compute simple targets failed for %s!\n",
              trainingdata->imagefilename().c_str());
      return UNENCODABLE;
    }
  } else if (loss_type == LT_CTC) {
    if (!ComputeCTCTargets(truth_labels, fwd_outputs, targets)) {
      tprintf("Compute CTC targets failed for %s!\n",
              trainingdata->imagefilename().c_str());
      return UNENCODABLE;
    }
  } else {
    tprintf("Logistic outputs not implemented yet!\n");
    return UNENCODABLE;
  }
  std::vector<int> ocr_labels;
  std::vector<int> xcoords;
  LabelsFromOutputs(*fwd_outputs, &ocr_labels, &xcoords);
  // CTC does not produce correct target labels to begin with.
  if (loss_type != LT_CTC) {
    LabelsFromOutputs(*targets, &truth_labels, &xcoords);
  }
  if (!DebugLSTMTraining(inputs, *trainingdata, *fwd_outputs, truth_labels,
                         *targets)) {
    tprintf("Input width was %d\n", inputs.Width());
    return UNENCODABLE;
  }
  std::string ocr_text = DecodeLabels(ocr_labels);
  std::string truth_text = DecodeLabels(truth_labels);
  // From here on, targets holds the deltas (targets - outputs).
  targets->SubtractAllFromFloat(*fwd_outputs);
  if (debug_interval_ != 0) {
    if (truth_text != ocr_text) {
      tprintf("Iteration %d: BEST OCR TEXT : %s\n", training_iteration(),
              ocr_text.c_str());
    }
  }
  double char_error = ComputeCharError(truth_labels, ocr_labels);
  double word_error = ComputeWordError(&truth_text, &ocr_text);
  double delta_error = ComputeErrorRates(*targets, char_error, word_error);
  if (debug_interval_ != 0) {
    tprintf("File %s line %d %s:\n", trainingdata->imagefilename().c_str(),
            trainingdata->page_number(), delta_error == 0.0 ? "(Perfect)" : "");
  }
  if (delta_error == 0.0) {
    return PERFECT;
  }
  // High-confidence wrong answers suggest a ground-truth error.
  if (targets->AnySuspiciousTruth(kHighConfidence)) {
    return HI_PRECISION_ERR;
  }
  return TRAINABLE;
}
// Writes the trainer to memory, so that the current training state can be
// restored. *this must always be the master trainer that retains the only
// copy of the training data and language model. trainer is the model that is
// actually serialized.
// serialize_amount: how much state to include (see SerializeAmount).
// data: receives the serialized bytes.
bool LSTMTrainer::SaveTrainingDump(SerializeAmount serialize_amount,
                                   const LSTMTrainer &trainer,
                                   std::vector<char> *data) const {
  TFile fp;
  // TFile writes into the caller-supplied vector; no file I/O occurs here.
  fp.OpenWrite(data);
  return trainer.Serialize(serialize_amount, &mgr_, &fp);
}
// Restores the model to *this from an in-memory dump previously produced by
// SaveTrainingDump. Returns false on empty input or deserialization failure.
bool LSTMTrainer::ReadLocalTrainingDump(const TessdataManager *mgr,
                                        const char *data, int size) {
  if (size == 0) {
    tprintf("Warning: data size is 0 in LSTMTrainer::ReadLocalTrainingDump\n");
    return false;
  }
  TFile fp;
  // TFile reads directly from the supplied buffer; no file I/O occurs here.
  fp.Open(data, size);
  return DeSerialize(mgr, &fp);
}
// Writes the full recognition traineddata to the given filename.
// Serializes the recognizer, installs it as the TESSDATA_LSTM entry in mgr_,
// and saves the whole traineddata bundle to disk.
bool LSTMTrainer::SaveTraineddata(const char *filename) {
  std::vector<char> recognizer_data;
  SaveRecognitionDump(&recognizer_data);
  // Use data() rather than &recognizer_data[0]: indexing an empty vector
  // is undefined behavior, while data() is well-defined (returns a possibly
  // null pointer paired with size() == 0).
  mgr_.OverwriteEntry(TESSDATA_LSTM, recognizer_data.data(),
                      recognizer_data.size());
  return mgr_.SaveFile(filename, SaveDataToFile);
}
// Writes the recognizer to memory, so that it can be used for testing later.
// Training is temporarily disabled so the serialized network is in
// recognition form, then re-enabled afterwards.
void LSTMTrainer::SaveRecognitionDump(std::vector<char> *data) const {
  TFile fp;
  fp.OpenWrite(data);
  network_->SetEnableTraining(TS_TEMP_DISABLE);
  ASSERT_HOST(LSTMRecognizer::Serialize(&mgr_, &fp));
  network_->SetEnableTraining(TS_RE_ENABLE);
}
// Returns a suitable filename for a training dump, based on the model_base_,
// best_error_rate_, best_iteration_ and training_iteration_.
// Format: <model_base>_<best_err>_<best_iter>_<train_iter>.checkpoint with
// the error rate rendered to 3 decimal places, locale-independent.
std::string LSTMTrainer::DumpFilename() const {
  std::stringstream name;
  // Classic locale keeps the numeric formatting stable across platforms.
  name.imbue(std::locale::classic());
  name << std::fixed << std::setprecision(3);
  name << model_base_ << '_' << best_error_rate_ << '_' << best_iteration_
       << '_' << training_iteration_ << ".checkpoint";
  return name.str();
}
// Fills the whole error buffer of the given type with the given value,
// and sets the corresponding mean error rate (as a percentage).
void LSTMTrainer::FillErrorBuffer(double new_error, ErrorTypes type) {
  auto &buffer = error_buffers_[type];
  for (int slot = 0; slot < kRollingBufferSize_; ++slot) {
    buffer[slot] = new_error;
  }
  error_rates_[type] = new_error * 100.0;
}
// Helper generates a map from each current recoder_ code (ie softmax index)
// to the corresponding old_recoder code, or -1 if there isn't one.
// Used when re-mapping an old network's output layer onto a new charset.
std::vector<int> LSTMTrainer::MapRecoder(
    const UNICHARSET &old_chset, const UnicharCompress &old_recoder) const {
  int num_new_codes = recoder_.code_range();
  int num_new_unichars = GetUnicharset().size();
  // code_map[c] = matching old code for new code c, or -1 if none found.
  std::vector<int> code_map(num_new_codes, -1);
  for (int c = 0; c < num_new_codes; ++c) {
    int old_code = -1;
    // Find all new unichar_ids that recode to something that includes c.
    // The <= is to include the null char, which may be beyond the unicharset.
    for (int uid = 0; uid <= num_new_unichars; ++uid) {
      RecodedCharID codes;
      int length = recoder_.EncodeUnichar(uid, &codes);
      // Scan this unichar's code sequence for position of c.
      int code_index = 0;
      while (code_index < length && codes(code_index) != c) {
        ++code_index;
      }
      if (code_index == length) {
        continue; // This unichar's encoding doesn't use code c.
      }
      // The old unicharset must have the same unichar.
      // uid == num_new_unichars is the null char: map it to the old null,
      // assumed to be the last entry of the old charset.
      int old_uid =
          uid < num_new_unichars
              ? old_chset.unichar_to_id(GetUnicharset().id_to_unichar(uid))
              : old_chset.size() - 1;
      if (old_uid == INVALID_UNICHAR_ID) {
        continue;
      }
      // The encoding of old_uid at the same code_index is the old code.
      RecodedCharID old_codes;
      if (code_index < old_recoder.EncodeUnichar(old_uid, &old_codes)) {
        old_code = old_codes(code_index);
        break; // First match wins.
      }
    }
    code_map[c] = old_code;
  }
  return code_map;
}
// Private version of InitCharSet above finishes the job after initializing
// the mgr_ data member: loads the unicharset + recoder from mgr_ and
// computes the null char. Aborts if the traineddata lacks the charsets.
void LSTMTrainer::InitCharSet() {
  EmptyConstructor();
  training_flags_ = TF_COMPRESS_UNICHARSET;
  // Initialize the unicharset and recoder.
  if (!LoadCharsets(&mgr_)) {
    // ASSERT_HOST on a non-null string literal always fires, carrying the
    // message into the abort output.
    ASSERT_HOST(
        "Must provide a traineddata containing lstm_unicharset and"
        " lstm_recoder!\n" != nullptr);
  }
  SetNullChar();
}
// Helper computes and sets the null_char_: picks UNICHAR_BROKEN when the
// charset has special codes, otherwise an id one past the end, then maps
// that id through the recoder and keeps the first code of its sequence.
void LSTMTrainer::SetNullChar() {
  null_char_ = GetUnicharset().has_special_codes() ? UNICHAR_BROKEN
                                                   : GetUnicharset().size();
  RecodedCharID code;
  recoder_.EncodeUnichar(null_char_, &code);
  null_char_ = code(0);
}
// Factored sub-constructor sets up reasonable default values, shared by the
// real constructors and InitCharSet.
void LSTMTrainer::EmptyConstructor() {
#ifndef GRAPHICS_DISABLED
  // Debug windows are created lazily on first use.
  align_win_ = nullptr;
  target_win_ = nullptr;
  ctc_win_ = nullptr;
  recon_win_ = nullptr;
#endif
  checkpoint_iteration_ = 0;
  training_stage_ = 0;
  num_training_stages_ = 2;
  InitIterations();
}
// Outputs the string and periodically displays the given network inputs
// as an image in the given window, and the corresponding labels at the
// corresponding x_starts.
// Returns false if the truth string is empty.
bool LSTMTrainer::DebugLSTMTraining(const NetworkIO &inputs,
                                    const ImageData &trainingdata,
                                    const NetworkIO &fwd_outputs,
                                    const std::vector<int> &truth_labels,
                                    const NetworkIO &outputs) {
  const std::string &truth_text = DecodeLabels(truth_labels);
  // std::string::c_str() never returns nullptr; the old null test was dead
  // code. empty() expresses the actual condition.
  if (truth_text.empty()) {
    tprintf("Empty truth string at decode time!\n");
    return false;
  }
  if (debug_interval_ != 0) {
    // Get class labels, xcoords and string.
    std::vector<int> labels;
    std::vector<int> xcoords;
    LabelsFromOutputs(outputs, &labels, &xcoords);
    std::string text = DecodeLabels(labels);
    tprintf("Iteration %d: GROUND TRUTH : %s\n", training_iteration(),
            truth_text.c_str());
    if (truth_text != text) {
      tprintf("Iteration %d: ALIGNED TRUTH : %s\n", training_iteration(),
              text.c_str());
    }
    // Full activation-path dump only on the debug interval.
    if (debug_interval_ > 0 && training_iteration() % debug_interval_ == 0) {
      tprintf("TRAINING activation path for truth string %s\n",
              truth_text.c_str());
      DebugActivationPath(outputs, labels, xcoords);
#ifndef GRAPHICS_DISABLED
      DisplayForward(inputs, labels, xcoords, "LSTMTraining", &align_win_);
      if (OutputLossType() == LT_CTC) {
        DisplayTargets(fwd_outputs, "CTC Outputs", &ctc_win_);
        DisplayTargets(outputs, "CTC Targets", &target_win_);
      }
#endif
    }
  }
  return true;
}
#ifndef GRAPHICS_DISABLED
// Displays the network targets as a line graph, one colored polyline per
// feature (class), with activation strength on the y axis and time on x.
void LSTMTrainer::DisplayTargets(const NetworkIO &targets,
                                 const char *window_name, ScrollView **window) {
  int width = targets.Width();
  int num_features = targets.NumFeatures();
  Network::ClearWindow(true, window_name, width * kTargetXScale, kTargetYScale,
                       window);
  for (int c = 0; c < num_features; ++c) {
    // Cycle through distinct pen colors per class.
    int color = c % (ScrollView::GREEN_YELLOW - 1) + 2;
    (*window)->Pen(static_cast<ScrollView::Color>(color));
    // start_t tracks the beginning of the current above-threshold run,
    // -1 when the pen is "up" (activation below threshold).
    int start_t = -1;
    for (int t = 0; t < width; ++t) {
      double target = targets.f(t)[c];
      target *= kTargetYScale;
      if (target >= 1) {
        if (start_t < 0) {
          // Run starts: move pen to baseline just before this timestep.
          (*window)->SetCursor(t - 1, 0);
          start_t = t;
        }
        (*window)->DrawTo(t, target);
      } else if (start_t >= 0) {
        // Run ended: close the polygon back to the baseline.
        (*window)->DrawTo(t, 0);
        (*window)->DrawTo(start_t - 1, 0);
        start_t = -1;
      }
    }
    // Close a run that extends to the right edge.
    if (start_t >= 0) {
      (*window)->DrawTo(width, 0);
      (*window)->DrawTo(start_t - 1, 0);
    }
  }
  (*window)->Update();
}
#endif // !GRAPHICS_DISABLED
// Builds a no-compromises target where the first positions should be the
// truth labels and the rest is padded with the null_char_.
bool LSTMTrainer::ComputeTextTargets(const NetworkIO &outputs,
                                     const std::vector<int> &truth_labels,
                                     NetworkIO *targets) {
  // The transcription must fit in the available timesteps.
  if (truth_labels.size() > targets->Width()) {
    tprintf("Error: transcription %s too long to fit into target of width %d\n",
            DecodeLabels(truth_labels).c_str(), targets->Width());
    return false;
  }
  // One truth label per leading timestep...
  int t = 0;
  for (auto label : truth_labels) {
    targets->SetActivations(t, label, 1.0);
    ++t;
  }
  // ...then pad the remainder with the null char.
  while (t < targets->Width()) {
    targets->SetActivations(t, null_char_, 1.0);
    ++t;
  }
  return true;
}
// Builds a target using standard CTC. truth_labels should be pre-padded with
// nulls wherever desired. They don't have to be between all labels.
// outputs is input-output, as it gets clipped to minimum probability.
bool LSTMTrainer::ComputeCTCTargets(const std::vector<int> &truth_labels,
                                    NetworkIO *outputs, NetworkIO *targets) {
  // Bottom-clip outputs to a minimum probability.
  CTC::NormalizeProbs(outputs);
  // Delegates the forward-backward alignment to the CTC implementation.
  return CTC::ComputeCTCTargets(truth_labels, null_char_,
                                outputs->float_array(), targets);
}
// Computes network errors, and stores the results in the rolling buffers,
// along with the supplied text_error.
// Returns the delta error of the current sample (not running average.)
double LSTMTrainer::ComputeErrorRates(const NetworkIO &deltas,
                                      double char_error, double word_error) {
  UpdateErrorBuffer(ComputeRMSError(deltas), ET_RMS);
  // Delta error is the fraction of timesteps with >0.5 error in the top choice
  // score. If zero, then the top choice characters are guaranteed correct,
  // even when there is residue in the RMS error.
  double delta_error = ComputeWinnerError(deltas);
  UpdateErrorBuffer(delta_error, ET_DELTA);
  UpdateErrorBuffer(word_error, ET_WORD_RECERR);
  UpdateErrorBuffer(char_error, ET_CHAR_ERROR);
  // Skip ratio measures the difference between sample_iteration_ and
  // training_iteration_, which reflects the number of unusable samples,
  // usually due to unencodable truth text, or the text not fitting in the
  // space for the output.
  double skip_count = sample_iteration_ - prev_sample_iteration_;
  UpdateErrorBuffer(skip_count, ET_SKIP_RATIO);
  return delta_error;
}
// Computes the network activation RMS error rate: the root of the mean
// squared delta over all timesteps and classes.
double LSTMTrainer::ComputeRMSError(const NetworkIO &deltas) {
  const int width = deltas.Width();
  const int num_classes = deltas.NumFeatures();
  double sum_sq = 0.0;
  for (int t = 0; t < width; ++t) {
    const float *errs = deltas.f(t);
    for (int c = 0; c < num_classes; ++c) {
      const double err = errs[c];
      sum_sq += err * err;
    }
  }
  return sqrt(sum_sq / (width * num_classes));
}
// Computes network activation winner error rate. (Number of values that are
// in error by >= 0.5 divided by number of time-steps.) More closely related
// to final character error than RMS, but still directly calculable from
// just the deltas. Because of the binary nature of the targets, zero winner
// error is a sufficient but not necessary condition for zero char error.
double LSTMTrainer::ComputeWinnerError(const NetworkIO &deltas) {
  const int width = deltas.Width();
  const int num_classes = deltas.NumFeatures();
  int num_errors = 0;
  for (int t = 0; t < width; ++t) {
    const float *errs = deltas.f(t);
    for (int c = 0; c < num_classes; ++c) {
      // TODO(rays) Filtering cases where the delta is very large to cut out
      // GT errors doesn't work. Find a better way or get better truth.
      if (0.5 <= std::fabs(errs[c])) {
        ++num_errors;
      }
    }
  }
  return static_cast<double>(num_errors) / width;
}
// Computes a very simple bag of chars char error rate: the residual label
// counts after cancelling OCR output against truth, normalized by truth size.
double LSTMTrainer::ComputeCharError(const std::vector<int> &truth_str,
                                     const std::vector<int> &ocr_str) {
  // Per-class running balance: +1 per truth occurrence, -1 per OCR occurrence.
  std::vector<int> counts(NumOutputs());
  unsigned truth_size = 0;
  for (int label : truth_str) {
    if (label == null_char_) {
      continue; // Nulls are separators, not characters.
    }
    ++counts[label];
    ++truth_size;
  }
  for (int label : ocr_str) {
    if (label == null_char_) {
      continue;
    }
    --counts[label];
  }
  // Any nonzero balance (either direction) counts as error.
  unsigned char_errors = 0;
  for (int residual : counts) {
    char_errors += abs(residual);
  }
  // Limit BCER to interval [0,1] and avoid division by zero.
  if (truth_size <= char_errors) {
    return (char_errors == 0) ? 0.0 : 1.0;
  }
  return static_cast<double>(char_errors) / truth_size;
}
// Computes word recall error rate using a very simple bag of words algorithm:
// each truth word adds 1 to its count, each OCR word subtracts 1; positive
// residuals are truth words that were not recalled.
// NOTE: despite the pointer parameters (kept for interface compatibility),
// this implementation does not modify either input string.
double LSTMTrainer::ComputeWordError(std::string *truth_str,
                                     std::string *ocr_str) {
  using StrMap = std::unordered_map<std::string, int, std::hash<std::string>>;
  std::vector<std::string> truth_words = split(*truth_str, ' ');
  if (truth_words.empty()) {
    return 0.0;
  }
  std::vector<std::string> ocr_words = split(*ocr_str, ' ');
  StrMap word_counts;
  // operator[] default-constructs missing counts to 0, replacing the
  // verbose find/insert dance; the words themselves need no re-copying.
  for (const auto &truth_word : truth_words) {
    ++word_counts[truth_word];
  }
  for (const auto &ocr_word : ocr_words) {
    --word_counts[ocr_word];
  }
  // Sum the positive residuals: truth words the OCR output failed to recall.
  int word_recall_errs = 0;
  for (const auto &word_count : word_counts) {
    if (word_count.second > 0) {
      word_recall_errs += word_count.second;
    }
  }
  return static_cast<double>(word_recall_errs) / truth_words.size();
}
// Updates the error buffer and corresponding mean of the given type with
// the new_error.
void LSTMTrainer::UpdateErrorBuffer(double new_error, ErrorTypes type) {
  // Rolling buffer indexed by training iteration modulo buffer size.
  int index = training_iteration_ % kRollingBufferSize_;
  error_buffers_[type][index] = new_error;
  // Compute the mean error.
  // Early in training fewer than kRollingBufferSize_ slots are populated,
  // so only average over the filled prefix.
  int mean_count =
      std::min<int>(training_iteration_ + 1, error_buffers_[type].size());
  double buffer_sum = 0.0;
  for (int i = 0; i < mean_count; ++i) {
    buffer_sum += error_buffers_[type][i];
  }
  double mean = buffer_sum / mean_count;
  // Trim precision to 1/1000 of 1%.
  error_rates_[type] = IntCastRounded(100000.0 * mean) / 1000.0;
}
// Rolls error buffers and reports the current means.
void LSTMTrainer::RollErrorBuffers() {
  prev_sample_iteration_ = sample_iteration_;
  // A nonzero delta error means the sample actually taught something;
  // perfect samples only update last_perfect_training_iteration_.
  if (NewSingleError(ET_DELTA) > 0.0) {
    ++learning_iteration_;
  } else {
    last_perfect_training_iteration_ = training_iteration_;
  }
  ++training_iteration_;
  if (debug_interval_ != 0) {
    tprintf("Mean rms=%g%%, delta=%g%%, train=%g%%(%g%%), skip ratio=%g%%\n",
            error_rates_[ET_RMS], error_rates_[ET_DELTA],
            error_rates_[ET_CHAR_ERROR], error_rates_[ET_WORD_RECERR],
            error_rates_[ET_SKIP_RATIO]);
  }
}
// Given that error_rate is either a new min or max, updates the best/worst
// error rates, and record of progress.
// Tester is an externally supplied callback function that tests on some
// data set with a given model and records the error rates in a graph.
std::string LSTMTrainer::UpdateErrorGraph(int iteration, double error_rate,
                                          const std::vector<char> &model_data,
                                          const TestCallback &tester) {
  if (error_rate > best_error_rate_ &&
      iteration < best_iteration_ + kErrorGraphInterval) {
    // Too soon to record a new point.
    if (tester != nullptr && !worst_model_data_.empty()) {
      // Re-test the stashed worst model instead; nullptr error rates
      // signal the tester that this is not a new data point.
      mgr_.OverwriteEntry(TESSDATA_LSTM, &worst_model_data_[0],
                          worst_model_data_.size());
      return tester(worst_iteration_, nullptr, mgr_, CurrentTrainingStage());
    } else {
      return "";
    }
  }
  std::string result;
  // NOTE: there are 2 asymmetries here:
  // 1. We are computing the global minimum, but the local maximum in between.
  // 2. If the tester returns an empty string, indicating that it is busy,
  //    call it repeatedly on new local maxima to test the previous min, but
  //    not the other way around, as there is little point testing the maxima
  //    between very frequent minima.
  if (error_rate < best_error_rate_) {
    // This is a new (global) minimum.
    if (tester != nullptr && !worst_model_data_.empty()) {
      mgr_.OverwriteEntry(TESSDATA_LSTM, &worst_model_data_[0],
                          worst_model_data_.size());
      result = tester(worst_iteration_, worst_error_rates_, mgr_,
                      CurrentTrainingStage());
      worst_model_data_.clear();
      best_model_data_ = model_data;
    }
    best_error_rate_ = error_rate;
    memcpy(best_error_rates_, error_rates_, sizeof(error_rates_));
    best_iteration_ = iteration;
    best_error_history_.push_back(error_rate);
    best_error_iterations_.push_back(iteration);
    // Compute 2% decay time: walk back through the history of minima to the
    // most recent one that was at least 2 (percentage points) worse.
    double two_percent_more = error_rate + 2.0;
    int i;
    for (i = best_error_history_.size() - 1;
         i >= 0 && best_error_history_[i] < two_percent_more; --i) {
    }
    int old_iteration = i >= 0 ? best_error_iterations_[i] : 0;
    improvement_steps_ = iteration - old_iteration;
    tprintf("2 Percent improvement time=%d, best error was %g @ %d\n",
            improvement_steps_, i >= 0 ? best_error_history_[i] : 100.0,
            old_iteration);
  } else if (error_rate > best_error_rate_) {
    // This is a new (local) maximum.
    if (tester != nullptr) {
      if (!best_model_data_.empty()) {
        // Test the stashed best model now that we know it was a minimum.
        mgr_.OverwriteEntry(TESSDATA_LSTM, &best_model_data_[0],
                            best_model_data_.size());
        result = tester(best_iteration_, best_error_rates_, mgr_,
                        CurrentTrainingStage());
      } else if (!worst_model_data_.empty()) {
        // Allow for multiple data points with "worst" error rate.
        mgr_.OverwriteEntry(TESSDATA_LSTM, &worst_model_data_[0],
                            worst_model_data_.size());
        result = tester(worst_iteration_, worst_error_rates_, mgr_,
                        CurrentTrainingStage());
      }
      if (result.length() > 0) {
        // Tester accepted the point; the best model no longer needs stashing.
        best_model_data_.clear();
      }
      worst_model_data_ = model_data;
    }
  }
  // Always record the current point as the running worst.
  worst_error_rate_ = error_rate;
  memcpy(worst_error_rates_, error_rates_, sizeof(error_rates_));
  worst_iteration_ = iteration;
  return result;
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/training/unicharset/lstmtrainer.cpp
|
C++
|
apache-2.0
| 56,797
|
///////////////////////////////////////////////////////////////////////
// File: lstmtrainer.h
// Description: Top-level line trainer class for LSTM-based networks.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_LSTM_LSTMTRAINER_H_
#define TESSERACT_LSTM_LSTMTRAINER_H_
#include "export.h"
#include "imagedata.h" // for DocumentCache
#include "lstmrecognizer.h"
#include "rect.h"
#include <functional> // for std::function
#include <sstream> // for std::stringstream
namespace tesseract {
class LSTM;
class LSTMTester;
class LSTMTrainer;
class Parallel;
class Reversed;
class Softmax;
class Series;
// Enum for the types of errors that are counted.
// Each type has its own rolling buffer and running-mean error rate.
enum ErrorTypes {
  ET_RMS,         // RMS activation error.
  ET_DELTA,       // Number of big errors in deltas.
  ET_WORD_RECERR, // Output text string word recall error.
  ET_CHAR_ERROR,  // Output text string total char error.
  ET_SKIP_RATIO,  // Fraction of samples skipped.
  ET_COUNT        // For array sizing.
};
// Enum for the trainability_ flags.
// Classifies each training sample's usefulness after forward-backward.
enum Trainability {
  TRAINABLE,        // Non-zero delta error.
  PERFECT,          // Zero delta error.
  UNENCODABLE,      // Not trainable due to coding/alignment trouble.
  HI_PRECISION_ERR, // Hi confidence disagreement.
  NOT_BOXED,        // Early in training and has no character boxes.
};
// Enum to define the amount of data to get serialized.
// Controls how much trainer state goes into a training dump.
enum SerializeAmount {
  LIGHT,           // Minimal data for remote training.
  NO_BEST_TRAINER, // Save an empty vector in place of best_trainer_.
  FULL,            // All data including best_trainer_.
};
// Enum to indicate how the sub_trainer_ training went.
// Returned by UpdateSubtrainer.
enum SubTrainerResult {
  STR_NONE,    // Did nothing as not good enough.
  STR_UPDATED, // Subtrainer was updated, but didn't replace *this.
  STR_REPLACED // Subtrainer replaced *this.
};
class LSTMTrainer;
// Function to compute and record error rates on some external test set(s).
// Args are: iteration, mean errors, model, training stage.
// Returns a string containing logging information about the tests;
// an empty string indicates the tester is busy or declined the point.
using TestCallback = std::function<std::string(int, const double *,
                                               const TessdataManager &, int)>;
// Trainer class for LSTM networks. Most of the effort is in creating the
// ideal target outputs from the transcription. A box file is used if it is
// available, otherwise estimates of the char widths from the unicharset are
// used to guide a DP search for the best fit to the transcription.
class TESS_UNICHARSET_TRAINING_API LSTMTrainer : public LSTMRecognizer {
public:
LSTMTrainer();
LSTMTrainer(const std::string &model_base,
const std::string &checkpoint_name,
int debug_interval, int64_t max_memory);
virtual ~LSTMTrainer();
// Tries to deserialize a trainer from the given file and silently returns
// false in case of failure. If old_traineddata is not null, then it is
// assumed that the character set is to be re-mapped from old_traineddata to
// the new, with consequent change in weight matrices etc.
bool TryLoadingCheckpoint(const char *filename, const char *old_traineddata);
// Initializes the character set encode/decode mechanism directly from a
// previously setup traineddata containing dawgs, UNICHARSET and
// UnicharCompress. Note: Call before InitNetwork!
bool InitCharSet(const std::string &traineddata_path) {
bool success = mgr_.Init(traineddata_path.c_str());
if (success) {
InitCharSet();
}
return success;
}
void InitCharSet(const TessdataManager &mgr) {
mgr_ = mgr;
InitCharSet();
}
// Initializes the trainer with a network_spec in the network description
// net_flags control network behavior according to the NetworkFlags enum.
// There isn't really much difference between them - only where the effects
// are implemented.
// For other args see NetworkBuilder::InitNetwork.
// Note: Be sure to call InitCharSet before InitNetwork!
bool InitNetwork(const char *network_spec, int append_index, int net_flags,
float weight_range, float learning_rate, float momentum,
float adam_beta);
// Initializes a trainer from a serialized TFNetworkModel proto.
// Returns the global step of TensorFlow graph or 0 if failed.
// Building a compatible TF graph: See tfnetwork.proto.
int InitTensorFlowNetwork(const std::string &tf_proto);
// Resets all the iteration counters for fine tuning or training a head,
// where we want the error reporting to reset.
void InitIterations();
// Accessors.
double ActivationError() const {
return error_rates_[ET_DELTA];
}
double CharError() const {
return error_rates_[ET_CHAR_ERROR];
}
const double *error_rates() const {
return error_rates_;
}
double best_error_rate() const {
return best_error_rate_;
}
int best_iteration() const {
return best_iteration_;
}
int learning_iteration() const {
return learning_iteration_;
}
int32_t improvement_steps() const {
return improvement_steps_;
}
void set_perfect_delay(int delay) {
perfect_delay_ = delay;
}
const std::vector<char> &best_trainer() const {
return best_trainer_;
}
// Returns the error that was just calculated by PrepareForBackward.
double NewSingleError(ErrorTypes type) const {
return error_buffers_[type][training_iteration() % kRollingBufferSize_];
}
// Returns the error that was just calculated by TrainOnLine. Since
// TrainOnLine rolls the error buffers, this is one further back than
// NewSingleError.
double LastSingleError(ErrorTypes type) const {
return error_buffers_[type]
[(training_iteration() + kRollingBufferSize_ - 1) %
kRollingBufferSize_];
}
const DocumentCache &training_data() const {
return training_data_;
}
DocumentCache *mutable_training_data() {
return &training_data_;
}
// If the training sample is usable, grid searches for the optimal
// dict_ratio/cert_offset, and returns the results in a string of space-
// separated triplets of ratio,offset=worderr.
Trainability GridSearchDictParams(
const ImageData *trainingdata, int iteration, double min_dict_ratio,
double dict_ratio_step, double max_dict_ratio, double min_cert_offset,
double cert_offset_step, double max_cert_offset, std::string &results);
// Provides output on the distribution of weight values.
void DebugNetwork();
// Loads a set of lstmf files that were created using the lstm.train config to
// tesseract into memory ready for training. Returns false if nothing was
// loaded.
bool LoadAllTrainingData(const std::vector<std::string> &filenames,
CachingStrategy cache_strategy,
bool randomly_rotate);
// Keeps track of best and locally worst error rate, using internally computed
// values. See MaintainCheckpointsSpecific for more detail.
bool MaintainCheckpoints(const TestCallback &tester, std::stringstream &log_msg);
// Keeps track of best and locally worst error_rate (whatever it is) and
// launches tests using rec_model, when a new min or max is reached.
// Writes checkpoints using train_model at appropriate times and builds and
// returns a log message to indicate progress. Returns false if nothing
// interesting happened.
bool MaintainCheckpointsSpecific(int iteration,
const std::vector<char> *train_model,
const std::vector<char> *rec_model,
TestCallback tester, std::stringstream &log_msg);
// Builds a progress message with current error rates.
void PrepareLogMsg(std::stringstream &log_msg) const;
// Appends <intro_str> iteration learning_iteration()/training_iteration()/
// sample_iteration() to the log_msg.
void LogIterations(const char *intro_str, std::stringstream &log_msg) const;
// TODO(rays) Add curriculum learning.
// Returns true and increments the training_stage_ if the error rate has just
// passed through the given threshold for the first time.
bool TransitionTrainingStage(float error_threshold);
// Returns the current training stage.
int CurrentTrainingStage() const {
return training_stage_;
}
// Writes to the given file. Returns false in case of error.
bool Serialize(SerializeAmount serialize_amount, const TessdataManager *mgr,
TFile *fp) const;
// Reads from the given file. Returns false in case of error.
bool DeSerialize(const TessdataManager *mgr, TFile *fp);
// De-serializes the saved best_trainer_ into sub_trainer_, and adjusts the
// learning rates (by scaling reduction, or layer specific, according to
// NF_LAYER_SPECIFIC_LR).
void StartSubtrainer(std::stringstream &log_msg);
// While the sub_trainer_ is behind the current training iteration and its
// training error is at least kSubTrainerMarginFraction better than the
// current training error, trains the sub_trainer_, and returns STR_UPDATED if
// it did anything. If it catches up, and has a better error rate than the
// current best, as well as a margin over the current error rate, then the
// trainer in *this is replaced with sub_trainer_, and STR_REPLACED is
// returned. STR_NONE is returned if the subtrainer wasn't good enough to
// receive any training iterations.
SubTrainerResult UpdateSubtrainer(std::stringstream &log_msg);
// Reduces network learning rates, either for everything, or for layers
// independently, according to NF_LAYER_SPECIFIC_LR.
void ReduceLearningRates(LSTMTrainer *samples_trainer, std::stringstream &log_msg);
// Considers reducing the learning rate independently for each layer down by
// factor(<1), or leaving it the same, by double-training the given number of
// samples and minimizing the amount of changing of sign of weight updates.
// Even if it looks like all weights should remain the same, an adjustment
// will be made to guarantee a different result when reverting to an old best.
// Returns the number of layer learning rates that were reduced.
int ReduceLayerLearningRates(TFloat factor, int num_samples,
LSTMTrainer *samples_trainer);
// Converts the string to integer class labels, with appropriate null_char_s
// in between if not in SimpleTextOutput mode. Returns false on failure.
bool EncodeString(const std::string &str, std::vector<int> *labels) const {
return EncodeString(str, GetUnicharset(),
IsRecoding() ? &recoder_ : nullptr, SimpleTextOutput(),
null_char_, labels);
}
// Static version operates on supplied unicharset, encoder, simple_text.
static bool EncodeString(const std::string &str, const UNICHARSET &unicharset,
const UnicharCompress *recoder, bool simple_text,
int null_char, std::vector<int> *labels);
// Performs forward-backward on the given trainingdata.
// Returns the sample that was used or nullptr if the next sample was deemed
// unusable. samples_trainer could be this or an alternative trainer that
// holds the training samples.
const ImageData *TrainOnLine(LSTMTrainer *samples_trainer, bool batch) {
int sample_index = sample_iteration();
const ImageData *image =
samples_trainer->training_data_.GetPageBySerial(sample_index);
if (image != nullptr) {
Trainability trainable = TrainOnLine(image, batch);
if (trainable == UNENCODABLE || trainable == NOT_BOXED) {
return nullptr; // Sample was unusable.
}
} else {
++sample_iteration_;
}
return image;
}
Trainability TrainOnLine(const ImageData *trainingdata, bool batch);
// Prepares the ground truth, runs forward, and prepares the targets.
// Returns a Trainability enum to indicate the suitability of the sample.
Trainability PrepareForBackward(const ImageData *trainingdata,
NetworkIO *fwd_outputs, NetworkIO *targets);
// Writes the trainer to memory, so that the current training state can be
// restored. *this must always be the master trainer that retains the only
// copy of the training data and language model. trainer is the model that is
// actually serialized.
bool SaveTrainingDump(SerializeAmount serialize_amount,
const LSTMTrainer &trainer,
std::vector<char> *data) const;
// Reads previously saved trainer from memory. *this must always be the
// master trainer that retains the only copy of the training data and
// language model. trainer is the model that is restored.
bool ReadTrainingDump(const std::vector<char> &data,
LSTMTrainer &trainer) const {
if (data.empty()) {
return false;
}
return ReadSizedTrainingDump(&data[0], data.size(), trainer);
}
  // As ReadTrainingDump, but takes a raw buffer pointer and length instead of
  // a vector. Restores the dump into trainer, using this (master) trainer's
  // TessdataManager. Returns false on deserialization failure.
  bool ReadSizedTrainingDump(const char *data, int size,
                             LSTMTrainer &trainer) const {
    return trainer.ReadLocalTrainingDump(&mgr_, data, size);
  }
// Restores the model to *this.
bool ReadLocalTrainingDump(const TessdataManager *mgr, const char *data,
int size);
// Sets up the data for MaintainCheckpoints from a light ReadTrainingDump.
void SetupCheckpointInfo();
// Writes the full recognition traineddata to the given filename.
bool SaveTraineddata(const char *filename);
// Writes the recognizer to memory, so that it can be used for testing later.
void SaveRecognitionDump(std::vector<char> *data) const;
// Returns a suitable filename for a training dump, based on the model_base_,
// the iteration and the error rates.
std::string DumpFilename() const;
// Fills the whole error buffer of the given type with the given value.
void FillErrorBuffer(double new_error, ErrorTypes type);
// Helper generates a map from each current recoder_ code (ie softmax index)
// to the corresponding old_recoder code, or -1 if there isn't one.
std::vector<int> MapRecoder(const UNICHARSET &old_chset,
const UnicharCompress &old_recoder) const;
protected:
// Private version of InitCharSet above finishes the job after initializing
// the mgr_ data member.
void InitCharSet();
// Helper computes and sets the null_char_.
void SetNullChar();
// Factored sub-constructor sets up reasonable default values.
void EmptyConstructor();
// Outputs the string and periodically displays the given network inputs
// as an image in the given window, and the corresponding labels at the
// corresponding x_starts.
// Returns false if the truth string is empty.
bool DebugLSTMTraining(const NetworkIO &inputs, const ImageData &trainingdata,
const NetworkIO &fwd_outputs,
const std::vector<int> &truth_labels,
const NetworkIO &outputs);
// Displays the network targets as line a line graph.
void DisplayTargets(const NetworkIO &targets, const char *window_name,
ScrollView **window);
// Builds a no-compromises target where the first positions should be the
// truth labels and the rest is padded with the null_char_.
bool ComputeTextTargets(const NetworkIO &outputs,
const std::vector<int> &truth_labels,
NetworkIO *targets);
// Builds a target using standard CTC. truth_labels should be pre-padded with
// nulls wherever desired. They don't have to be between all labels.
// outputs is input-output, as it gets clipped to minimum probability.
bool ComputeCTCTargets(const std::vector<int> &truth_labels,
NetworkIO *outputs, NetworkIO *targets);
// Computes network errors, and stores the results in the rolling buffers,
// along with the supplied text_error.
// Returns the delta error of the current sample (not running average.)
double ComputeErrorRates(const NetworkIO &deltas, double char_error,
double word_error);
// Computes the network activation RMS error rate.
double ComputeRMSError(const NetworkIO &deltas);
// Computes network activation winner error rate. (Number of values that are
// in error by >= 0.5 divided by number of time-steps.) More closely related
// to final character error than RMS, but still directly calculable from
// just the deltas. Because of the binary nature of the targets, zero winner
// error is a sufficient but not necessary condition for zero char error.
double ComputeWinnerError(const NetworkIO &deltas);
// Computes a very simple bag of chars char error rate.
double ComputeCharError(const std::vector<int> &truth_str,
const std::vector<int> &ocr_str);
// Computes a very simple bag of words word recall error rate.
// NOTE that this is destructive on both input strings.
double ComputeWordError(std::string *truth_str, std::string *ocr_str);
// Updates the error buffer and corresponding mean of the given type with
// the new_error.
void UpdateErrorBuffer(double new_error, ErrorTypes type);
// Rolls error buffers and reports the current means.
void RollErrorBuffers();
// Given that error_rate is either a new min or max, updates the best/worst
// error rates, and record of progress.
std::string UpdateErrorGraph(int iteration, double error_rate,
const std::vector<char> &model_data,
const TestCallback &tester);
protected:
#ifndef GRAPHICS_DISABLED
// Alignment display window.
ScrollView *align_win_;
// CTC target display window.
ScrollView *target_win_;
// CTC output display window.
ScrollView *ctc_win_;
// Reconstructed image window.
ScrollView *recon_win_;
#endif
// How often to display a debug image.
int debug_interval_;
// Iteration at which the last checkpoint was dumped.
int checkpoint_iteration_;
// Basename of files to save best models to.
std::string model_base_;
// Checkpoint filename.
std::string checkpoint_name_;
// Training data.
bool randomly_rotate_;
DocumentCache training_data_;
// Name to use when saving best_trainer_.
std::string best_model_name_;
// Number of available training stages.
int num_training_stages_;
// ===Serialized data to ensure that a restart produces the same results.===
// These members are only serialized when serialize_amount != LIGHT.
// Best error rate so far.
double best_error_rate_;
// Snapshot of all error rates at best_iteration_.
double best_error_rates_[ET_COUNT];
// Iteration of best_error_rate_.
int best_iteration_;
// Worst error rate since best_error_rate_.
double worst_error_rate_;
// Snapshot of all error rates at worst_iteration_.
double worst_error_rates_[ET_COUNT];
// Iteration of worst_error_rate_.
int worst_iteration_;
// Iteration at which the process will be thought stalled.
int stall_iteration_;
// Saved recognition models for computing test error for graph points.
std::vector<char> best_model_data_;
std::vector<char> worst_model_data_;
// Saved trainer for reverting back to last known best.
std::vector<char> best_trainer_;
// A subsidiary trainer running with a different learning rate until either
// *this or sub_trainer_ hits a new best.
std::unique_ptr<LSTMTrainer> sub_trainer_;
// Error rate at which last best model was dumped.
float error_rate_of_last_saved_best_;
// Current stage of training.
int training_stage_;
// History of best error rate against iteration. Used for computing the
// number of steps to each 2% improvement.
std::vector<double> best_error_history_;
std::vector<int32_t> best_error_iterations_;
// Number of iterations since the best_error_rate_ was 2% more than it is now.
int32_t improvement_steps_;
// Number of iterations that yielded a non-zero delta error and thus provided
// significant learning. learning_iteration_ <= training_iteration_.
// learning_iteration_ is used to measure rate of learning progress.
int learning_iteration_;
// Saved value of sample_iteration_ before looking for the next sample.
int prev_sample_iteration_;
// How often to include a PERFECT training sample in backprop.
// A PERFECT training sample is used if the current
// training_iteration_ > last_perfect_training_iteration_ + perfect_delay_,
// so with perfect_delay_ == 0, all samples are used, and with
// perfect_delay_ == 4, at most 1 in 5 samples will be perfect.
int perfect_delay_;
// Value of training_iteration_ at which the last PERFECT training sample
// was used in back prop.
int last_perfect_training_iteration_;
// Rolling buffers storing recent training errors are indexed by
// training_iteration % kRollingBufferSize_.
static const int kRollingBufferSize_ = 1000;
std::vector<double> error_buffers_[ET_COUNT];
// Rounded mean percent trailing training errors in the buffers.
double error_rates_[ET_COUNT]; // RMS training error.
// Traineddata file with optional dawgs + UNICHARSET and recoder.
TessdataManager mgr_;
};
} // namespace tesseract.
#endif // TESSERACT_LSTM_LSTMTRAINER_H_
|
2301_81045437/tesseract
|
src/training/unicharset/lstmtrainer.h
|
C++
|
apache-2.0
| 21,977
|
/**********************************************************************
* File: normstrngs.cpp
* Description: Utilities to normalize and manipulate UTF-32 and
* UTF-8 strings.
* Author: Ranjith Unnikrishnan
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "normstrngs.h"
#include <string>
#include <unordered_map>
#include <vector>
#include <tesseract/unichar.h>
#include "errcode.h"
#include "icuerrorcode.h"
#include "unicode/normalizer2.h" // From libicu
#include "unicode/translit.h" // From libicu
#include "unicode/uchar.h" // From libicu
#include "unicode/unorm2.h" // From libicu
#include "unicode/uscript.h" // From libicu
namespace tesseract {
static bool is_hyphen_punc(const char32 ch) {
static const char32 kHyphenPuncUnicodes[] = {
'-',
0x2010, // hyphen
0x2011, // non-breaking hyphen
0x2012, // figure dash
0x2013, // en dash
0x2014, // em dash
0x2015, // horizontal bar
// how about 0x2043 hyphen bullet?
// how about 0x2500 box drawings light horizontal?
0x207b, // superscript minus
0x208b, // subscript minus
0x2212, // minus sign
0xfe58, // small em dash
0xfe63, // small hyphen-minus
0xff0d, // fullwidth hyphen-minus
0x2e17 // double oblique hyphen (Fraktur)
};
for (int kHyphenPuncUnicode : kHyphenPuncUnicodes) {
if (kHyphenPuncUnicode == ch) {
return true;
}
}
return false;
}
static bool is_single_quote(const char32 ch) {
static const char32 kSingleQuoteUnicodes[] = {
'\'', '`',
0x2018, // left single quotation mark (English, others)
0x2019, // right single quotation mark (Danish, Finnish, Swedish, Norw.)
// We may have to introduce a comma set with 0x201a
0x201A, // single low-9 quotation mark (German)
0x201B, // single high-reversed-9 quotation mark (PropList.txt)
0x2032, // prime
0x300C, // left corner bracket (East Asian languages)
0xFF07 // fullwidth apostrophe
};
for (int kSingleQuoteUnicode : kSingleQuoteUnicodes) {
if (kSingleQuoteUnicode == ch) {
return true;
}
}
return false;
}
static bool is_double_quote(const char32 ch) {
static const char32 kDoubleQuoteUnicodes[] = {
'"',
0x201C, // left double quotation mark (English, others)
0x201D, // right double quotation mark (Danish, Finnish, Swedish, Norw.)
0x201F, // double high-reversed-9 quotation mark (PropList.txt)
0x2033, // double prime
0x201E, // double low-9 quotation mark (German)
0x301D, // reversed double prime quotation mark (East Asian langs,
// horiz.)
0x301E, // close double prime (East Asian languages written horizontally)
0xFF02 // fullwidth quotation mark
};
for (int kDoubleQuoteUnicode : kDoubleQuoteUnicodes) {
if (kDoubleQuoteUnicode == ch) {
return true;
}
}
return false;
}
// Helper runs a standard unicode normalization, optional OCR normalization,
// and leaves the result as char32 for subsequent processing.
// Zero-width marks (per Validator::IsZeroWidthMark) are dropped from the
// output regardless of mode.
static void NormalizeUTF8ToUTF32(UnicodeNormMode u_mode, OCRNorm ocr_normalize, const char *str8,
                                 std::vector<char32> *normed32) {
  // Convert to ICU string for unicode normalization.
  icu::UnicodeString uch_str(str8, "UTF-8");
  IcuErrorCode error_code;
  // Convert the enum to the new weird icu representation: ICU exposes only
  // the "nfc"/"nfkc" instance names, and the decomposed forms (NFD/NFKD) are
  // obtained by requesting UNORM2_DECOMPOSE mode on the same instances.
  const char *norm_type =
      u_mode == UnicodeNormMode::kNFKD || u_mode == UnicodeNormMode::kNFKC ? "nfkc" : "nfc";
  UNormalization2Mode compose = u_mode == UnicodeNormMode::kNFC || u_mode == UnicodeNormMode::kNFKC
                                    ? UNORM2_COMPOSE
                                    : UNORM2_DECOMPOSE;
  // Pointer to singleton does not require deletion.
  const icu::Normalizer2 *normalizer =
      icu::Normalizer2::getInstance(nullptr, norm_type, compose, error_code);
  error_code.assertSuccess();
  error_code.reset();
  icu::UnicodeString norm_str = normalizer->normalize(uch_str, error_code);
  error_code.assertSuccess();
  // Convert to char32 for output. OCR normalization if required.
  // length() counts UTF-16 units, so this may over-reserve slightly when
  // supplementary-plane characters are present.
  normed32->reserve(norm_str.length()); // An approximation.
  // moveIndex32 steps by whole code points over the UTF-16 storage.
  for (int offset = 0; offset < norm_str.length(); offset = norm_str.moveIndex32(offset, 1)) {
    char32 ch = norm_str.char32At(offset);
    // Skip all ZWS, RTL and LTR marks.
    if (Validator::IsZeroWidthMark(ch)) {
      continue;
    }
    if (ocr_normalize == OCRNorm::kNormalize) {
      ch = OCRNormalize(ch);
    }
    normed32->push_back(ch);
  }
}
// Helper removes joiners from strings that contain no letters.
static void StripJoiners(std::vector<char32> *str32) {
for (char32 ch : *str32) {
if (u_isalpha(ch)) {
return;
}
}
int len = 0;
for (char32 ch : *str32) {
if (ch != Validator::kZeroWidthJoiner && ch != Validator::kZeroWidthNonJoiner) {
(*str32)[len++] = ch;
}
}
str32->resize(len);
}
// Normalizes a UTF8 string according to the given modes. Returns true on
// success. If false is returned, some failure or invalidity was present, and
// the result string is produced on a "best effort" basis.
bool NormalizeUTF8String(UnicodeNormMode u_mode, OCRNorm ocr_normalize,
                         GraphemeNorm grapheme_normalize, const char *str8,
                         std::string *normalized) {
  std::vector<char32> normed32;
  NormalizeUTF8ToUTF32(u_mode, ocr_normalize, str8, &normed32);
  if (grapheme_normalize != GraphemeNorm::kNormalize) {
    // No grapheme validation requested: just convert back to UTF-8.
    if (normalized != nullptr) {
      *normalized = UNICHAR::UTF32ToUTF8(normed32);
    }
    return true;
  }
  // Grapheme normalization: strip meaningless joiners, then validate and
  // segment the whole string as a single unit.
  StripJoiners(&normed32);
  std::vector<std::vector<char32>> graphemes;
  bool success = Validator::ValidateCleanAndSegment(
      GraphemeNormMode::kSingleString, false, normed32, &graphemes);
  if (graphemes.empty() || graphemes[0].empty()) {
    success = false;
  } else if (normalized != nullptr) {
    *normalized = UNICHAR::UTF32ToUTF8(graphemes[0]);
  }
  return success;
}
// Normalizes a UTF8 string according to the given modes and splits into
// graphemes according to g_mode. Returns true on success. If false is returned,
// some failure or invalidity was present, and the result string is produced on
// a "best effort" basis.
bool NormalizeCleanAndSegmentUTF8(UnicodeNormMode u_mode, OCRNorm ocr_normalize,
                                  GraphemeNormMode g_mode, bool report_errors, const char *str8,
                                  std::vector<std::string> *graphemes) {
  // Unicode-normalize, then drop joiners from letter-free strings before
  // validation.
  std::vector<char32> normed32;
  NormalizeUTF8ToUTF32(u_mode, ocr_normalize, str8, &normed32);
  StripJoiners(&normed32);
  std::vector<std::vector<char32>> graphemes32;
  bool success = Validator::ValidateCleanAndSegment(g_mode, report_errors, normed32, &graphemes32);
  if (g_mode != GraphemeNormMode::kSingleString && success) {
    // If we modified the string to clean it up, the segmentation may not be
    // correct, so check for changes and do it again.
    // Re-concatenate the segmented output and compare with the input to
    // detect whether the validator changed anything.
    std::vector<char32> cleaned32;
    for (const auto &g : graphemes32) {
      cleaned32.insert(cleaned32.end(), g.begin(), g.end());
    }
    if (cleaned32 != normed32) {
      // Validator mutated the text: re-run segmentation on the cleaned text.
      graphemes32.clear();
      success = Validator::ValidateCleanAndSegment(g_mode, report_errors, cleaned32, &graphemes32);
    }
  }
  // Convert each char32 grapheme back to UTF-8 for the caller.
  graphemes->clear();
  graphemes->reserve(graphemes32.size());
  for (const auto &grapheme : graphemes32) {
    graphemes->push_back(UNICHAR::UTF32ToUTF8(grapheme));
  }
  return success;
}
// Apply just the OCR-specific normalizations and return the normalized char.
// All dash-like punctuation maps to '-', single quotes to '\'', double
// quotes to '"'; anything else passes through unchanged.
char32 OCRNormalize(char32 ch) {
  if (is_hyphen_punc(ch)) {
    return '-';
  }
  if (is_single_quote(ch)) {
    return '\'';
  }
  if (is_double_quote(ch)) {
    return '"';
  }
  return ch;
}
// Returns true if ch1 and ch2 are indistinguishable after OCR normalization.
bool IsOCREquivalent(char32 ch1, char32 ch2) {
  const char32 norm1 = OCRNormalize(ch1);
  const char32 norm2 = OCRNormalize(ch2);
  return norm1 == norm2;
}
// Returns true if the value lies in the range of valid unicodes:
// [0, 0xD800) or [0xE000, 0x10FFFF]. The gap is the UTF-16 surrogate range,
// which is not a valid Unicode scalar value.
bool IsValidCodepoint(const char32 ch) {
  if (static_cast<uint32_t>(ch) < 0xD800) {
    return true; // Below the surrogate block (also rejects negatives).
  }
  return ch >= 0xE000 && ch <= 0x10FFFF;
}
// Returns true if the code point has the White_Space Unicode property,
// as reported by ICU's u_isUWhiteSpace.
// Asserts (fatally) that ch is a valid codepoint.
bool IsWhitespace(const char32 ch) {
  ASSERT_HOST_MSG(IsValidCodepoint(ch), "Invalid Unicode codepoint: 0x%x\n", ch);
  return u_isUWhiteSpace(static_cast<UChar32>(ch));
}
// Returns true if every char in the given (null-terminated) UTF-8 string has
// the White_Space Unicode property, i.e. the whitespace prefix spans the
// whole string. An empty string returns true (0 == 0).
bool IsUTF8Whitespace(const char *text) {
  return SpanUTF8Whitespace(text) == strlen(text);
}
// Returns the length in bytes of the prefix of 'text' whose code points all
// have the White_Space Unicode property.
unsigned int SpanUTF8Whitespace(const char *text) {
  unsigned int n_white = 0;
  // Hoisted: strlen was previously re-evaluated inside the loop condition
  // (via UNICHAR::end) on every iteration.
  const int text_len = strlen(text);
  for (UNICHAR::const_iterator it = UNICHAR::begin(text, text_len);
       it != UNICHAR::end(text, text_len); ++it) {
    if (!IsWhitespace(*it)) {
      break; // First non-whitespace code point ends the prefix.
    }
    n_white += it.utf8_len();
  }
  return n_white;
}
// Returns the length in bytes of the prefix of 'text' whose code points DO
// NOT have the White_Space Unicode property.
unsigned int SpanUTF8NotWhitespace(const char *text) {
  unsigned int n_notwhite = 0;
  // Hoisted: strlen was previously re-evaluated inside the loop condition
  // (via UNICHAR::end) on every iteration.
  const int text_len = strlen(text);
  for (UNICHAR::const_iterator it = UNICHAR::begin(text, text_len);
       it != UNICHAR::end(text, text_len); ++it) {
    if (IsWhitespace(*it)) {
      break; // First whitespace code point ends the prefix.
    }
    n_notwhite += it.utf8_len();
  }
  return n_notwhite;
}
// Returns true if the char is interchange valid i.e. no C0 or C1 control
// codes (other than CR LF HT FF) and no non-characters.
bool IsInterchangeValid(const char32 ch) {
  if (!IsValidCodepoint(ch)) {
    return false;
  }
  // Noncharacters: U+FDD0..U+FDEF.
  if (ch >= 0xFDD0 && ch <= 0xFDEF) {
    return false;
  }
  // Noncharacters: the last two code points of every plane (U+xxFFFE and
  // U+xxFFFF). For any valid codepoint this single mask test is equivalent
  // to enumerating all 17 plane-end ranges.
  if ((ch & 0xFFFE) == 0xFFFE) {
    return false;
  }
  // ISO controls are excluded, except newline, formfeed, tab and CR.
  return !u_isISOControl(static_cast<UChar32>(ch)) || ch == '\n' || ch == '\f' ||
         ch == '\t' || ch == '\r';
}
// Same as IsInterchangeValid but restricted to 7-bit ASCII.
// Fixed bound: 7-bit ASCII is 0..127, so use ch < 128 rather than the old
// ch <= 128. (0x80 was rejected by the C1-control test anyway, so observable
// behavior is unchanged; the bound now matches the documented contract.)
bool IsInterchangeValid7BitAscii(const char32 ch) {
  return IsValidCodepoint(ch) && ch < 128 &&
         (!u_isISOControl(static_cast<UChar32>(ch)) || ch == '\n' || ch == '\f' ||
          ch == '\t' || ch == '\r');
}
// Converts a fullwidth character to its halfwidth equivalent using ICU's
// Fullwidth-Halfwidth transliterator. Characters outside the
// fullwidth/halfwidth forms block (U+FF00..U+FFEF) are returned unchanged,
// with the single exception of U+3000 (ideographic space), which is allowed
// through to be converted to an ordinary space.
// NOTE(review): a new Transliterator instance is created and destroyed on
// every call, which looks expensive for bulk use — confirm before hot-path
// use.
char32 FullwidthToHalfwidth(const char32 ch) {
  // Return unchanged if not in the fullwidth-halfwidth Unicode block.
  if (ch < 0xFF00 || ch > 0xFFEF || !IsValidCodepoint(ch)) {
    // U+3000 is outside the block but still has a halfwidth mapping, so it
    // falls through to the transliterator below.
    if (ch != 0x3000) {
      return ch;
    }
  }
  // Special case for fullwidth left and right "white parentheses".
  if (ch == 0xFF5F) {
    return 0x2985;
  }
  if (ch == 0xFF60) {
    return 0x2986;
  }
  // Construct a full-to-half width transliterator.
  IcuErrorCode error_code;
  icu::UnicodeString uch_str(static_cast<UChar32>(ch));
  const icu::Transliterator *fulltohalf =
      icu::Transliterator::createInstance("Fullwidth-Halfwidth", UTRANS_FORWARD, error_code);
  error_code.assertSuccess();
  error_code.reset();
  // transliterate mutates uch_str in place.
  fulltohalf->transliterate(uch_str);
  // createInstance transfers ownership to the caller, hence the delete.
  delete fulltohalf;
  ASSERT_HOST(uch_str.length() != 0);
  return uch_str[0];
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/unicharset/normstrngs.cpp
|
C++
|
apache-2.0
| 11,677
|
/**********************************************************************
* File: normstrngs.h
* Description: Utilities to normalize and manipulate UTF-32 and
* UTF-8 strings.
* Author: Ranjith Unnikrishnan
* Created: Thu July 4 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_CCUTIL_NORMSTRNGS_H_
#define TESSERACT_CCUTIL_NORMSTRNGS_H_
#include "export.h"
#include "validator.h"
#include <string>
#include <vector>
namespace tesseract {
// The standard unicode normalizations.
enum class UnicodeNormMode {
  kNFD,  // Canonical decomposition.
  kNFC,  // Canonical decomposition, followed by canonical composition.
  kNFKD, // Compatibility decomposition.
  kNFKC, // Compatibility decomposition, followed by canonical composition.
};
// To normalize away differences in punctuation that are ambiguous, like
// curly quotes and different widths of dash.
enum class OCRNorm {
  kNone,      // Leave punctuation exactly as written.
  kNormalize, // Fold dashes/quotes to their ASCII equivalents.
};
// To validate and normalize away some subtle differences that can occur in
// Indic scripts, eg ensuring that an explicit virama is always followed by
// a zero-width non-joiner.
enum class GraphemeNorm {
  kNone,      // Skip grapheme validation/normalization.
  kNormalize, // Validate and normalize grapheme structure.
};
// Normalizes a UTF8 string according to the given modes. Returns true on
// success. If false is returned, some failure or invalidity was present, and
// the result string is produced on a "best effort" basis.
TESS_UNICHARSET_TRAINING_API
bool NormalizeUTF8String(UnicodeNormMode u_mode, OCRNorm ocr_normalize,
GraphemeNorm grapheme_normalize, const char *str8,
std::string *normalized);
// Normalizes a UTF8 string according to the given modes and splits into
// graphemes according to g_mode. Returns true on success. If false is returned,
// some failure or invalidity was present, and the result string is produced on
// a "best effort" basis.
TESS_UNICHARSET_TRAINING_API
bool NormalizeCleanAndSegmentUTF8(UnicodeNormMode u_mode, OCRNorm ocr_normalize,
GraphemeNormMode g_mode, bool report_errors, const char *str8,
std::vector<std::string> *graphemes);
// Applies just the OCR-specific normalizations and return the normalized char.
char32 OCRNormalize(char32 ch);
// Returns true if the OCRNormalized ch1 and ch2 are the same.
bool IsOCREquivalent(char32 ch1, char32 ch2);
// Returns true if the value lies in the range of valid unicodes.
bool IsValidCodepoint(const char32 ch);
// Returns true a code point has the White_Space Unicode property.
TESS_UNICHARSET_TRAINING_API
bool IsWhitespace(const char32 ch);
// Returns true if every char in the given (null-terminated) string has the
// White_Space Unicode property.
TESS_UNICHARSET_TRAINING_API
bool IsUTF8Whitespace(const char *text);
// Returns the length of bytes of the prefix of 'text' that have the White_Space
// unicode property.
TESS_UNICHARSET_TRAINING_API
unsigned int SpanUTF8Whitespace(const char *text);
// Returns the length of bytes of the prefix of 'text' that DO NOT have the
// White_Space unicode property.
TESS_UNICHARSET_TRAINING_API
unsigned int SpanUTF8NotWhitespace(const char *text);
// Returns true if the char is interchange valid i.e. no C0 or C1 control codes
// (other than CR LF HT FF) and no non-characters.
TESS_UNICHARSET_TRAINING_API
bool IsInterchangeValid(const char32 ch);
// Same as above but restricted to 7-bit ASCII.
TESS_UNICHARSET_TRAINING_API
bool IsInterchangeValid7BitAscii(const char32 ch);
// Convert a full-width UTF-8 string to half-width.
TESS_UNICHARSET_TRAINING_API
char32 FullwidthToHalfwidth(const char32 ch);
} // namespace tesseract
#endif // TESSERACT_CCUTIL_NORMSTRNGS_H_
|
2301_81045437/tesseract
|
src/training/unicharset/normstrngs.h
|
C++
|
apache-2.0
| 4,143
|
///////////////////////////////////////////////////////////////////////
// File: unicharset_training_utils.cpp
// Description: Training utilities for UNICHARSET.
// Author: Ray Smith
// Created: Fri Oct 17 17:09:01 PDT 2014
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "unicharset_training_utils.h"
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>
#include <tesseract/unichar.h>
#include "fileio.h"
#include "icuerrorcode.h"
#include "normstrngs.h"
#include "statistc.h"
#include "unicharset.h"
#include "unicode/uchar.h" // from libicu
#include "unicode/uscript.h" // from libicu
namespace tesseract {
// Helper sets the character attribute properties and sets up the script table.
// Does not set tops and bottoms.
// For each unichar: derives alpha/lower/upper/digit/punct flags, script,
// other-case, direction, mirror and normalized form from ICU, then runs the
// unicharset's post-load setup.
void SetupBasicProperties(bool report_errors, bool decompose, UNICHARSET *unicharset) {
  for (size_t unichar_id = 0; unichar_id < unicharset->size(); ++unichar_id) {
    // Convert any custom ligatures.
    const char *unichar_str = unicharset->id_to_unichar(unichar_id);
    for (int i = 0; UNICHARSET::kCustomLigatures[i][0] != nullptr; ++i) {
      if (!strcmp(UNICHARSET::kCustomLigatures[i][1], unichar_str)) {
        unichar_str = UNICHARSET::kCustomLigatures[i][0];
        break;
      }
    }
    // Convert the unichar to UTF32 representation
    std::vector<char32> uni_vector = UNICHAR::UTF8ToUTF32(unichar_str);
    // Assume that if the property is true for any character in the string,
    // then it holds for the whole "character".
    bool unichar_isalpha = false;
    bool unichar_islower = false;
    bool unichar_isupper = false;
    bool unichar_isdigit = false;
    bool unichar_ispunct = false;
    for (char32 u_ch : uni_vector) {
      if (u_isalpha(u_ch)) {
        unichar_isalpha = true;
      }
      if (u_islower(u_ch)) {
        unichar_islower = true;
      }
      if (u_isupper(u_ch)) {
        unichar_isupper = true;
      }
      if (u_isdigit(u_ch)) {
        unichar_isdigit = true;
      }
      if (u_ispunct(u_ch)) {
        unichar_ispunct = true;
      }
    }
    unicharset->set_isalpha(unichar_id, unichar_isalpha);
    unicharset->set_islower(unichar_id, unichar_islower);
    unicharset->set_isupper(unichar_id, unichar_isupper);
    unicharset->set_isdigit(unichar_id, unichar_isdigit);
    unicharset->set_ispunctuation(unichar_id, unichar_ispunct);
    // Script is taken from the first code point only.
    tesseract::IcuErrorCode err;
    unicharset->set_script(unichar_id, uscript_getName(uscript_getScript(uni_vector[0], err)));
    const int num_code_points = uni_vector.size();
    // Obtain the lower/upper case if needed and record it in the properties.
    // Default the other-case to self; overridden below if the cased form is
    // present in the unicharset.
    unicharset->set_other_case(unichar_id, unichar_id);
    if (unichar_islower || unichar_isupper) {
      std::vector<char32> other_case(num_code_points, 0);
      for (int i = 0; i < num_code_points; ++i) {
        // TODO(daria): Ideally u_strToLower()/ustrToUpper() should be used.
        // However since they deal with UChars (so need a conversion function
        // from char32 or UTF8string) and require a meaningful locale string,
        // for now u_tolower()/u_toupper() are used.
        other_case[i] = unichar_islower ? u_toupper(uni_vector[i]) : u_tolower(uni_vector[i]);
      }
      std::string other_case_uch = UNICHAR::UTF32ToUTF8(other_case);
      UNICHAR_ID other_case_id = unicharset->unichar_to_id(other_case_uch.c_str());
      if (other_case_id != INVALID_UNICHAR_ID) {
        unicharset->set_other_case(unichar_id, other_case_id);
      } else if (unichar_id >= SPECIAL_UNICHAR_CODES_COUNT && report_errors) {
        tprintf("Other case %s of %s is not in unicharset\n", other_case_uch.c_str(), unichar_str);
      }
    }
    // Set RTL property and obtain mirror unichar ID from ICU.
    std::vector<char32> mirrors(num_code_points, 0);
    for (int i = 0; i < num_code_points; ++i) {
      mirrors[i] = u_charMirror(uni_vector[i]);
      if (i == 0) { // set directionality to that of the 1st code point
        unicharset->set_direction(
            unichar_id, static_cast<UNICHARSET::Direction>(u_charDirection(uni_vector[i])));
      }
    }
    std::string mirror_uch = UNICHAR::UTF32ToUTF8(mirrors);
    UNICHAR_ID mirror_uch_id = unicharset->unichar_to_id(mirror_uch.c_str());
    if (mirror_uch_id != INVALID_UNICHAR_ID) {
      unicharset->set_mirror(unichar_id, mirror_uch_id);
    } else if (report_errors) {
      tprintf("Mirror %s of %s is not in unicharset\n", mirror_uch.c_str(), unichar_str);
    }
    // Record normalized version of this unichar.
    // Falls back to the raw string when normalization fails or yields an
    // empty result; id 0 (the NULL char) is never normalized.
    std::string normed_str;
    if (unichar_id != 0 &&
        tesseract::NormalizeUTF8String(
            decompose ? tesseract::UnicodeNormMode::kNFD : tesseract::UnicodeNormMode::kNFC,
            tesseract::OCRNorm::kNormalize, tesseract::GraphemeNorm::kNone, unichar_str,
            &normed_str) &&
        !normed_str.empty()) {
      unicharset->set_normed(unichar_id, normed_str.c_str());
    } else {
      unicharset->set_normed(unichar_id, unichar_str);
    }
    ASSERT_HOST(unicharset->get_other_case(unichar_id) < unicharset->size());
  }
  unicharset->post_load_setup();
}
// Helper sets the properties from universal script unicharsets, if found.
// For every script in the unicharset's script table, tries to load
// <script_dir>/<script>.unicharset and merge its properties in, then warns
// about any unichars whose properties remain incomplete.
void SetScriptProperties(const std::string &script_dir, UNICHARSET *unicharset) {
  for (int s = 0; s < unicharset->get_script_table_size(); ++s) {
    // Load the unicharset for the script if available.
    std::string filename =
        script_dir + "/" + unicharset->get_script_from_script_id(s) + ".unicharset";
    UNICHARSET script_set;
    if (!script_set.load_from_file(filename.c_str())) {
      // Missing files are only reported for real scripts, not Common/null.
      if (s != unicharset->common_sid() && s != unicharset->null_sid()) {
        tprintf("Failed to load script unicharset from:%s\n", filename.c_str());
      }
      continue;
    }
    unicharset->SetPropertiesFromOther(script_set);
  }
  for (int c = SPECIAL_UNICHAR_CODES_COUNT; c < unicharset->size(); ++c) {
    if (!unicharset->PropertiesIncomplete(c)) {
      continue;
    }
    tprintf("Warning: properties incomplete for index %d = %s\n", c,
            unicharset->id_to_unichar(c));
  }
}
// Helper gets the combined x-heights string by concatenating the contents of
// <script_dir>/<script>.xheights for every script in the unicharset.
// Scripts with no xheights file are silently skipped.
std::string GetXheightString(const std::string &script_dir, const UNICHARSET &unicharset) {
  std::string xheights_str;
  const int num_scripts = unicharset.get_script_table_size();
  for (int s = 0; s < num_scripts; ++s) {
    // Load the xheights for the script if available.
    std::string filename = script_dir + "/" + unicharset.get_script_from_script_id(s) + ".xheights";
    std::string script_heights;
    if (!File::ReadFileToString(filename, &script_heights)) {
      continue;
    }
    xheights_str += script_heights;
  }
  return xheights_str;
}
// Helper to set the properties for an input unicharset file, writes to the
// output file. If an appropriate script unicharset can be found in the
// script_dir directory, then the tops and bottoms are expanded using the
// script unicharset.
// If non-empty, xheight data for the fonts are written to the xheights_file.
void SetPropertiesForInputFile(const std::string &script_dir,
                               const std::string &input_unicharset_file,
                               const std::string &output_unicharset_file,
                               const std::string &output_xheights_file) {
  UNICHARSET unicharset;
  // Load the input unicharset
  unicharset.load_from_file(input_unicharset_file.c_str());
  tprintf("Loaded unicharset of size %zu from file %s\n", unicharset.size(),
          input_unicharset_file.c_str());
  // Set unichar properties (alpha/case/digit/punct, script, mirror, normed).
  tprintf("Setting unichar properties\n");
  SetupBasicProperties(true, false, &unicharset);
  // Merge in per-script property data from script_dir, if present.
  tprintf("Setting script properties\n");
  SetScriptProperties(script_dir, &unicharset);
  // Optionally dump the combined per-script x-height data.
  if (!output_xheights_file.empty()) {
    std::string xheights_str = GetXheightString(script_dir, unicharset);
    File::WriteStringToFileOrDie(xheights_str, output_xheights_file);
  }
  // Write the output unicharset
  tprintf("Writing unicharset to file %s\n", output_unicharset_file.c_str());
  unicharset.save_to_file(output_unicharset_file.c_str());
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/unicharset/unicharset_training_utils.cpp
|
C++
|
apache-2.0
| 8,688
|
///////////////////////////////////////////////////////////////////////
// File: unicharset_training_utils.h
// Description: Training utilities for UNICHARSET.
// Author: Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TRAINING_UNICHARSET_TRAINING_UTILS_H_
#define TESSERACT_TRAINING_UNICHARSET_TRAINING_UTILS_H_
#include "export.h"
#include <tesseract/export.h>
#include <string>
namespace tesseract {
class UNICHARSET;
// Helper sets the character attribute properties and sets up the script table.
// Does not set tops and bottoms.
TESS_UNICHARSET_TRAINING_API
void SetupBasicProperties(bool report_errors, bool decompose, UNICHARSET *unicharset);
// Default behavior is to compose, until it is proven that decomposed benefits
// at least one language.
inline void SetupBasicProperties(bool report_errors, UNICHARSET *unicharset) {
  // Forward to the 3-arg overload with decompose=false (composed forms).
  SetupBasicProperties(report_errors, false, unicharset);
}
// Helper sets the properties from universal script unicharsets, if found.
TESS_UNICHARSET_TRAINING_API
void SetScriptProperties(const std::string &script_dir, UNICHARSET *unicharset);
// Helper gets the combined x-heights string.
std::string GetXheightString(const std::string &script_dir, const UNICHARSET &unicharset);
// Helper to set the properties for an input unicharset file, writes to the
// output file. If an appropriate script unicharset can be found in the
// script_dir directory, then the tops and bottoms are expanded using the
// script unicharset.
// If non-empty, xheight data for the fonts are written to the xheights_file.
TESS_UNICHARSET_TRAINING_API
void SetPropertiesForInputFile(const std::string &script_dir,
const std::string &input_unicharset_file,
const std::string &output_unicharset_file,
const std::string &output_xheights_file);
} // namespace tesseract.
#endif // TESSERACT_TRAINING_UNICHARSET_TRAINING_UTILS_H_
|
2301_81045437/tesseract
|
src/training/unicharset/unicharset_training_utils.h
|
C++
|
apache-2.0
| 2,594
|
#include "validate_grapheme.h"
#include "tprintf.h"
#include "unicode/uchar.h" // From libicu
namespace tesseract {
// Consumes codes_[codes_used_...] up to the start of the next grapheme
// cluster and records the consumed codes as one multi-code part.
// Returns false only if a badly-formed pair of adjacent codes is found.
bool ValidateGrapheme::ConsumeGraphemeIfValid() {
  const unsigned num_codes = codes_.size();
  // Two-deep history of consumed codepoints. Space is a neutral seed that
  // cannot match any of the joiner/combiner tests below.
  char32 prev_prev_ch = ' ';
  char32 prev_ch = ' ';
  CharClass prev_cc = CharClass::kWhitespace;
  int num_codes_in_grapheme = 0;
  while (codes_used_ < num_codes) {
    CharClass cc = codes_[codes_used_].first;
    char32 ch = codes_[codes_used_].second;
    // Viramas behave like combiners: they attach to the previous code.
    const bool is_combiner = cc == CharClass::kCombiner || cc == CharClass::kVirama;
    // TODO: Make this code work well with RTL text.
    // See
    // https://github.com/tesseract-ocr/tesseract/pull/2266#issuecomment-467114751
#if 0
    // Reject easily detected badly formed sequences.
    if (prev_cc == CharClass::kWhitespace && is_combiner) {
      if (report_errors_) tprintf("Word started with a combiner:0x%x\n", ch);
      return false;
    }
#endif
    // Two adjacent viramas (grapheme links) are never valid.
    if (prev_cc == CharClass::kVirama && cc == CharClass::kVirama) {
      if (report_errors_) {
        tprintf("Two grapheme links in a row:0x%x 0x%x\n", prev_ch, ch);
      }
      return false;
    }
    // Script-specific (Indic/Thai) malformation checks on the adjacent pair.
    if (prev_cc != CharClass::kWhitespace && cc != CharClass::kWhitespace &&
        IsBadlyFormed(prev_ch, ch)) {
      return false;
    }
    // The previous code forces this one into the same grapheme if it was a
    // ZWJ, a virama, or a ZWNJ in a conjunct-request context.
    bool prev_is_fwd_combiner = prev_ch == kZeroWidthJoiner || prev_cc == CharClass::kVirama ||
                                (prev_ch == kZeroWidthNonJoiner &&
                                 (cc == CharClass::kVirama || prev_prev_ch == kZeroWidthJoiner));
    // A non-combiner that is not forced to join starts the next grapheme.
    if (num_codes_in_grapheme > 0 && !is_combiner && !prev_is_fwd_combiner) {
      break;
    }
    CodeOnlyToOutput();
    ++num_codes_in_grapheme;
    prev_prev_ch = prev_ch;
    prev_ch = ch;
    prev_cc = cc;
  }
  // Emit everything consumed as a single grapheme part.
  if (num_codes_in_grapheme > 0) {
    MultiCodePart(num_codes_in_grapheme);
  }
  return true;
}
// Classifies ch using ICU character properties. The order of the tests
// matters: Vedic accents and grapheme links take precedence over the
// generic mark test at the bottom.
Validator::CharClass ValidateGrapheme::UnicodeToCharClass(char32 ch) const {
  if (IsVedicAccent(ch)) {
    return CharClass::kVedicMark;
  }
  // The ZeroWidth[Non]Joiner characters are mapped to kCombiner as they
  // always combine with the previous character.
  if (u_hasBinaryProperty(ch, UCHAR_GRAPHEME_LINK)) {
    return CharClass::kVirama;
  }
  if (u_isUWhiteSpace(ch)) {
    return CharClass::kWhitespace;
  }
  // Workaround for Javanese Aksara's Taling (U+A9BA), do not label it as
  // a combiner even though it is a mark by Unicode category.
  if (ch == 0xa9ba) {
    return CharClass::kConsonant;
  }
  // Any Unicode mark, plus the zero-width joiners, combines with the
  // previous character.
  int char_type = u_charType(ch);
  if (char_type == U_NON_SPACING_MARK || char_type == U_ENCLOSING_MARK ||
      char_type == U_COMBINING_SPACING_MARK || ch == kZeroWidthNonJoiner ||
      ch == kZeroWidthJoiner) {
    return CharClass::kCombiner;
  }
  return CharClass::kOther;
}
// Helper returns true if the sequence prev_ch,ch is invalid.
bool ValidateGrapheme::IsBadlyFormed(char32 prev_ch, char32 ch) {
  // Check the Indic rule first; the Thai rule is only evaluated when the
  // Indic one did not already fire, matching short-circuit order.
  const bool bad_indic = IsBadlyFormedIndicVowel(prev_ch, ch);
  const bool bad_thai = !bad_indic && IsBadlyFormedThai(prev_ch, ch);
  if (!bad_indic && !bad_thai) {
    return false;
  }
  if (report_errors_) {
    tprintf(bad_indic ? "Badly formed Indic vowel sequence:0x%x 0x%x\n"
                      : "Badly formed Thai:0x%x 0x%x\n",
            prev_ch, ch);
  }
  return true;
}
// Helper returns true if the sequence prev_ch,ch is an invalid Indic vowel.
// Some vowels in Indic scripts may be analytically decomposed into atomic pairs
// of components that are themselves valid unicode symbols. (See Table 12-1 in
// http://www.unicode.org/versions/Unicode9.0.0/ch12.pdf
// for examples in Devanagari). The Unicode standard discourages specifying
// vowels this way, but they are sometimes encountered in text, probably because
// some editors still permit it. Renderers however dislike such pairs, and so
// this function may be used to detect their occurrence for removal.
// TODO(rays) This function only covers a subset of Indic languages and doesn't
// include all rules. Add rules as appropriate to support other languages or
// find a way to generalize these existing rules that makes use of the
// regularity of the mapping from ISCII to Unicode.
/* static */
bool ValidateGrapheme::IsBadlyFormedIndicVowel(char32 prev_ch, char32 ch) {
  // Dispatch on the first character of the pair; each case lists the
  // second characters that make an invalid decomposed vowel.
  switch (prev_ch) {
    case 0x905: // Devanagari A
      return ch == 0x946 || ch == 0x93E || (0x949 <= ch && ch <= 0x94C);
    case 0x906: // Devanagari AA
      return 0x949 <= ch && ch <= 0x94C;
    case 0x909: // Devanagari U
      return ch == 0x941;
    case 0x90F: // Devanagari E
      return 0x945 <= ch && ch <= 0x947;
    case 0x93E: // Illegal combinations of two dependent Devanagari vowels.
      return 0x945 <= ch && ch <= 0x948;
    case 0x94D: // Dependent Devanagari vowels following a virama.
      return 0x93E <= ch && ch <= 0x94C;
    case 0x985: // Bengali vowels (Table 9-5, pg 313)
      return ch == 0x9BE;
    case 0xC12: // Telugu vowels (Table 9-19, pg 331)
      return ch == 0xC55 || ch == 0xC4C;
    case 0xC92: // Kannada vowels (Table 9-20, pg 332)
      return ch == 0xCCC;
    default:
      return false;
  }
}
// Helper returns true if ch is a Thai consonant (U+0E01..U+0E2E).
static bool IsThaiConsonant(char32 ch) {
  return ch >= 0xe01 && ch <= 0xe2e;
}
// Helper returns true if ch is a vowel that is written before its
// consonant (U+0E40..U+0E44).
static bool IsThaiBeforeConsonantVowel(char32 ch) {
  return ch >= 0xe40 && ch <= 0xe44;
}
// Helper returns true if ch is a Thai tone mark (U+0E48..U+0E4B).
static bool IsThaiToneMark(char32 ch) {
  return ch >= 0xe48 && ch <= 0xe4b;
}
// Helper returns true if ch is a Thai vowel that may be followed by a tone
// mark (U+0E34..U+0E39, or U+0E31).
static bool IsThaiTonableVowel(char32 ch) {
  return ch == 0xe31 || (ch >= 0xe34 && ch <= 0xe39);
}
// Helper returns true if the sequence prev_ch,ch is invalid Thai.
// These rules come from a native Thai speaker, and are not covered by the
// Thai section in the unicode book:
// http://www.unicode.org/versions/Unicode9.0.0/ch16.pdf
// Comments below added by Ray interpreting the code ranges.
/* static */
// Returns true if the pair prev_ch,ch violates one of the hand-collected
// Thai sequencing rules below. Each rule rejects a class of second
// characters unless the first character is an allowed predecessor.
bool ValidateGrapheme::IsBadlyFormedThai(char32 prev_ch, char32 ch) {
  // Tone marks must follow consonants or specific vowels.
  if (IsThaiToneMark(ch) && !(IsThaiConsonant(prev_ch) || IsThaiTonableVowel(prev_ch))) {
    return true;
  }
  // Tonable vowels (and 0xe47) must follow consonants.
  if ((IsThaiTonableVowel(ch) || ch == 0xe47) && !IsThaiConsonant(prev_ch)) {
    return true;
  }
  // Thanthakhat must follow consonant or specific vowels (0xe38, 0xe34).
  if (ch == 0xe4c && !(IsThaiConsonant(prev_ch) || prev_ch == 0xe38 || prev_ch == 0xe34)) {
    return true;
  }
  // Nikkhahit must follow a consonant ?or certain markers?.
  // TODO(rays) confirm this, but there were so many in the ground truth of the
  // validation set that it seems reasonable to assume it is valid.
  if (ch == 0xe4d && !(IsThaiConsonant(prev_ch) || prev_ch == 0xe48 || prev_ch == 0xe49)) {
    return true;
  }
  // The vowels e30, e32, e33 can be used more liberally: after consonants or
  // tone marks, plus the two listed exceptional pairs.
  if ((ch == 0xe30 || ch == 0xe32 || ch == 0xe33) &&
      !(IsThaiConsonant(prev_ch) || IsThaiToneMark(prev_ch)) &&
      !(prev_ch == 0xe32 && ch == 0xe30) && !(prev_ch == 0xe4d && ch == 0xe32)) {
    return true;
  }
  // Some vowels come before consonants, and therefore cannot follow things
  // that cannot end a syllable.
  if (IsThaiBeforeConsonantVowel(ch) &&
      (IsThaiBeforeConsonantVowel(prev_ch) || prev_ch == 0xe31 || prev_ch == 0xe37)) {
    return true;
  }
  // Don't allow the standalone vowel U+0e24 to be followed by other vowels.
  if ((0xe30 <= ch && ch <= 0xe4D) && prev_ch == 0xe24) {
    return true;
  }
  return false;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/unicharset/validate_grapheme.cpp
|
C++
|
apache-2.0
| 7,543
|
#ifndef TESSERACT_TRAINING_VALIDATE_GRAPHEME_H_
#define TESSERACT_TRAINING_VALIDATE_GRAPHEME_H_
#include "validator.h"
namespace tesseract {
// Subclass of Validator that validates and segments generic unicode into
// grapheme clusters, including Latin with diacritics.
class ValidateGrapheme : public Validator {
public:
  // script selects script-specific checks; report_errors enables tprintf
  // diagnostics when a sequence is rejected.
  ValidateGrapheme(ViramaScript script, bool report_errors) : Validator(script, report_errors) {}
  ~ValidateGrapheme() override = default;
protected:
  // Consumes the next Grapheme in codes_[codes_used_++...] and copies it to
  // parts_ and output_. Returns true if a valid Grapheme was consumed,
  // otherwise does not increment codes_used_.
  bool ConsumeGraphemeIfValid() override;
  // Returns the CharClass corresponding to the given Unicode ch.
  CharClass UnicodeToCharClass(char32 ch) const override;
private:
  // Helper returns true if the sequence prev_ch,ch is invalid.
  // Non-static because it reads report_errors_.
  bool IsBadlyFormed(char32 prev_ch, char32 ch);
  // Helper returns true if the sequence prev_ch,ch is an invalid Indic vowel.
  static bool IsBadlyFormedIndicVowel(char32 prev_ch, char32 ch);
  // Helper returns true if the sequence prev_ch,ch is invalid Thai.
  static bool IsBadlyFormedThai(char32 prev_ch, char32 ch);
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_VALIDATE_GRAPHEME_H_
|
2301_81045437/tesseract
|
src/training/unicharset/validate_grapheme.h
|
C++
|
apache-2.0
| 1,316
|
#include "validate_indic.h"
#include "errcode.h"
#include "tprintf.h"
namespace tesseract {
// Returns whether codes matches the pattern for an Indic Grapheme.
// The ISCII standard http://varamozhi.sourceforge.net/iscii91.pdf
// has a BNF for valid syllables (Graphemes) which is modified slightly
// for Unicode. Notably U+200C and U+200D are used before/after the
// virama/virama to express explicit or soft viramas.
// Also the unicode v.9 Malayalam entry states that CZHC can be used in several
// Indic languages to request traditional ligatures, and CzHC is Malayalam-
// specific for requesting open conjuncts.
//
// + vowel Grapheme: V[D](v)*
// + consonant Grapheme: (C[N](H|HZ|Hz|ZH)?)*C[N](H|Hz)?[M[P]][D](v)*
bool ValidateIndic::ConsumeGraphemeIfValid() {
  // Dispatch on the class of the first code of the grapheme.
  switch (codes_[codes_used_].first) {
    case CharClass::kConsonant:
      // Consonant grapheme: conjunct chain head plus optional
      // matra/modifier/vedic-mark tail.
      return ConsumeConsonantHeadIfValid() && ConsumeConsonantTailIfValid();
    case CharClass::kVowel:
    case CharClass::kVedicMark:
      return ConsumeVowelIfValid();
    case CharClass::kZeroWidthJoiner:
    case CharClass::kZeroWidthNonJoiner:
      // Apart from within an aksara, joiners are silently dropped.
      if (report_errors_) {
        tprintf("Dropping isolated joiner: 0x%x\n", codes_[codes_used_].second);
      }
      ++codes_used_;
      return true;
    case CharClass::kOther:
      // Non-script characters pass through one at a time.
      UseMultiCode(1);
      return true;
    default:
      // Matra/nukta/virama etc. cannot start a grapheme.
      if (report_errors_) {
        tprintf("Invalid start of grapheme sequence:%c=0x%x\n",
                static_cast<int>(codes_[codes_used_].first),
                codes_[codes_used_].second);
      }
      return false;
  }
}
// Classifies ch by its offset within the script's code page. The tests are
// order-dependent: each range check assumes the earlier ones failed.
Validator::CharClass ValidateIndic::UnicodeToCharClass(char32 ch) const {
  if (IsVedicAccent(ch)) {
    return CharClass::kVedicMark;
  }
  if (ch == kZeroWidthNonJoiner) {
    return CharClass::kZeroWidthNonJoiner;
  }
  if (ch == kZeroWidthJoiner) {
    return CharClass::kZeroWidthJoiner;
  }
  // Offset from the start of the relevant unicode code block aka code page.
  int base = static_cast<char32>(script_);
  int off = ch - base;
  // Anything in another code block is other.
  if (off < 0 || off >= kIndicCodePageSize) {
    return CharClass::kOther;
  }
  // Exception for Tamil. The aytham character is considered a letter.
  if (script_ == ViramaScript::kTamil && off == 0x03) {
    return CharClass::kVowel;
  }
  // Offsets 0x00-0x03 are the vowel-modifier signs.
  if (off < 0x4) {
    return CharClass::kVowelModifier;
  }
  if (script_ == ViramaScript::kSinhala) {
    // Sinhala is an exception to the common ISCII-derived layout.
    if (off <= 0x19) {
      return CharClass::kVowel;
    }
    if (off <= 0x49) {
      return CharClass::kConsonant;
    }
    // 0x4a is the Sinhala virama.
    if (off == 0x4a) {
      return CharClass::kVirama;
    }
    if (off <= 0x5f) {
      return CharClass::kMatra;
    }
  } else {
    // Common layout: independent vowels, then consonants.
    if (off <= 0x14 || off == 0x50) {
      return CharClass::kVowel;
    }
    if (off <= 0x3b || (0x58 <= off && off <= 0x5f)) {
      return CharClass::kConsonant;
    }
    // Sinhala doesn't have Nukta or Avagraha.
    if (off == 0x3c) {
      return CharClass::kNukta;
    }
    if (off == 0x3d) {
      return CharClass::kVowel; // avagraha
    }
    // Dependent vowel signs (matras).
    if (off <= 0x4c || (0x51 <= off && off <= 0x54)) {
      return CharClass::kMatra;
    }
    // 0x55-0x57: partial signs that combine with a matra.
    if (0x55 <= off && off <= 0x57) {
      return CharClass::kMatraPiece;
    }
    if (off == 0x4d) {
      return CharClass::kVirama;
    }
  }
  // 0x60-0x61 independent, 0x62-0x63 dependent forms.
  if (off == 0x60 || off == 0x61) {
    return CharClass::kVowel;
  }
  if (off == 0x62 || off == 0x63) {
    return CharClass::kMatra;
  }
  // Danda and digits up to 6f are OK as other.
  // 70-7f are script-specific.
  // 0BF0-0BF2 are Tamil numbers 10, 100 and 1000; treat as other.
  if (script_ == ViramaScript::kTamil && (0x70 <= off && off <= 0x72)) {
    return CharClass::kOther;
  }
  // 0BF3-0BFA are other Tamil symbols.
  if (script_ == ViramaScript::kTamil && (0x73 <= off && off <= 0x7A)) {
    return CharClass::kOther;
  }
  if (script_ == ViramaScript::kBengali && (off == 0x70 || off == 0x71)) {
    return CharClass::kConsonant;
  }
  if (script_ == ViramaScript::kGurmukhi && (off == 0x72 || off == 0x73)) {
    return CharClass::kConsonant;
  }
  if (script_ == ViramaScript::kSinhala && off == 0x70) {
    return CharClass::kConsonant;
  }
  if (script_ == ViramaScript::kDevanagari && off == 0x70) {
    return CharClass::kOther;
  }
  // Remaining 0x70-0x73 act as vowel modifiers in other scripts.
  if (0x70 <= off && off <= 0x73) {
    return CharClass::kVowelModifier;
  }
  // Non Indic, Digits, Measures, danda, etc.
  return CharClass::kOther;
}
// Helper consumes/copies a virama and any associated post-virama joiners.
// A linking virama (with either type of pre-virama joiner, post-virama ZWJ, or
// no joiner at all) must be followed by a consonant.
// A non-linking (explicit) virama is indicated by a ZWNJ after it, or a non
// consonant, space, or character from a different script. We clean up the
// representation to make it consistent by adding a ZWNJ if missing from a
// non-linking virama. Returns false with an invalid sequence.
bool ValidateIndic::ConsumeViramaIfValid(IndicPair joiner, bool post_matra) {
  const unsigned num_codes = codes_.size();
  if (joiner.first == CharClass::kOther) {
    // No pre-virama joiner: copy the virama itself to the output first.
    CodeOnlyToOutput();
    if (codes_used_ < num_codes && codes_[codes_used_].second == kZeroWidthJoiner) {
      // Post-matra viramas must be explicit, so no joiners allowed here.
      if (post_matra) {
        if (report_errors_) {
          tprintf("ZWJ after a post-matra virama!!\n");
        }
        return false;
      }
      // Look ahead past the ZWJ: [H Z z], [H Z Ya] or [H Z Ra] (when not
      // already preceded by Ra) are consumed as a unit further on.
      if (codes_used_ + 1 < num_codes && codes_[codes_used_ - 2].second != kRayana &&
          (codes_[codes_used_ + 1].second == kZeroWidthNonJoiner ||
           codes_[codes_used_ + 1].second == kYayana ||
           codes_[codes_used_ + 1].second == kRayana)) {
        // This combination will be picked up later.
        ASSERT_HOST(!CodeOnlyToOutput());
      } else {
        // Half-form with optional Nukta.
        unsigned len = output_.size() + 1 - output_used_;
        if (UseMultiCode(len)) {
          return true;
        }
      }
      // Virama ZWJ ZWNJ is only valid as the Sinhala stand-alone Repaya.
      if (codes_used_ < num_codes && codes_[codes_used_].second == kZeroWidthNonJoiner) {
        if (output_used_ == output_.size() || output_[output_used_] != kRayana) {
          if (report_errors_) {
            tprintf("Virama ZWJ ZWNJ in non-Sinhala: base=0x%x!\n", static_cast<int>(script_));
          }
          return false;
        }
        // Special Sinhala case of Stand-alone Repaya. ['RA' H Z z]
        if (UseMultiCode(4)) {
          return true;
        }
      }
    } else if (codes_used_ == num_codes || codes_[codes_used_].first != CharClass::kConsonant ||
               post_matra) {
      // Not followed by a consonant (or post-matra): an explicit virama.
      if (codes_used_ == num_codes || codes_[codes_used_].second != kZeroWidthNonJoiner) {
        // It is valid to have an unterminated virama at the end of a word, but
        // for consistency, we will always add ZWNJ if not present.
        output_.push_back(kZeroWidthNonJoiner);
      } else {
        CodeOnlyToOutput();
      }
      // Explicit virama [H z]
      MultiCodePart(2);
    }
  } else {
    // Pre-virama joiner [{Z|z} H] requests specific conjunct.
    if (UseMultiCode(2)) {
      if (report_errors_) {
        tprintf("Invalid pre-virama joiner with no 2nd consonant!!\n");
      }
      return false;
    }
    // A further joiner immediately after [J H] is invalid.
    if (codes_[codes_used_].second == kZeroWidthJoiner ||
        codes_[codes_used_].second == kZeroWidthNonJoiner) {
      if (report_errors_) {
        tprintf("JHJ!!: 0x%x 0x%x 0x%x\n", joiner.second, output_.back(),
                codes_[codes_used_].second);
      }
      return false;
    }
  }
  // It is good so far as it goes.
  return true;
}
// Helper consumes/copies a series of consonants separated by viramas while
// valid, but not any vowel or other modifiers.
bool ValidateIndic::ConsumeConsonantHeadIfValid() {
  const unsigned num_codes = codes_.size();
  // Consonant aksara
  do {
    // Copy the consonant itself.
    CodeOnlyToOutput();
    // Special Sinhala case of [H Z Yayana/Rayana].
    int index = output_.size() - 3;
    if (output_used_ + 3 <= output_.size() &&
        (output_.back() == kYayana || output_.back() == kRayana) && IsVirama(output_[index]) &&
        output_[index + 1] == kZeroWidthJoiner) {
      MultiCodePart(3);
    }
    // Optional nukta attached to the consonant.
    bool have_nukta = false;
    if (codes_used_ < num_codes && codes_[codes_used_].first == CharClass::kNukta) {
      have_nukta = true;
      CodeOnlyToOutput();
    }
    // Test for subscript conjunct.
    index = output_.size() - 2 - have_nukta;
    if (output_used_ + 2 + have_nukta <= output_.size() && IsSubscriptScript() &&
        IsVirama(output_[index])) {
      // Output previous virama, consonant + optional nukta.
      MultiCodePart(2 + have_nukta);
    }
    // A ZWJ (or, for Malayalam only, a ZWNJ) before a virama requests a
    // specific conjunct form; it is recorded in joiner.
    IndicPair joiner(CharClass::kOther, 0);
    if (codes_used_ < num_codes && (codes_[codes_used_].second == kZeroWidthJoiner ||
                                    (codes_[codes_used_].second == kZeroWidthNonJoiner &&
                                     script_ == ViramaScript::kMalayalam))) {
      joiner = codes_[codes_used_];
      if (++codes_used_ == num_codes) {
        // Joiner at end of input: drop it and accept what we have.
        if (report_errors_) {
          tprintf("Skipping ending joiner: 0x%x 0x%x\n", output_.back(), joiner.second);
        }
        return true;
      }
      if (codes_[codes_used_].first == CharClass::kVirama) {
        output_.push_back(joiner.second);
      } else {
        // Joiner not followed by a virama: silently drop it.
        if (report_errors_) {
          tprintf("Skipping unnecessary joiner: 0x%x 0x%x 0x%x\n", output_.back(), joiner.second,
                  codes_[codes_used_].second);
        }
        joiner = std::make_pair(CharClass::kOther, 0);
      }
    }
    if (codes_used_ < num_codes && codes_[codes_used_].first == CharClass::kVirama) {
      if (!ConsumeViramaIfValid(joiner, false)) {
        return false;
      }
    } else {
      break; // No virama, so the run of consonants is over.
    }
  } while (codes_used_ < num_codes && codes_[codes_used_].first == CharClass::kConsonant);
  // Flush any codes not yet emitted as a part.
  if (output_used_ < output_.size()) {
    MultiCodePart(1);
  }
  return true;
}
// Helper consumes/copies a tail part of a consonant, comprising optional
// matra/piece, vowel modifier, vedic mark, terminating virama.
bool ValidateIndic::ConsumeConsonantTailIfValid() {
  if (codes_used_ == codes_.size()) {
    return true;
  }
  // No virama: Finish the grapheme.
  // Are multiple matras allowed?
  if (codes_[codes_used_].first == CharClass::kMatra) {
    if (UseMultiCode(1)) {
      return true;
    }
    // An optional matra piece may follow the matra.
    // NOTE(review): this check is nested inside the kMatra branch, so a
    // matra piece with no preceding matra is never consumed here; the
    // Javanese variant checks pieces in an independent loop - confirm
    // which behavior is intended.
    if (codes_[codes_used_].first == CharClass::kMatraPiece) {
      if (UseMultiCode(1)) {
        return true;
      }
    }
  }
  while (codes_[codes_used_].first == CharClass::kVowelModifier) {
    if (UseMultiCode(1)) {
      return true;
    }
    // Only Malayalam allows repetition, and only of 0xd02.
    if (script_ != ViramaScript::kMalayalam || output_.back() != 0xd02) {
      break;
    }
  }
  // Any number of trailing vedic marks.
  while (codes_[codes_used_].first == CharClass::kVedicMark) {
    if (UseMultiCode(1)) {
      return true;
    }
  }
  // An optional terminating explicit virama.
  if (codes_[codes_used_].first == CharClass::kVirama) {
    if (!ConsumeViramaIfValid(IndicPair(CharClass::kOther, 0), true)) {
      return false;
    }
  }
  // What we have consumed so far is a valid consonant cluster.
  if (output_used_ < output_.size()) {
    MultiCodePart(1);
  }
  return true;
}
// Helper consumes/copies a vowel and optional modifiers.
bool ValidateIndic::ConsumeVowelIfValid() {
  // The vowel (or vedic mark) itself.
  if (UseMultiCode(1)) {
    return true;
  }
  // Optional vowel modifiers: scripts other than Malayalam take at most one.
  for (;;) {
    if (codes_[codes_used_].first != CharClass::kVowelModifier) {
      break;
    }
    if (UseMultiCode(1)) {
      return true;
    }
    // Only Malayalam allows repeated modifiers?
    if (script_ != ViramaScript::kMalayalam) {
      break;
    }
  }
  // Any number of trailing vedic marks.
  while (codes_[codes_used_].first == CharClass::kVedicMark) {
    if (UseMultiCode(1)) {
      return true;
    }
  }
  // What we have consumed so far is a valid vowel cluster.
  return true;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/unicharset/validate_indic.cpp
|
C++
|
apache-2.0
| 11,862
|
#ifndef TESSERACT_TRAINING_VALIDATE_INDIC_H_
#define TESSERACT_TRAINING_VALIDATE_INDIC_H_
#include "validator.h"
namespace tesseract {
// Subclass of Validator that validates and segments Indic scripts in the
// unicode range 0x900-0xdff (Devanagari-Sinhala).
class ValidateIndic : public Validator {
public:
  // script selects the code page to validate; report_errors enables tprintf
  // diagnostics when a sequence is rejected.
  ValidateIndic(ViramaScript script, bool report_errors) : Validator(script, report_errors) {}
  ~ValidateIndic() override = default;
protected:
  // Returns whether codes matches the pattern for an Indic Grapheme.
  // Consumes the next Grapheme in codes_[codes_used_++...] and copies it to
  // parts_ and output_. Returns true if a valid Grapheme was consumed,
  // otherwise does not increment codes_used_.
  bool ConsumeGraphemeIfValid() override;
  // Returns the CharClass corresponding to the given Unicode ch.
  Validator::CharClass UnicodeToCharClass(char32 ch) const override;
private:
  // Helper consumes/copies a virama and any associated post-virama joiners.
  bool ConsumeViramaIfValid(IndicPair joiner, bool post_matra);
  // Helper consumes/copies a series of consonants separated by viramas while
  // valid, but not any vowel or other modifiers.
  bool ConsumeConsonantHeadIfValid();
  // Helper consumes/copies a tail part of a consonant, comprising optional
  // matra/piece, vowel modifier, vedic mark, terminating virama.
  bool ConsumeConsonantTailIfValid();
  // Helper consumes/copies a vowel and optional modifiers.
  bool ConsumeVowelIfValid();
  // Some special unicodes used only for Indic processing.
  static const char32 kYayana = 0xdba; // Sinhala Ya
  static const char32 kRayana = 0xdbb; // Sinhala Ra
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_VALIDATE_INDIC_H_
|
2301_81045437/tesseract
|
src/training/unicharset/validate_indic.h
|
C++
|
apache-2.0
| 1,731
|
/**********************************************************************
* File: validate_javanese.cpp
* Description: Text validator for Javanese Script - aksara jawa.
* Author: Shree Devi Kumar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "validate_javanese.h"
#include "errcode.h"
#include "tprintf.h"
namespace tesseract {
// Returns whether codes matches the pattern for a Javanese Grapheme.
// Taken from unicode standard:
// http://www.unicode.org/charts/PDF/UA980.pdf
// http://www.unicode.org/versions/Unicode11.0.0/ch17.pdf
// The Consonant class here includes independent vowels.
// The order of components in an orthographic syllable as expressed in BNF is:
// {C F} C {{R}Y} {V{A}} {Z}
// Translated to the codes used by the CharClass enum:
// [(V|C[N])(H)] (V|C[N]) [[N]N] [M[D]] [v]
// Also see https://r12a.github.io/scripts/javanese/ for detailed notes.
// Validation rules copied from validate_indic.cpp and modified for Javanese.
// Indic - for reference
// + vowel Grapheme: V[D](v)*
// + consonant Grapheme: (C[N](H|HZ|Hz|ZH)?)*C[N](H|Hz)?[M[P]][D](v)*
bool ValidateJavanese::ConsumeGraphemeIfValid() {
switch (codes_[codes_used_].first) {
case CharClass::kConsonant:
return ConsumeConsonantHeadIfValid() && ConsumeConsonantTailIfValid();
case CharClass::kVowel:
case CharClass::kVedicMark:
return ConsumeVowelIfValid();
case CharClass::kZeroWidthJoiner:
case CharClass::kZeroWidthNonJoiner:
// Apart from within an aksara, joiners are silently dropped.
if (report_errors_) {
tprintf("Dropping isolated joiner: 0x%x\n", codes_[codes_used_].second);
}
++codes_used_;
return true;
case CharClass::kOther:
UseMultiCode(1);
return true;
default:
if (report_errors_) {
tprintf("Invalid start of grapheme sequence:%c=0x%x\n",
static_cast<int>(codes_[codes_used_].first),
codes_[codes_used_].second);
}
return false;
}
}
// Helper consumes/copies a virama and any associated post-virama joiners.
// A linking virama (with either type of pre-virama joiner, post-virama ZWJ, or
// no joiner at all) must be followed by a consonant.
// A non-linking (explicit) virama is indicated by a ZWNJ after it, or a non
// consonant, space, or character from a different script. We clean up the
// representation to make it consistent by adding a ZWNJ if missing from a
// non-linking virama. Returns false with an invalid sequence.
bool ValidateJavanese::ConsumeViramaIfValid(IndicPair joiner, bool post_matra) {
  const unsigned num_codes = codes_.size();
  if (joiner.first == CharClass::kOther) {
    // No pre-virama joiner: copy the virama (pangkon) itself first.
    CodeOnlyToOutput();
    if (codes_used_ < num_codes && codes_[codes_used_].second == kZeroWidthJoiner) {
      // Post-matra viramas must be explicit, so no joiners allowed here.
      if (post_matra) {
        if (report_errors_) {
          tprintf("ZWJ after a post-matra virama!!\n");
        }
        return false;
      }
      // Look ahead past the ZWJ: [H Z z], [H Z Pengkal] or [H Z Cakra]
      // (when not already preceded by Cakra) are consumed as a unit later.
      if (codes_used_ + 1 < num_codes && codes_[codes_used_ - 2].second != kCakra &&
          (codes_[codes_used_ + 1].second == kZeroWidthNonJoiner ||
           codes_[codes_used_ + 1].second == kPengkal ||
           codes_[codes_used_ + 1].second == kCakra)) {
        // This combination will be picked up later.
        ASSERT_HOST(!CodeOnlyToOutput());
      } else {
        // Half-form with optional Nukta.
        unsigned len = output_.size() + 1 - output_used_;
        if (UseMultiCode(len)) {
          return true;
        }
      }
      if (codes_used_ < num_codes && codes_[codes_used_].second == kZeroWidthNonJoiner) {
        if (output_used_ == output_.size() || output_[output_used_] != kCakra) {
          if (report_errors_) {
            tprintf("Virama ZWJ ZWNJ in non-Sinhala: base=0x%x!\n", static_cast<int>(script_));
          }
          return false;
        }
        // Special Sinhala case of Stand-alone Repaya. ['RA' H Z z]
        if (UseMultiCode(4)) {
          return true;
        }
      }
    } else if (codes_used_ == num_codes || codes_[codes_used_].first != CharClass::kConsonant ||
               post_matra) {
      if (codes_used_ == num_codes || codes_[codes_used_].second != kZeroWidthNonJoiner) {
        // It is valid to have an unterminated virama at the end of a word, but
        // for consistency, we will always add ZWNJ if not present.
        // Fix: previously this called CodeOnlyToOutput(), which copies the
        // *current* (non-ZWNJ) input code - or reads codes_[num_codes] out of
        // range at end of word - instead of appending the missing ZWNJ as
        // ValidateIndic::ConsumeViramaIfValid does.
        output_.push_back(kZeroWidthNonJoiner);
      } else {
        CodeOnlyToOutput();
      }
      // Explicit virama [H z]
      MultiCodePart(2);
    }
  } else {
    // Pre-virama joiner [{Z|z} H] requests specific conjunct.
    if (UseMultiCode(2)) {
      if (report_errors_) {
        tprintf("Invalid pre-virama joiner with no 2nd consonant!!\n");
      }
      return false;
    }
    // A further joiner immediately after [J H] is invalid.
    if (codes_[codes_used_].second == kZeroWidthJoiner ||
        codes_[codes_used_].second == kZeroWidthNonJoiner) {
      if (report_errors_) {
        tprintf("JHJ!!: 0x%x 0x%x 0x%x\n", joiner.second, output_.back(),
                codes_[codes_used_].second);
      }
      return false;
    }
  }
  // It is good so far as it goes.
  return true;
}
// Helper consumes/copies a series of consonants separated by viramas while
// valid, but not any vowel or other modifiers.
bool ValidateJavanese::ConsumeConsonantHeadIfValid() {
  const unsigned num_codes = codes_.size();
  // Consonant aksara
  do {
    // Copy the consonant itself.
    CodeOnlyToOutput();
    // Special Sinhala case of [H Z Yayana/Rayana].
    // NOTE(review): comment inherited from validate_indic.cpp; the codes
    // tested here are the Javanese medials Pengkal/Cakra.
    int index = output_.size() - 3;
    if (output_used_ + 3 <= output_.size() &&
        (output_.back() == kPengkal || output_.back() == kCakra) && IsVirama(output_[index]) &&
        output_[index + 1] == kZeroWidthJoiner) {
      MultiCodePart(3);
    }
    // Optional cecak telu (nukta) attached to the consonant.
    bool have_nukta = false;
    if (codes_used_ < num_codes && codes_[codes_used_].first == CharClass::kNukta) {
      have_nukta = true;
      CodeOnlyToOutput();
    }
    // Test for subscript conjunct.
    index = output_.size() - 2 - have_nukta;
    if (output_used_ + 2 + have_nukta <= output_.size() && IsSubscriptScript() &&
        IsVirama(output_[index])) {
      // Output previous virama, consonant + optional nukta.
      MultiCodePart(2 + have_nukta);
    }
    // A ZWJ before a virama requests a specific conjunct form.
    // NOTE(review): the kMalayalam test can never be true for a Javanese
    // validator (script_ is Javanese); leftover from the Indic original -
    // confirm before removing.
    IndicPair joiner(CharClass::kOther, 0);
    if (codes_used_ < num_codes && (codes_[codes_used_].second == kZeroWidthJoiner ||
                                    (codes_[codes_used_].second == kZeroWidthNonJoiner &&
                                     script_ == ViramaScript::kMalayalam))) {
      joiner = codes_[codes_used_];
      if (++codes_used_ == num_codes) {
        // Joiner at end of input: drop it and accept what we have.
        if (report_errors_) {
          tprintf("Skipping ending joiner: 0x%x 0x%x\n", output_.back(), joiner.second);
        }
        return true;
      }
      if (codes_[codes_used_].first == CharClass::kVirama) {
        output_.push_back(joiner.second);
      } else {
        // Joiner not followed by a virama: silently drop it.
        if (report_errors_) {
          tprintf("Skipping unnecessary joiner: 0x%x 0x%x 0x%x\n", output_.back(), joiner.second,
                  codes_[codes_used_].second);
        }
        joiner = std::make_pair(CharClass::kOther, 0);
      }
    }
    if (codes_used_ < num_codes && codes_[codes_used_].first == CharClass::kVirama) {
      if (!ConsumeViramaIfValid(joiner, false)) {
        return false;
      }
    } else {
      break; // No virama, so the run of consonants is over.
    }
  } while (codes_used_ < num_codes && codes_[codes_used_].first == CharClass::kConsonant);
  // Flush any codes not yet emitted as a part.
  if (output_used_ < output_.size()) {
    MultiCodePart(1);
  }
  return true;
}
// Helper consumes/copies a tail part of a consonant, comprising optional
// matra/piece, vowel modifier, vedic mark, terminating virama.
bool ValidateJavanese::ConsumeConsonantTailIfValid() {
  if (codes_used_ == codes_.size()) {
    return true;
  }
  // No virama: Finish the grapheme.
  // Are multiple matras allowed?
  if (codes_[codes_used_].first == CharClass::kMatra) {
    if (UseMultiCode(1)) {
      return true;
    }
    // An optional matra piece may follow the matra.
    if (codes_[codes_used_].first == CharClass::kMatraPiece) {
      if (UseMultiCode(1)) {
        return true;
      }
    }
  }
  // Tarung also used for long versions of u and o vowels and vocalic r
  // Taling + Tarung is valid eg. ꦏ + ◌ꦺ + ◌ꦴ
  while (codes_[codes_used_].first == CharClass::kMatraPiece) {
    if (UseMultiCode(1)) {
      return true;
    }
  }
  while (codes_[codes_used_].first == CharClass::kVowelModifier) {
    if (UseMultiCode(1)) {
      return true;
    }
    // Only Malayalam allows repetition, and only of 0xd02.
    // NOTE(review): dead code for Javanese (script_ is Javanese);
    // inherited from the Indic original.
    if (script_ != ViramaScript::kMalayalam || output_.back() != 0xd02) {
      break;
    }
  }
  // Any number of trailing vedic marks.
  while (codes_[codes_used_].first == CharClass::kVedicMark) {
    if (UseMultiCode(1)) {
      return true;
    }
  }
  // An optional terminating explicit virama (pangkon).
  if (codes_[codes_used_].first == CharClass::kVirama) {
    if (!ConsumeViramaIfValid(IndicPair(CharClass::kOther, 0), true)) {
      return false;
    }
  }
  // What we have consumed so far is a valid consonant cluster.
  if (output_used_ < output_.size()) {
    MultiCodePart(1);
  }
  return true;
}
// Helper consumes/copies a vowel and optional modifiers.
bool ValidateJavanese::ConsumeVowelIfValid() {
  // The vowel (or vedic mark) itself.
  if (UseMultiCode(1)) {
    return true;
  }
  // Optional vowel modifiers. Only Malayalam may take more than one.
  bool take_more_modifiers = true;
  while (take_more_modifiers && codes_[codes_used_].first == CharClass::kVowelModifier) {
    if (UseMultiCode(1)) {
      return true;
    }
    take_more_modifiers = script_ == ViramaScript::kMalayalam;
  }
  // Any number of trailing vedic marks.
  while (codes_[codes_used_].first == CharClass::kVedicMark) {
    if (UseMultiCode(1)) {
      return true;
    }
  }
  // What we have consumed so far is a valid vowel cluster.
  return true;
}
// Maps one unicode to its Javanese character class by its offset within the
// Javanese code page. Codes outside the page (and the joiners) are handled
// specially.
Validator::CharClass ValidateJavanese::UnicodeToCharClass(char32 ch) const {
  if (ch == kZeroWidthNonJoiner) {
    return CharClass::kZeroWidthNonJoiner;
  }
  if (ch == kZeroWidthJoiner) {
    return CharClass::kZeroWidthJoiner;
  }
  // Offset from the start of the relevant unicode code block aka code page.
  const int offset = ch - static_cast<char32>(script_);
  if (offset < 0 || offset >= kIndicCodePageSize) {
    // Anything in another code block is other.
    return CharClass::kOther;
  }
  if (offset < 0x4) {
    return CharClass::kVowelModifier;
  } else if (offset <= 0x32) {
    return CharClass::kConsonant; // includes independent vowels
  } else if (offset == 0x33) {
    return CharClass::kNukta; // A9B3 CECAK TELU
  } else if (offset == 0x34) {
    return CharClass::kMatraPiece; // A9B4 TARUNG two part vowels
  } else if (offset <= 0x39) {
    return CharClass::kMatra;
  } else if (offset == 0x3a) {
    return CharClass::kConsonant; // A9BA TALING - pre base vowel
  } else if (offset <= 0x3d) {
    return CharClass::kMatra;
  } else if (offset <= 0x3f) {
    return CharClass::kNukta; // A9BE-A9BF PENGKAL-CAKRA medial consonants
  } else if (offset == 0x40) {
    return CharClass::kVirama; // A9C0 PANGKON
  }
  return CharClass::kOther;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/unicharset/validate_javanese.cpp
|
C++
|
apache-2.0
| 11,430
|
/**********************************************************************
* File: validate_javanese.h
* Description: Text validator for Javanese Script - aksara jawa.
* Author: Shree Devi Kumar
* Created: August 03, 2018
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_VALIDATE_JAVANESE_H_
#define TESSERACT_TRAINING_VALIDATE_JAVANESE_H_
#include "validator.h"
namespace tesseract {
// Subclass of Validator that validates and segments Javanese scripts
class ValidateJavanese : public Validator {
public:
  ValidateJavanese(ViramaScript script, bool report_errors) : Validator(script, report_errors) {}
  ~ValidateJavanese() override = default;
protected:
  // Returns whether codes matches the pattern for a Javanese Grapheme.
  // Consumes the next Grapheme in codes_[codes_used_++...] and copies it to
  // parts_ and output_. Returns true if a valid Grapheme was consumed,
  // otherwise does not increment codes_used_.
  bool ConsumeGraphemeIfValid() override;
  // Returns the CharClass corresponding to the given Unicode ch.
  Validator::CharClass UnicodeToCharClass(char32 ch) const override;
private:
  // Helper consumes/copies a virama and any associated post-virama joiners.
  bool ConsumeViramaIfValid(IndicPair joiner, bool post_matra);
  // Helper consumes/copies a series of consonants separated by viramas while
  // valid, but not any vowel or other modifiers.
  bool ConsumeConsonantHeadIfValid();
  // Helper consumes/copies a tail part of a consonant, comprising optional
  // matra/piece, vowel modifier, vedic mark, terminating virama.
  bool ConsumeConsonantTailIfValid();
  // Helper consumes/copies a vowel and optional modifiers.
  bool ConsumeVowelIfValid();
  // Some special unicodes used only for Javanese processing.
  // (Medial consonants, classified as kNukta by UnicodeToCharClass.)
  static const char32 kPengkal = 0xa9be; // Javanese Ya
  static const char32 kCakra = 0xa9bf;   // Javanese Ra
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_VALIDATE_JAVANESE_H_
|
2301_81045437/tesseract
|
src/training/unicharset/validate_javanese.h
|
C++
|
apache-2.0
| 2,571
|
#include "validate_khmer.h"
#include "errcode.h"
#include "tprintf.h"
namespace tesseract {
// Returns whether codes matches the pattern for a Khmer Grapheme.
// Taken from unicode standard:
// http://www.unicode.org/versions/Unicode9.0.0/ch16.pdf.
// where it gives: B {R | C} {S {R}}* {{Z} V} {O} {S}, using different notation
// to the ISCII standard http://varamozhi.sourceforge.net/iscii91.pdf.
// Translated to the codes used by the CharClass enum:
// C {R | N} {HC {R}}* {{Z|z} M{P}} {D} {HC}
// Where R is a new symbol (Robat) and N is repurposed as a consonant shifter.
// Also the Consonant class here includes independent vowels, as they are
// treated the same anyway.
// In the split grapheme mode, the only characters that get grouped are the
// HC and the {Z|z}M The unicode chapter on Khmer only mentions the joiners in
// the BNF syntax, so who knows what they do.
bool ValidateKhmer::ConsumeGraphemeIfValid() {
  const unsigned num_codes = codes_.size();
  // Nothing left to consume.
  if (codes_used_ == num_codes) {
    return false;
  }
  // Non-Khmer codes pass through as single-code graphemes.
  if (codes_[codes_used_].first == CharClass::kOther) {
    UseMultiCode(1);
    return true;
  }
  // B: every syllable must start with a base consonant (independent vowels
  // are also classified kConsonant here).
  if (codes_[codes_used_].first != CharClass::kConsonant) {
    if (report_errors_) {
      tprintf("Invalid start of Khmer syllable:0x%x\n", codes_[codes_used_].second);
    }
    return false;
  }
  if (UseMultiCode(1)) {
    return true;
  }
  // {R | N}: optional robat or consonant shifter on the base.
  if (codes_[codes_used_].first == CharClass::kRobat ||
      codes_[codes_used_].first == CharClass::kNukta) {
    if (UseMultiCode(1)) {
      return true;
    }
  }
  // {HC {R}}*: any number of coeng(virama)+consonant pairs, each with an
  // optional robat. The virama is first copied to output_ alone, then the
  // pair is emitted as one 2-code part.
  while (codes_used_ + 1 < num_codes && codes_[codes_used_].first == CharClass::kVirama &&
         codes_[codes_used_ + 1].first == CharClass::kConsonant) {
    ASSERT_HOST(!CodeOnlyToOutput());
    if (UseMultiCode(2)) {
      return true;
    }
    if (codes_[codes_used_].first == CharClass::kRobat) {
      if (UseMultiCode(1)) {
        return true;
      }
    }
  }
  // {{Z|z} M}: an optional joiner may precede the dependent vowel; it is
  // copied to output_ now and grouped with the matra below.
  unsigned num_matra_parts = 0;
  if (codes_[codes_used_].second == kZeroWidthJoiner ||
      codes_[codes_used_].second == kZeroWidthNonJoiner) {
    if (CodeOnlyToOutput()) {
      // A joiner with nothing after it is invalid.
      if (report_errors_) {
        tprintf("Unterminated joiner: 0x%x\n", output_.back());
      }
      return false;
    }
    ++num_matra_parts;
  }
  // Not quite as shown by the BNF, the matra piece is allowed as a matra on its
  // own or as an addition to other matras.
  if (codes_[codes_used_].first == CharClass::kMatra ||
      codes_[codes_used_].first == CharClass::kMatraPiece) {
    ++num_matra_parts;
    if (UseMultiCode(num_matra_parts)) {
      return true;
    }
  } else if (num_matra_parts) {
    // A joiner was consumed but no matra follows it: invalid.
    if (report_errors_) {
      tprintf("Joiner with non-dependent vowel after it!:0x%x 0x%x\n", output_.back(),
              codes_[codes_used_].second);
    }
    return false;
  }
  // A matra piece may also follow a full matra, but not a second piece.
  if (codes_[codes_used_].first == CharClass::kMatraPiece &&
      codes_[codes_used_ - 1].first != CharClass::kMatraPiece) {
    if (UseMultiCode(1)) {
      return true;
    }
  }
  // {D}: optional vowel modifier / sign.
  if (codes_[codes_used_].first == CharClass::kVowelModifier) {
    if (UseMultiCode(1)) {
      return true;
    }
  }
  // {HC}: optional final coeng+consonant pair.
  if (codes_used_ + 1 < num_codes && codes_[codes_used_].first == CharClass::kVirama &&
      codes_[codes_used_ + 1].first == CharClass::kConsonant) {
    ASSERT_HOST(!CodeOnlyToOutput());
    if (UseMultiCode(2)) {
      return true;
    }
  }
  return true;
}
// Maps one unicode to its Khmer character class by its offset within the
// Khmer code page (U+1780...). Vedic accents, joiners and codes outside the
// page are handled first.
Validator::CharClass ValidateKhmer::UnicodeToCharClass(char32 ch) const {
  if (IsVedicAccent(ch)) {
    return CharClass::kVedicMark;
  }
  if (ch == kZeroWidthNonJoiner) {
    return CharClass::kZeroWidthNonJoiner;
  }
  if (ch == kZeroWidthJoiner) {
    return CharClass::kZeroWidthJoiner;
  }
  // Offset from the start of the relevant unicode code block aka code page.
  const int offset = ch - static_cast<char32>(script_);
  // Anything in another code block is other.
  if (offset < 0 || offset >= kIndicCodePageSize) {
    return CharClass::kOther;
  }
  if (offset <= 0x33) {
    return CharClass::kConsonant;
  }
  if (offset <= 0x45) {
    return CharClass::kMatra;
  }
  switch (offset) {
    case 0x46:
      return CharClass::kMatraPiece;
    case 0x4c:
      return CharClass::kRobat;
    case 0x49:
    case 0x4a:
      return CharClass::kNukta;
    default:
      break;
  }
  if (offset <= 0x51) {
    return CharClass::kVowelModifier;
  }
  return offset == 0x52 ? CharClass::kVirama : CharClass::kOther;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/unicharset/validate_khmer.cpp
|
C++
|
apache-2.0
| 4,380
|
#ifndef TESSERACT_TRAINING_VALIDATE_KHMER_H_
#define TESSERACT_TRAINING_VALIDATE_KHMER_H_
#include "validator.h"
namespace tesseract {
// Subclass of Validator that validates and segments Khmer.
class ValidateKhmer : public Validator {
public:
  ValidateKhmer(ViramaScript script, bool report_errors) : Validator(script, report_errors) {}
  ~ValidateKhmer() override = default;
protected:
  // Returns whether codes matches the pattern for a Khmer Grapheme.
  // Consumes the next Grapheme in codes_[codes_used_++...] and copies it to
  // parts_ and output_. Returns true if a valid Grapheme was consumed,
  // otherwise does not increment codes_used_.
  bool ConsumeGraphemeIfValid() override;
  // Returns the CharClass corresponding to the given Unicode ch.
  CharClass UnicodeToCharClass(char32 ch) const override;
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_VALIDATE_KHMER_H_
|
2301_81045437/tesseract
|
src/training/unicharset/validate_khmer.h
|
C++
|
apache-2.0
| 902
|
#include "validate_myanmar.h"
#include "errcode.h"
#include "icuerrorcode.h"
#include "tprintf.h"
#include "unicode/uchar.h" // From libicu
#include "unicode/uscript.h" // From libicu
namespace tesseract {
// Returns whether codes matches the pattern for a Myanmar Grapheme.
// Taken directly from the unicode table 16-3.
// See http://www.unicode.org/versions/Unicode9.0.0/ch16.pdf
bool ValidateMyanmar::ConsumeGraphemeIfValid() {
  const unsigned num_codes = codes_.size();
  // Nothing left to consume.
  if (codes_used_ == num_codes) {
    return true;
  }
  // Other.
  // Digits, punctuation and non-Myanmar codes pass through singly.
  if (IsMyanmarOther(codes_[codes_used_].second)) {
    UseMultiCode(1);
    return true;
  }
  // Kinzi: NGA (0x1004) + asat + virama form a 3-code prefix unit.
  if (codes_used_ + 2 < num_codes && codes_[codes_used_].second == 0x1004 &&
      codes_[codes_used_ + 1].second == kMyanmarAsat &&
      codes_[codes_used_ + 2].second == kMyanmarVirama) {
    // Copy the first two codes to output_ without making parts, then emit
    // all three as one unit.
    ASSERT_HOST(!CodeOnlyToOutput());
    ASSERT_HOST(!CodeOnlyToOutput());
    if (UseMultiCode(3)) {
      return true;
    }
  }
  // Base consonant/vowel. NOTE that since everything in Myanmar appears to be
  // optional, except the base, this is the only place where invalid input can
  // be detected and false returned.
  if (IsMyanmarLetter(codes_[codes_used_].second)) {
    if (UseMultiCode(1)) {
      return true;
    }
  } else {
    if (report_errors_) {
      tprintf("Invalid start of Myanmar syllable:0x%x\n", codes_[codes_used_].second);
    }
    return false; // One of these is required.
  }
  // Optional subscript (stacked) consonant, then optional signs.
  // Both helpers return true when the input is exhausted.
  if (ConsumeSubscriptIfPresent()) {
    return true;
  }
  ConsumeOptionalSignsIfPresent();
  // What we have consumed so far is a valid syllable.
  return true;
}
// TODO(rays) Doesn't use intermediate coding like the other scripts, as there
// is little correspondence between the content of table 16-3 and the char
// classes of the Indic languages. (Experts may disagree and improve!)
// In unicode table 16-3 there is basically a long list of optional characters,
// which can be coded quite easily.
// Unfortunately, table 16-3 doesn't include even half the Myanmar unicodes!!
// The table also allows sequences that still result in dotted circles!!
// So with a lot of guesswork the rest have been added in a reasonable place.
Validator::CharClass ValidateMyanmar::UnicodeToCharClass(char32 ch) const {
  // Myanmar only distinguishes letters from everything else: anything that
  // can start a syllable is lumped into kConsonant.
  return IsMyanmarLetter(ch) ? CharClass::kConsonant : CharClass::kOther;
}
// Helper consumes/copies a virama and any subscript consonant.
// Returns true if the end of input is reached.
bool ValidateMyanmar::ConsumeSubscriptIfPresent() {
  // Subscript consonant. It appears there can be only one.
  const unsigned pos = codes_used_;
  if (pos + 1 < codes_.size() && codes_[pos].second == kMyanmarVirama &&
      IsMyanmarLetter(codes_[pos + 1].second)) {
    // Copy the virama to output_ without starting a part, then emit the
    // virama + consonant as one 2-code unit.
    ASSERT_HOST(!CodeOnlyToOutput());
    if (UseMultiCode(2)) {
      return true;
    }
  }
  return false;
}
// Helper consumes/copies a series of optional signs.
// Returns true if the end of input is reached.
bool ValidateMyanmar::ConsumeOptionalSignsIfPresent() {
// The following characters are allowed, all optional, and in sequence.
// An exception is kMyanmarMedialYa, which can include kMyanmarAsat.
const std::vector<char32> kMedials({kMyanmarAsat, kMyanmarMedialYa, 0x103c, 0x103d, 0x103e,
0x105e, 0x105f, 0x1060, 0x1081, 0x1031});
for (char32 ch : kMedials) {
if (codes_[codes_used_].second == ch) {
if (UseMultiCode(1)) {
return true;
}
if (ch == kMyanmarMedialYa && codes_[codes_used_].second == kMyanmarAsat) {
if (UseMultiCode(1)) {
return true;
}
}
}
}
// Vowel sign i, ii, ai.
char32 ch = codes_[codes_used_].second;
if (ch == 0x102d || ch == 0x102e || ch == 0x1032) {
if (UseMultiCode(1)) {
return true;
}
}
// Vowel sign u, uu, and extensions.
ch = codes_[codes_used_].second;
if (ch == 0x102f || ch == 0x1030 || (0x1056 <= ch && ch <= 0x1059) || ch == 0x1062 ||
ch == 0x1067 || ch == 0x1068 || (0x1071 <= ch && ch <= 0x1074) ||
(0x1083 <= ch && ch <= 0x1086) || ch == 0x109c || ch == 0x109d) {
if (UseMultiCode(1)) {
return true;
}
}
// Tall aa, aa with optional asat.
if (codes_[codes_used_].second == 0x102b || codes_[codes_used_].second == 0x102c) {
if (UseMultiCode(1)) {
return true;
}
if (codes_[codes_used_].second == kMyanmarAsat) {
if (UseMultiCode(1)) {
return true;
}
}
}
// The following characters are allowed, all optional, and in sequence.
// Anusvar, Dot below, Visarga
const std::vector<char32> kSigns({0x1036, 0x1037, 0x1038});
for (char32 ch : kSigns) {
if (codes_[codes_used_].second == ch) {
if (UseMultiCode(1)) {
return true;
}
}
}
// Tone mark extensions.
ch = codes_[codes_used_].second;
if (ch == 0x102c || ch == 0x1038 || ch == kMyanmarAsat || (0x1062 <= ch && ch <= 0x1064) ||
(0x1069 <= ch && ch <= 0x106d) || (0x1087 <= ch && ch <= 0x108d) || ch == 0x108f ||
ch == 0x109a || ch == 0x109b || (0xaa7b <= ch && ch <= 0xaa7d)) {
if (UseMultiCode(1)) {
return true;
}
}
// Sgaw tones 0x1062, 0x1063 must be followed by asat.
// W Pwo tones 0x1069, 0x106a, and 0x106b may be followed by dot below or visarga (nasal).
ch = codes_[codes_used_].second;
if (ch == 0x103a || ch == 0x1037 || ch == 0x1038) {
if (UseMultiCode(1)) {
return true;
}
}
return false;
}
// Returns true if the unicode is a Myanmar "letter" including consonants
// and independent vowels. Although table 16-3 distinguishes between some
// base consonants and vowels, the extensions make no such distinction, so we
// put them all into a single bucket.
// Update MYANMAR LETTER based on following:
// https://unicode.org/charts/PDF/U1000.pdf - Myanmar
// http://unicode.org/charts/PDF/UAA60.pdf - Myanmar Extended-A
// http://unicode.org/charts/PDF/UA9E0.pdf - Myanmar Extended-B
/* static */
bool ValidateMyanmar::IsMyanmarLetter(char32 ch) {
  // Main Myanmar block: base consonants and independent vowels.
  if (0x1000 <= ch && ch <= 0x102a) {
    return true;
  }
  if (ch == 0x103f || (0x104c <= ch && ch <= 0x1055) || (0x105a <= ch && ch <= 0x105d)) {
    return true;
  }
  if (ch == 0x1061 || ch == 0x1065 || ch == 0x1066 || (0x106e <= ch && ch <= 0x1070)) {
    return true;
  }
  if ((0x1075 <= ch && ch <= 0x1081) || ch == 0x108e) {
    return true;
  }
  // Myanmar Extended-B.
  if ((0xa9e0 <= ch && ch <= 0xa9e4) || (0xa9e7 <= ch && ch <= 0xa9ef) ||
      (0xa9fa <= ch && ch <= 0xa9fe)) {
    return true;
  }
  // Myanmar Extended-A.
  return (0xaa60 <= ch && ch <= 0xaa6f) || (0xaa71 <= ch && ch <= 0xaa73) || ch == 0xaa7a ||
         ch == 0xaa7e || ch == 0xaa7f;
}
// Returns true if ch is a Myanmar digit or other symbol that does not take
// part in being a syllable eg. punctuation marks.
// MYANMAR DIGIT, MYANMAR SYMBOL, MYANMAR LOGOGRAM
// REDUPLICATION MARKS
/* static */
bool ValidateMyanmar::IsMyanmarOther(char32 ch) {
  IcuErrorCode err;
  UScriptCode script_code = uscript_getScript(ch, err);
  // Anything ICU classifies as non-Myanmar script (other than the two
  // joiners) counts as "other".
  if (script_code != USCRIPT_MYANMAR && ch != Validator::kZeroWidthJoiner &&
      ch != Validator::kZeroWidthNonJoiner) {
    return true;
  }
  // Within the Myanmar script: digits, symbols and reduplication marks.
  return (0x1040 <= ch && ch <= 0x104f) || (0x1090 <= ch && ch <= 0x1099) ||
         (0x109e <= ch && ch <= 0x109f) || (0xa9f0 <= ch && ch <= 0xa9f9) ||
         (ch == 0xa9e6 || ch == 0xaa70) || (0xaa74 <= ch && ch <= 0xaa79);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/unicharset/validate_myanmar.cpp
|
C++
|
apache-2.0
| 7,335
|
#ifndef TESSERACT_TRAINING_VALIDATE_MYANMAR_H_
#define TESSERACT_TRAINING_VALIDATE_MYANMAR_H_
#include "validator.h"
namespace tesseract {
// Subclass of Validator that validates and segments Myanmar.
class ValidateMyanmar : public Validator {
public:
  ValidateMyanmar(ViramaScript script, bool report_errors) : Validator(script, report_errors) {}
  ~ValidateMyanmar() override = default;
protected:
  // Returns whether codes matches the pattern for a Myanmar Grapheme.
  // Consumes the next Grapheme in codes_[codes_used_++...] and copies it to
  // parts_ and output_. Returns true if a valid Grapheme was consumed,
  // otherwise does not increment codes_used_.
  bool ConsumeGraphemeIfValid() override;
  // Returns the CharClass corresponding to the given Unicode ch.
  Validator::CharClass UnicodeToCharClass(char32 ch) const override;
private:
  // Helper consumes/copies a virama and any subscript consonant.
  // Returns true if the end of input is reached.
  bool ConsumeSubscriptIfPresent();
  // Helper consumes/copies a series of optional signs.
  // Returns true if the end of input is reached.
  bool ConsumeOptionalSignsIfPresent();
  // Returns true if the unicode is a Myanmar "letter" including consonants
  // and independent vowels. Although table 16-3 distinguishes between some
  // base consonants and vowels, the extensions make no such distinction, so we
  // put them all into a single bucket.
  static bool IsMyanmarLetter(char32 ch);
  // Returns true if ch is a Myanmar digit or other symbol that does not take
  // part in being a syllable.
  static bool IsMyanmarOther(char32 ch);
  // Some special unicodes used only for Myanmar processing.
  static const char32 kMyanmarAsat = 0x103a;     // Vowel killer.
  static const char32 kMyanmarMedialYa = 0x103b; // Medial consonant.
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_VALIDATE_MYANMAR_H_
|
2301_81045437/tesseract
|
src/training/unicharset/validate_myanmar.h
|
C++
|
apache-2.0
| 1,856
|
#include "validator.h"
#include <algorithm>
#include <iterator>
#include <unordered_map>
#include <vector>
#include "icuerrorcode.h"
#include "unicode/uchar.h" // From libicu
#include "unicode/uscript.h" // From libicu
#include "validate_grapheme.h"
#include "validate_indic.h"
#include "validate_javanese.h"
#include "validate_khmer.h"
#include "validate_myanmar.h"
namespace tesseract {
// Some specific but universally useful unicodes.
const char32 Validator::kZeroWidthSpace = 0x200B;
const char32 Validator::kZeroWidthNonJoiner = 0x200C;
const char32 Validator::kZeroWidthJoiner = 0x200D;
const char32 Validator::kLeftToRightMark = 0x200E;
const char32 Validator::kRightToLeftMark = 0x200F;
// U+FFFD REPLACEMENT CHARACTER.
const char32 Validator::kInvalid = 0xfffd;
// Destructor.
// It is defined here, so the compiler can create a single vtable
// instead of weak vtables in every compilation unit.
Validator::~Validator() = default;
// Validates and cleans the src vector of unicodes to the *dest, according to
// g_mode. In the case of kSingleString, a single vector containing the whole
// result is added to *dest. With kCombined, multiple vectors are added to
// *dest with one grapheme in each. With kGlyphSplit, multiple vectors are
// added to *dest with a smaller unit representing a glyph in each.
// In case of validation error, returns false and as much as possible of the
// input, without discarding invalid text.
/* static */
bool Validator::ValidateCleanAndSegment(GraphemeNormMode g_mode, bool report_errors,
                                        const std::vector<char32> &src,
                                        std::vector<std::vector<char32>> *dest) {
  // Generic grapheme segmentation is always the first pass.
  ValidateGrapheme g_validator(ViramaScript::kNonVirama, report_errors);
  std::vector<std::vector<char32>> graphemes;
  // Pick the script-specific validator from the dominant script in src.
  ViramaScript script = MostFrequentViramaScript(src);
  bool success = true;
  if (script == ViramaScript::kNonVirama) {
    // The grapheme segmenter's maximum segmentation is the grapheme unit, so
    // up the mode by 1 to get the desired effect.
    if (g_mode == GraphemeNormMode::kCombined) {
      g_mode = GraphemeNormMode::kGlyphSplit;
    } else if (g_mode == GraphemeNormMode::kGlyphSplit) {
      g_mode = GraphemeNormMode::kIndividualUnicodes;
    }
    // Just do grapheme segmentation.
    success = g_validator.ValidateCleanAndSegmentInternal(g_mode, src, dest);
  } else {
    // Split into standard graphemes first, then run the script-specific
    // validator over each grapheme separately.
    success =
        g_validator.ValidateCleanAndSegmentInternal(GraphemeNormMode::kGlyphSplit, src, &graphemes);
    std::unique_ptr<Validator> validator(ScriptValidator(script, report_errors));
    for (const auto &grapheme : graphemes) {
      // Keep going on error so as much input as possible is returned.
      if (!validator->ValidateCleanAndSegmentInternal(g_mode, grapheme, dest)) {
        success = false;
      }
    }
  }
  return success;
}
// Factory method that understands how to map script to the right subclass.
std::unique_ptr<Validator> Validator::ScriptValidator(ViramaScript script, bool report_errors) {
  switch (script) {
    case ViramaScript::kNonVirama:
      return std::make_unique<ValidateGrapheme>(script, report_errors);
    case ViramaScript::kJavanese:
      return std::make_unique<ValidateJavanese>(script, report_errors);
    case ViramaScript::kMyanmar:
      return std::make_unique<ValidateMyanmar>(script, report_errors);
    case ViramaScript::kKhmer:
      return std::make_unique<ValidateKhmer>(script, report_errors);
    default:
      // All remaining virama scripts share the generic Indic validator.
      return std::make_unique<ValidateIndic>(script, report_errors);
  }
}
// Internal version of the public static ValidateCleanAndSegment.
// Validates and cleans the src vector of unicodes to the *dest, according to
// its type and the given g_mode.
// In case of validation error, returns false and returns as much as possible
// of the input, without discarding invalid text.
bool Validator::ValidateCleanAndSegmentInternal(GraphemeNormMode g_mode,
                                                const std::vector<char32> &src,
                                                std::vector<std::vector<char32>> *dest) {
  Clear();
  ComputeClassCodes(src);
  bool success = true;
  codes_used_ = 0;
  while (codes_used_ < codes_.size()) {
    // On success the consumer advances codes_used_ itself; on failure we
    // skip one code and keep going.
    if (!ConsumeGraphemeIfValid()) {
      success = false;
      ++codes_used_;
    }
  }
  MoveResultsToDest(g_mode, dest);
  return success;
}
// Moves the results from parts_ or output_ to dest according to g_mode.
void Validator::MoveResultsToDest(GraphemeNormMode g_mode, std::vector<std::vector<char32>> *dest) {
  if (g_mode == GraphemeNormMode::kIndividualUnicodes) {
    // Append each element of the combined output_ that we made as a new vector
    // in dest.
    dest->reserve(dest->size() + output_.size());
    for (char32 ch : output_) {
      dest->push_back({ch});
    }
  } else if (g_mode == GraphemeNormMode::kGlyphSplit) {
    // Append all the parts_ that we made onto dest.
    std::move(parts_.begin(), parts_.end(), std::back_inserter(*dest));
  } else if (g_mode == GraphemeNormMode::kCombined || dest->empty()) {
    // Append the combined output_ that we made onto dest as one new vector.
    // (Also the kSingleString case when dest is still empty.)
    dest->push_back(std::vector<char32>());
    output_.swap(dest->back());
  } else { // kNone.
    // Append the combined output_ that we made onto the last existing element
    // of dest, building up a single string across calls.
    dest->back().insert(dest->back().end(), output_.begin(), output_.end());
  }
}
// Comparator for std::max_element: orders histogram entries by their count
// (the second element of the pair).
static bool CmpPairSecond(const std::pair<int, int> &lhs, const std::pair<int, int> &rhs) {
  return lhs.second < rhs.second;
}
// Computes and returns the ViramaScript corresponding to the most frequent
// virama-using script in the input, or kNonVirama if none are present.
/* static */
ViramaScript Validator::MostFrequentViramaScript(const std::vector<char32> &utf32) {
  // Histogram keyed by code-page index (unicode / kIndicCodePageSize).
  std::unordered_map<int, int> histogram;
  for (char32 ch : utf32) {
    // Determine the codepage base. For the Indic scripts, Khmer and Javanese,
    // it is sufficient to divide by kIndicCodePageSize but Myanmar is all over
    // the unicode code space, so use its script id.
    int base = ch / kIndicCodePageSize;
    IcuErrorCode err;
    UScriptCode script_code = uscript_getScript(ch, err);
    // Count only characters that belong to a specific candidate script.
    if ((kMinIndicUnicode <= ch && ch <= kMaxJavaneseUnicode && script_code != USCRIPT_COMMON) ||
        script_code == USCRIPT_MYANMAR) {
      if (script_code == USCRIPT_MYANMAR) {
        // Fold all Myanmar codes onto the Myanmar base page.
        base = static_cast<char32>(ViramaScript::kMyanmar) / kIndicCodePageSize;
      }
      ++histogram[base];
    }
  }
  if (!histogram.empty()) {
    // Winner is the code page with the highest count.
    int base = std::max_element(histogram.begin(), histogram.end(), CmpPairSecond)->first;
    auto codebase = static_cast<char32>(base * kIndicCodePageSize);
    // Check for validity: the base must be one of the known script pages.
    if (codebase == static_cast<char32>(ViramaScript::kMyanmar) ||
        codebase == static_cast<char32>(ViramaScript::kJavanese) ||
        codebase == static_cast<char32>(ViramaScript::kKhmer) ||
        (static_cast<char32>(ViramaScript::kDevanagari) <= codebase &&
         codebase <= static_cast<char32>(ViramaScript::kSinhala))) {
      return static_cast<ViramaScript>(codebase);
    }
  }
  return ViramaScript::kNonVirama;
}
// Returns true if the given UTF-32 unicode is a "virama" character.
/* static */
bool Validator::IsVirama(char32 unicode) {
  // The main Indic code pages are 0x80 codes wide and each keeps its virama
  // at offset 0x4d (eg. U+094D DEVANAGARI SIGN VIRAMA), so masking with 0x7f
  // tests all of them at once. Scripts whose virama sits elsewhere are then
  // checked individually.
  return (kMinIndicUnicode <= unicode && unicode <= kMaxSinhalaUnicode &&
          (unicode & 0x7f) == 0x4d) ||
         unicode == kSinhalaVirama || unicode == kJavaneseVirama || unicode == kMyanmarVirama ||
         unicode == kKhmerVirama;
}
// Returns true if the given UTF-32 unicode is a vedic accent.
/* static */
bool Validator::IsVedicAccent(char32 unicode) {
  // Vedic Extensions block.
  if (0x1cd0 <= unicode && unicode < 0x1d00) {
    return true;
  }
  // Devanagari Extended marks.
  if (0xa8e0 <= unicode && unicode <= 0xa8f7) {
    return true;
  }
  // Devanagari stress/tone marks.
  return 0x951 <= unicode && unicode <= 0x954;
}
// Returns true if the script is one that uses subscripts for conjuncts.
bool Validator::IsSubscriptScript() const {
  switch (script_) {
    case ViramaScript::kTelugu:
    case ViramaScript::kKannada:
    case ViramaScript::kJavanese:
    case ViramaScript::kMyanmar:
    case ViramaScript::kKhmer:
      return true;
    default:
      return false;
  }
}
void Validator::ComputeClassCodes(const std::vector<char32> &text) {
codes_.reserve(text.size());
for (char32 c : text) {
codes_.emplace_back(UnicodeToCharClass(c), c);
}
}
// Resets to the initial state.
void Validator::Clear() {
  codes_.clear();   // Class codes of the current input.
  parts_.clear();   // Glyph-split results accumulated so far.
  output_.clear();  // Combined output accumulated so far.
  codes_used_ = 0;  // Read position in codes_.
  output_used_ = 0; // Number of output_ elements already moved to parts_.
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/training/unicharset/validator.cpp
|
C++
|
apache-2.0
| 8,224
|
/**********************************************************************
* File: validator.h
* Description: Base class for various text validators. Intended mainly for
* scripts that use a virama character.
* Author: Ray Smith
*
* (C) Copyright 2017, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_TRAINING_VALIDATOR_H_
#define TESSERACT_TRAINING_VALIDATOR_H_
#include "export.h"
#include <tesseract/unichar.h>
#include <memory>
#include <vector>
namespace tesseract {
// Different kinds of grapheme normalization - not just for Indic!
// A grapheme is a syllable unit in Indic and can be several unicodes.
// In other scripts, a grapheme is a base character and accent/diacritic
// combination, as not all accented characters have a single composed form.
// Controls the granularity of Validator::ValidateCleanAndSegment output.
enum class GraphemeNormMode {
  // Validation result is a single string, even if input is multi-word.
  kSingleString,
  // Standard unicode graphemes are validated and output as grapheme units.
  kCombined,
  // Graphemes are validated and sub-divided. For virama-using scripts, units
  // that correspond to repeatable glyphs are generated. (Mostly single unicodes
  // but viramas and joiners are paired with the most sensible neighbor.)
  // For non-virama scripts, this means that base/accent pairs are separated,
  // ie the output is individual unicodes.
  kGlyphSplit,
  // The output is always single unicodes, regardless of the script.
  kIndividualUnicodes,
};
// An enum representing the scripts that use a virama character. It is
// guaranteed that the value of any element, (except kNonVirama) can be cast
// to a unicode (char32) value that represents the start of the unicode range
// of the corresponding script.
enum class ViramaScript : char32 {
  // Not a virama script: plain grapheme segmentation applies.
  kNonVirama = 0,
  kDevanagari = 0x900,
  kBengali = 0x980,
  kGurmukhi = 0xa00,
  kGujarati = 0xa80,
  kOriya = 0xb00,
  kTamil = 0xb80,
  kTelugu = 0xc00,
  kKannada = 0xc80,
  kMalayalam = 0xd00,
  kSinhala = 0xd80,
  kMyanmar = 0x1000,
  kKhmer = 0x1780,
  kJavanese = 0xa980,
};
// Base class offers a validation API and protected methods to allow subclasses
// to easily build the validated/segmented output.
class TESS_UNICHARSET_TRAINING_API Validator {
public:
// Validates and cleans the src vector of unicodes to the *dest, according to
// g_mode. In the case of kSingleString, a single vector containing the whole
// result is added to *dest. With kCombined, multiple vectors are added to
// *dest with one grapheme in each. With kGlyphSplit, multiple vectors are
// added to *dest with a smaller unit representing a glyph in each.
// In case of validation error, returns false and as much as possible of the
// input, without discarding invalid text.
static bool ValidateCleanAndSegment(GraphemeNormMode g_mode, bool report_errors,
const std::vector<char32> &src,
std::vector<std::vector<char32>> *dest);
  // Returns true if the unicode ch is a non-printing zero-width mark of no
  // significance to OCR training or evaluation.
  // kInvalid (U+FFFD REPLACEMENT CHARACTER) is included in this set.
  static bool IsZeroWidthMark(char32 ch) {
    return ch == kZeroWidthSpace || ch == kLeftToRightMark || ch == kRightToLeftMark ||
           ch == kInvalid;
  }
virtual ~Validator();
// Some specific but universally useful unicodes.
static const char32 kZeroWidthSpace;
static const char32 kZeroWidthNonJoiner;
static const char32 kZeroWidthJoiner;
static const char32 kLeftToRightMark;
static const char32 kRightToLeftMark;
static const char32 kInvalid;
protected:
// These are more or less the character class identifiers in the ISCII
// standard, section 8. They have been augmented with the Unicode meta
// characters Zero Width Joiner and Zero Width Non Joiner, and the
// Unicode Vedic Marks.
// The best sources of information on Unicode and Indic scripts are:
// http://varamozhi.sourceforge.net/iscii91.pdf
// http://www.unicode.org/versions/Unicode9.0.0/ch12.pdf
// http://unicode.org/faq/indic.html
// http://www.microsoft.com/typography/otfntdev/teluguot/shaping.aspx
enum class CharClass {
// NOTE: The values of the enum members are meaningless and arbitrary, ie
// they are not used for sorting, or any other risky application.
// The reason they are what they are is they are a single character
// abbreviation that can be used in a regexp/BNF definition of a grammar,
// IN A COMMENT, and still not relied upon in the code.
kConsonant = 'C',
kVowel = 'V',
kVirama = 'H', // (aka Halant)
kMatra = 'M', // (aka Dependent Vowel)
kMatraPiece = 'P', // unicode provides pieces of Matras.
kVowelModifier = 'D', // (candrabindu, anusvara, visarga, other marks)
kZeroWidthNonJoiner = 'z', // Unicode Zero Width Non-Joiner U+200C
kZeroWidthJoiner = 'Z', // Unicode Zero Width Joiner U+200D
kVedicMark = 'v', // Modifiers can come modify any indic syllable.
kNukta = 'N', // Occurs only immediately after consonants.
kRobat = 'R', // Khmer only.
kOther = 'O', // (digits, measures, non-Indic, etc)
// Additional classes used only by ValidateGrapheme.
kWhitespace = ' ',
kCombiner = 'c', // Combiners other than virama.
};
  // A (character class, unicode) pair, as stored in codes_.
  using IndicPair = std::pair<CharClass, char32>;
  // Protected: instances are created via the ScriptValidator factory.
  Validator(ViramaScript script, bool report_errors)
      : script_(script), codes_used_(0), output_used_(0), report_errors_(report_errors) {}
// Factory method that understands how to map script to the right subclass.
static std::unique_ptr<Validator> ScriptValidator(ViramaScript script, bool report_errors);
// Internal version of the public static ValidateCleanAndSegment.
// Validates and cleans the src vector of unicodes to the *dest, according to
// its type and the given g_mode.
// In case of validation error, returns false and returns as much as possible
// of the input, without discarding invalid text.
bool ValidateCleanAndSegmentInternal(GraphemeNormMode g_mode, const std::vector<char32> &src,
std::vector<std::vector<char32>> *dest);
// Moves the results from parts_ or output_ to dest according to g_mode.
void MoveResultsToDest(GraphemeNormMode g_mode, std::vector<std::vector<char32>> *dest);
// Computes and returns the ViramaScript corresponding to the most frequent
// virama-using script in the input, or kNonVirama if none are present.
static ViramaScript MostFrequentViramaScript(const std::vector<char32> &utf32);
// Returns true if the given UTF-32 unicode is a "virama" character.
static bool IsVirama(char32 unicode);
// Returns true if the given UTF-32 unicode is a vedic accent.
static bool IsVedicAccent(char32 unicode);
// Returns true if the script is one that uses subscripts for conjuncts.
bool IsSubscriptScript() const;
// Helper function appends the next element of codes_ only to output_,
// without touching parts_
// Returns true at the end of codes_.
bool CodeOnlyToOutput() {
output_.push_back(codes_[codes_used_].second);
return ++codes_used_ == codes_.size();
}
// Helper function adds a length-element vector to parts_ from the last length
// elements of output_. If there are more than length unused elements in
// output_, adds unicodes as single-element vectors to parts_ to catch
// output_used_ up to output->size() - length before adding the length-element
// vector.
  void MultiCodePart(unsigned length) {
    // Flush any unused leading elements of output_ as single-code parts, so
    // that exactly `length` (or fewer, at the tail) elements remain for the
    // final multi-code part.
    while (output_used_ + length < output_.size()) {
      parts_.emplace_back(std::initializer_list<char32>{output_[output_used_++]});
    }
    // Start the multi-code part with the first remaining element...
    parts_.emplace_back(std::initializer_list<char32>{output_[output_used_]});
    // ...and append all remaining elements of output_ to it.
    while (++output_used_ < output_.size()) {
      parts_.back().push_back(output_[output_used_]);
    }
  }
// Helper function appends the next element of codes_ to output_, and then
// calls MultiCodePart to add the appropriate components to parts_.
// Returns true at the end of codes_.
bool UseMultiCode(unsigned length) {
output_.push_back(codes_[codes_used_].second);
MultiCodePart(length);
return ++codes_used_ == codes_.size();
}
// Consumes the next Grapheme in codes_[codes_used_++...] and copies it to
// parts_ and output_. Returns true if a valid Grapheme was consumed,
// otherwise does not increment codes_used_.
virtual bool ConsumeGraphemeIfValid() = 0;
// Sets codes_ to the class codes for the given unicode text.
void ComputeClassCodes(const std::vector<char32> &text);
// Returns the CharClass corresponding to the given Unicode ch.
virtual CharClass UnicodeToCharClass(char32 ch) const = 0;
// Resets to the initial state.
void Clear();
// Number of unicodes in each Indic codepage.
static const int kIndicCodePageSize = 128;
// Lowest unicode value of any Indic script. (Devanagari).
static const char32 kMinIndicUnicode = 0x900;
// Highest unicode value of any consistent (ISCII-based) Indic script.
static const char32 kMaxSinhalaUnicode = 0xdff;
// Highest unicode value of any virama-using script. (Khmer).
static const char32 kMaxViramaScriptUnicode = 0x17ff;
// Some special unicodes.
static const char32 kSinhalaVirama = 0xdca;
static const char32 kMyanmarVirama = 0x1039;
static const char32 kKhmerVirama = 0x17d2;
// Javanese Script - aksarajawa
static const char32 kJavaneseVirama = 0xa9c0;
static const char32 kMaxJavaneseUnicode = 0xa9df;
// Script we are operating on.
ViramaScript script_;
// Input unicodes with assigned CharClass is the data to be validated.
std::vector<IndicPair> codes_;
// Glyph-like components of the input.
std::vector<std::vector<char32>> parts_;
// Copied validated unicodes from codes_ that are OK to output.
std::vector<char32> output_;
// The number of elements of codes_ that have been processed so far.
unsigned codes_used_;
// The number of elements of output_ that have already been added to parts_.
unsigned output_used_;
// Log error messages for reasons why text is invalid.
bool report_errors_;
};
} // namespace tesseract
#endif // TESSERACT_TRAINING_VALIDATOR_H_
|
2301_81045437/tesseract
|
src/training/unicharset/validator.h
|
C++
|
apache-2.0
| 10,840
|
///////////////////////////////////////////////////////////////////////
// File: unicharset_extractor.cpp
// Description: Unicode character/ligature set extractor.
// Author: Thomas Kielbus
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
// Given a list of box files or text files on the command line, this program
// normalizes the text according to command-line options and generates
// a unicharset.
#include <cstdlib>
#include <filesystem>
#include "boxread.h"
#include "commandlineflags.h"
#include "commontraining.h" // CheckSharedLibraryVersion
#include "lang_model_helpers.h"
#include "normstrngs.h"
#include "unicharset.h"
#include "unicharset_training_utils.h"
using namespace tesseract;
static STRING_PARAM_FLAG(output_unicharset, "unicharset", "Output file path");
static INT_PARAM_FLAG(norm_mode, 1,
"Normalization mode: 1=Combine graphemes, "
"2=Split graphemes, 3=Pure unicode");
namespace tesseract {
// Helper normalizes and segments the given strings according to norm_mode, and
// adds the segmented parts to unicharset.
// Helper normalizes and segments the given strings according to norm_mode, and
// adds the segmented parts to unicharset.
static void AddStringsToUnicharset(const std::vector<std::string> &strings, int norm_mode,
                                   UNICHARSET *unicharset) {
  const auto g_mode = static_cast<GraphemeNormMode>(norm_mode);
  for (const auto &str : strings) {
    std::vector<std::string> pieces;
    bool ok = NormalizeCleanAndSegmentUTF8(UnicodeNormMode::kNFC, OCRNorm::kNone, g_mode,
                                           /*report_errors*/ true, str.c_str(), &pieces);
    if (!ok) {
      tprintf("Normalization failed for string '%s'\n", str.c_str());
      continue;
    }
    for (const std::string &piece : pieces) {
      // Each piece is a UTF-8 encoded string; skip empties and whitespace.
      if (!piece.empty() && !IsUTF8Whitespace(piece.c_str())) {
        unicharset->unichar_insert(piece.c_str());
      }
    }
  }
}
// Reads every command-line file (box files by extension, otherwise plain
// text), normalizes and segments their strings, and accumulates the results
// into a unicharset which is finally written to FLAGS_output_unicharset.
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on a bad box file or a
// failed unicharset write.
static int Main(int argc, char **argv) {
  UNICHARSET unicharset;
  // Load input files
  for (int arg = 1; arg < argc; ++arg) {
    std::filesystem::path filePath = argv[arg];
    std::string file_data = tesseract::ReadFile(argv[arg]);
    if (file_data.empty()) {
      continue; // Unreadable or empty file: nothing to extract.
    }
    std::vector<std::string> texts;
    if (filePath.extension() == ".box") {
      tprintf("Extracting unicharset from box file %s\n", argv[arg]);
      bool res = ReadMemBoxes(-1, /*skip_blanks*/ true, &file_data[0],
                              /*continue_on_failure*/ false, /*boxes*/ nullptr, &texts,
                              /*box_texts*/ nullptr, /*pages*/ nullptr);
      if (!res) {
        tprintf("Cannot read box data from '%s'\n", argv[arg]);
        return EXIT_FAILURE;
      }
    } else {
      tprintf("Extracting unicharset from plain text file %s\n", argv[arg]);
      // texts is freshly constructed each iteration, so the redundant
      // clear() before this assignment has been removed.
      texts = split(file_data, '\n');
    }
    AddStringsToUnicharset(texts, FLAGS_norm_mode, &unicharset);
  }
  SetupBasicProperties(/*report_errors*/ true, /*decompose*/ false, &unicharset);
  // Write unicharset file.
  if (unicharset.save_to_file(FLAGS_output_unicharset.c_str())) {
    tprintf("Wrote unicharset file %s\n", FLAGS_output_unicharset.c_str());
  } else {
    tprintf("Cannot save unicharset file %s\n", FLAGS_output_unicharset.c_str());
    return EXIT_FAILURE;
  }
  return EXIT_SUCCESS;
}
} // namespace tesseract
int main(int argc, char **argv) {
  tesseract::CheckSharedLibraryVersion();
  if (argc > 1) {
    tesseract::ParseCommandLineFlags(argv[0], &argc, &argv, true);
  }
  // With at least one input file left, run the extractor.
  if (argc >= 2) {
    return tesseract::Main(argc, argv);
  }
  // Otherwise print usage and fail.
  tprintf(
      "Usage: %s [--output_unicharset filename] [--norm_mode mode]"
      " box_or_text_file [...]\n",
      argv[0]);
  tprintf("Where mode means:\n");
  tprintf(" 1=combine graphemes (use for Latin and other simple scripts)\n");
  tprintf(" 2=split graphemes (use for Indic/Khmer/Myanmar)\n");
  tprintf(" 3=pure unicode (use for Arabic/Hebrew/Thai/Tibetan)\n");
  tprintf("Reads box or plain text files to extract the unicharset.\n");
  return EXIT_FAILURE;
}
|
2301_81045437/tesseract
|
src/training/unicharset_extractor.cpp
|
C++
|
apache-2.0
| 4,678
|
///////////////////////////////////////////////////////////////////////
// File: wordlist2dawg.cpp
// Description: Program to generate a DAWG from a word list file
// Author: Thomas Kielbus
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
// Given a file that contains a list of words (one word per line) this program
// generates the corresponding squished DAWG file.
#include "classify.h"
#include "commontraining.h" // CheckSharedLibraryVersion
#include "dawg.h"
#include "dict.h"
#include "helpers.h"
#include "serialis.h"
#include "trie.h"
#include "unicharset.h"
using namespace tesseract;
// Entry point: builds a squished DAWG from a word list (default / -r mode),
// or checks a word list against an existing DAWG (-t mode).
int main(int argc, char **argv) {
  tesseract::CheckSharedLibraryVersion();
  if (argc > 1 && (!strcmp(argv[1], "-v") || !strcmp(argv[1], "--version"))) {
    printf("%s\n", tesseract::TessBaseAPI::Version());
    return EXIT_SUCCESS;
  } else if (!(argc == 4 || (argc == 5 && strcmp(argv[1], "-t") == 0) ||
               (argc == 6 && strcmp(argv[1], "-r") == 0))) {
    printf(
        "Usage: %s -v | --version |\n"
        " %s [-t | -r [reverse policy] ] word_list_file"
        " dawg_file unicharset_file\n",
        argv[0], argv[0]);
    return EXIT_FAILURE;
  }
  tesseract::Classify classify;
  int argv_index = 0;
  if (argc == 5) {
    ++argv_index; // Skip over the "-t" flag.
  }
  tesseract::Trie::RTLReversePolicy reverse_policy = tesseract::Trie::RRP_DO_NO_REVERSE;
  if (argc == 6) {
    ++argv_index; // Skip over the "-r" flag.
    int tmp_int;
    // Fix: the original ignored the sscanf result, so a non-numeric policy
    // argument left tmp_int uninitialized (undefined behavior).
    if (sscanf(argv[++argv_index], "%d", &tmp_int) != 1) {
      tprintf("Invalid reverse policy '%s'\n", argv[argv_index]);
      return EXIT_FAILURE;
    }
    reverse_policy = static_cast<tesseract::Trie::RTLReversePolicy>(tmp_int);
    tprintf("Set reverse_policy to %s\n", tesseract::Trie::get_reverse_policy_name(reverse_policy));
  }
  // Remaining positional arguments, in order.
  const char *wordlist_filename = argv[++argv_index];
  const char *dawg_filename = argv[++argv_index];
  const char *unicharset_file = argv[++argv_index];
  tprintf("Loading unicharset from '%s'\n", unicharset_file);
  if (!classify.getDict().getUnicharset().load_from_file(unicharset_file)) {
    tprintf("Failed to load unicharset from '%s'\n", unicharset_file);
    return EXIT_FAILURE;
  }
  const UNICHARSET &unicharset = classify.getDict().getUnicharset();
  if (argc == 4 || argc == 6) {
    // Build mode: read the word list into a trie, compress to a DAWG.
    tesseract::Trie trie(
        // the first 3 arguments are not used in this case
        tesseract::DAWG_TYPE_WORD, "", SYSTEM_DAWG_PERM, unicharset.size(),
        classify.getDict().dawg_debug_level);
    tprintf("Reading word list from '%s'\n", wordlist_filename);
    if (!trie.read_and_add_word_list(wordlist_filename, unicharset, reverse_policy)) {
      tprintf("Failed to add word list from '%s'\n", wordlist_filename);
      return EXIT_FAILURE;
    }
    tprintf("Reducing Trie to SquishedDawg\n");
    std::unique_ptr<tesseract::SquishedDawg> dawg(trie.trie_to_dawg());
    if (dawg && dawg->NumEdges() > 0) {
      tprintf("Writing squished DAWG to '%s'\n", dawg_filename);
      dawg->write_squished_dawg(dawg_filename);
    } else {
      tprintf("Dawg is empty, skip producing the output file\n");
    }
  } else if (argc == 5) {
    // Check (-t) mode: verify the word list against an existing DAWG.
    tprintf("Loading dawg DAWG from '%s'\n", dawg_filename);
    tesseract::SquishedDawg words(dawg_filename,
                                  // these 3 arguments are not used in this case
                                  tesseract::DAWG_TYPE_WORD, "", SYSTEM_DAWG_PERM,
                                  classify.getDict().dawg_debug_level);
    tprintf("Checking word list from '%s'\n", wordlist_filename);
    words.check_for_words(wordlist_filename, unicharset, true);
  } else { // should never get here
    tprintf("Invalid command-line options\n");
    return EXIT_FAILURE;
  }
  return EXIT_SUCCESS;
}
|
2301_81045437/tesseract
|
src/training/wordlist2dawg.cpp
|
C++
|
apache-2.0
| 4,228
|
///////////////////////////////////////////////////////////////////////
// File: scrollview.cpp
// Description: ScrollView
// Author: Joern Wanke
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "scrollview.h"
#include "svutil.h" // for SVNetwork
#include <allheaders.h>
#include <algorithm>
#include <climits>
#include <cstdarg>
#include <cstring>
#include <map>
#include <memory> // for std::unique_ptr
#include <mutex> // for std::mutex
#include <string>
#include <thread> // for std::thread
#include <utility>
#include <vector>
namespace tesseract {
// TCP port on which the ScrollView Java server listens.
const int kSvPort = 8461;
// Maximum size in bytes of a single message sent to the server.
const int kMaxMsgSize = 4096;
const int kMaxIntPairSize = 45; // Holds %d,%d, for up to 64 bit.
// Buffer of polyline points accumulated by SetCursor/DrawTo and flushed
// by SendPolygon.
struct SVPolyLineBuffer {
  bool empty; // Independent indicator to allow SendMsg to call SendPolygon.
  std::vector<int> xcoords;
  std::vector<int> ycoords;
};
// A map between the window IDs and their corresponding pointers.
static std::map<int, ScrollView *> svmap;
// Guards all access to svmap.
static std::mutex *svmap_mu;
// A map of all semaphores waiting for a specific event on a specific window.
// The unique_ptr slot receives the matching event from MessageReceiver.
static std::map<std::pair<ScrollView *, SVEventType>,
                std::pair<SVSemaphore *, std::unique_ptr<SVEvent>>> waiting_for_events;
// Guards all access to waiting_for_events.
static std::mutex *waiting_for_events_mu;
std::unique_ptr<SVEvent> SVEvent::copy() const {
auto any = std::unique_ptr<SVEvent>(new SVEvent);
any->command_id = command_id;
any->counter = counter;
any->parameter = new char[strlen(parameter) + 1];
strcpy(any->parameter, parameter);
any->type = type;
any->x = x;
any->y = y;
any->x_size = x_size;
any->y_size = y_size;
any->window = window;
return any;
}
// Destructor.
// It is defined here (out of line), so the compiler can create a single
// vtable instead of weak vtables in every compilation unit.
SVEventHandler::~SVEventHandler() = default;
#ifndef GRAPHICS_DISABLED
/// This is the main loop which handles the ScrollView-logic from the server
/// to the client. It basically loops through messages, parses them to events
/// and distributes it to the waiting handlers.
/// It is run from a different thread and synchronizes via SVSync.
void ScrollView::MessageReceiver() {
  int counter_event_id = 0; // ongoing counter
  char *message = nullptr;
  // Wait until a new message appears in the input stream_.
  do {
    message = ScrollView::GetStream()->Receive();
  } while (message == nullptr);
  // This is the main loop which iterates until the server is dead (strlen =
  // -1). It basically parses for 3 different messagetypes and then distributes
  // the events accordingly.
  while (true) {
    // The new event we create.
    std::unique_ptr<SVEvent> cur(new SVEvent);
    // The ID of the corresponding window.
    int window_id;
    int ev_type;
    int n;
    // Fill the new SVEvent properly. %n records the parse position, so that
    // p below points at the (string) parameter payload of the message.
    sscanf(message, "%d,%d,%d,%d,%d,%d,%d,%n", &window_id, &ev_type, &cur->x, &cur->y, &cur->x_size,
           &cur->y_size, &cur->command_id, &n);
    char *p = (message + n);
    svmap_mu->lock();
    cur->window = svmap[window_id];
    // Events for unknown/destroyed windows are silently dropped.
    if (cur->window != nullptr) {
      auto length = strlen(p);
      cur->parameter = new char[length + 1];
      strcpy(cur->parameter, p);
      if (length > 0) { // remove the last \n
        cur->parameter[length - 1] = '\0';
      }
      cur->type = static_cast<SVEventType>(ev_type);
      // Correct selection coordinates so x,y is the min pt and size is +ve.
      if (cur->x_size > 0) {
        cur->x -= cur->x_size;
      } else {
        cur->x_size = -cur->x_size;
      }
      if (cur->y_size > 0) {
        cur->y -= cur->y_size;
      } else {
        cur->y_size = -cur->y_size;
      }
      // Returned y will be the bottom-left if y is reversed.
      if (cur->window->y_axis_is_reversed_) {
        cur->y = cur->window->TranslateYCoordinate(cur->y + cur->y_size);
      }
      cur->counter = counter_event_id;
      // Increase by 2 since we will also create an SVET_ANY event from cur,
      // which will have a counter_id of cur + 1 (and thus gets processed
      // after cur).
      counter_event_id += 2;
      // In case of an SVET_EXIT event, quit the whole application.
      if (ev_type == SVET_EXIT) {
        SendRawMessage("svmain:exit()");
        break;
      }
      // Place two copies of it in the table for the window.
      cur->window->SetEvent(cur.get());
      // Check if any of the threads currently waiting want it. Preference
      // order: exact (window, type) match, then (window, SVET_ANY), then
      // (any window, SVET_ANY).
      std::pair<ScrollView *, SVEventType> awaiting_list(cur->window, cur->type);
      std::pair<ScrollView *, SVEventType> awaiting_list_any(cur->window, SVET_ANY);
      std::pair<ScrollView *, SVEventType> awaiting_list_any_window((ScrollView *)nullptr,
                                                                    SVET_ANY);
      waiting_for_events_mu->lock();
      if (waiting_for_events.count(awaiting_list) > 0) {
        waiting_for_events[awaiting_list].second = std::move(cur);
        waiting_for_events[awaiting_list].first->Signal();
      } else if (waiting_for_events.count(awaiting_list_any) > 0) {
        waiting_for_events[awaiting_list_any].second = std::move(cur);
        waiting_for_events[awaiting_list_any].first->Signal();
      } else if (waiting_for_events.count(awaiting_list_any_window) > 0) {
        waiting_for_events[awaiting_list_any_window].second = std::move(cur);
        waiting_for_events[awaiting_list_any_window].first->Signal();
      }
      waiting_for_events_mu->unlock();
      // Signal the corresponding semaphore twice (for both copies).
      ScrollView *sv = svmap[window_id];
      if (sv != nullptr) {
        sv->Signal();
        sv->Signal();
      }
    }
    svmap_mu->unlock();
    // Wait until a new message appears in the input stream_.
    do {
      message = ScrollView::GetStream()->Receive();
    } while (message == nullptr);
  }
}
// Table to implement the color index values in the old system.
// Each entry is {red, green, blue, alpha}, indexed by ScrollView::Color.
static const uint8_t table_colors[ScrollView::GREEN_YELLOW + 1][4] = {
    {0, 0, 0, 0},         // NONE (transparent)
    {0, 0, 0, 255},       // BLACK.
    {255, 255, 255, 255}, // WHITE.
    {255, 0, 0, 255},     // RED.
    {255, 255, 0, 255},   // YELLOW.
    {0, 255, 0, 255},     // GREEN.
    {0, 255, 255, 255},   // CYAN.
    {0, 0, 255, 255},     // BLUE.
    {255, 0, 255, 255},   // MAGENTA.
    {0, 128, 255, 255},   // AQUAMARINE.
    {0, 0, 64, 255},      // DARK_SLATE_BLUE.
    {128, 128, 255, 255}, // LIGHT_BLUE.
    {64, 64, 255, 255},   // MEDIUM_BLUE.
    {0, 0, 32, 255},      // MIDNIGHT_BLUE.
    {0, 0, 128, 255},     // NAVY_BLUE.
    {192, 192, 255, 255}, // SKY_BLUE.
    {64, 64, 128, 255},   // SLATE_BLUE.
    {32, 32, 64, 255},    // STEEL_BLUE.
    {255, 128, 128, 255}, // CORAL.
    {128, 64, 0, 255},    // BROWN.
    {128, 128, 0, 255},   // SANDY_BROWN.
    {192, 192, 0, 255},   // GOLD.
    {192, 192, 128, 255}, // GOLDENROD.
    {0, 64, 0, 255},      // DARK_GREEN.
    {32, 64, 0, 255},     // DARK_OLIVE_GREEN.
    {64, 128, 0, 255},    // FOREST_GREEN.
    {128, 255, 0, 255},   // LIME_GREEN.
    {192, 255, 192, 255}, // PALE_GREEN.
    {192, 255, 0, 255},   // YELLOW_GREEN.
    {192, 192, 192, 255}, // LIGHT_GREY.
    {64, 64, 128, 255},   // DARK_SLATE_GREY.
    {64, 64, 64, 255},    // DIM_GREY.
    {128, 128, 128, 255}, // GREY.
    {64, 192, 0, 255},    // KHAKI.
    {255, 0, 192, 255},   // MAROON.
    {255, 128, 0, 255},   // ORANGE.
    {255, 128, 64, 255},  // ORCHID.
    {255, 192, 192, 255}, // PINK.
    {128, 0, 128, 255},   // PLUM.
    {255, 0, 64, 255},    // INDIAN_RED.
    {255, 64, 0, 255},    // ORANGE_RED.
    {255, 0, 192, 255},   // VIOLET_RED.
    {255, 192, 128, 255}, // SALMON.
    {128, 128, 0, 255},   // TAN.
    {0, 255, 255, 255},   // TURQUOISE.
    {0, 128, 128, 255},   // DARK_TURQUOISE.
    {192, 0, 255, 255},   // VIOLET.
    {128, 128, 0, 255},   // WHEAT.
    {128, 255, 0, 255}    // GREEN_YELLOW
};
/*******************************************************************************
 * Scrollview implementation.
 *******************************************************************************/
// Shared network connection to the ScrollView server (lazily created by the
// first Initialize call).
SVNetwork *ScrollView::stream_ = nullptr;
// Total number of windows created; also used to assign window ids.
int ScrollView::nr_created_windows_ = 0;
// NOTE(review): appears to be a running index for transmitted images —
// confirm in the Draw(Image, ...) overload.
int ScrollView::image_index_ = 0;
/// Calls Initialize with all arguments given.
ScrollView::ScrollView(const char *name, int x_pos, int y_pos, int x_size, int y_size,
                       int x_canvas_size, int y_canvas_size, bool y_axis_reversed,
                       const char *server_name) {
  Initialize(name, x_pos, y_pos, x_size, y_size, x_canvas_size, y_canvas_size, y_axis_reversed,
             server_name);
}
/// Calls Initialize with default argument for server_name_ ("localhost").
ScrollView::ScrollView(const char *name, int x_pos, int y_pos, int x_size, int y_size,
                       int x_canvas_size, int y_canvas_size, bool y_axis_reversed) {
  Initialize(name, x_pos, y_pos, x_size, y_size, x_canvas_size, y_canvas_size, y_axis_reversed,
             "localhost");
}
/// Calls Initialize with default arguments for server_name_ ("localhost")
/// and y_axis_reversed (false).
ScrollView::ScrollView(const char *name, int x_pos, int y_pos, int x_size, int y_size,
                       int x_canvas_size, int y_canvas_size) {
  Initialize(name, x_pos, y_pos, x_size, y_size, x_canvas_size, y_canvas_size, false, "localhost");
}
/// Sets up a ScrollView window, depending on the constructor variables.
void ScrollView::Initialize(const char *name, int x_pos, int y_pos, int x_size, int y_size,
                            int x_canvas_size, int y_canvas_size, bool y_axis_reversed,
                            const char *server_name) {
  // If this is the first ScrollView Window which gets created, there is no
  // network connection yet and we have to set it up in a different thread.
  if (stream_ == nullptr) {
    nr_created_windows_ = 0;
    stream_ = new SVNetwork(server_name, kSvPort);
    waiting_for_events_mu = new std::mutex();
    svmap_mu = new std::mutex();
    SendRawMessage("svmain = luajava.bindClass('com.google.scrollview.ScrollView')\n");
    // The message receiver runs detached for the lifetime of the process.
    std::thread t(&ScrollView::MessageReceiver);
    t.detach();
  }
  // Set up the variables on the clientside.
  nr_created_windows_++;
  event_handler_ = nullptr;
  event_handler_ended_ = false;
  y_axis_is_reversed_ = y_axis_reversed;
  y_size_ = y_canvas_size;
  window_name_ = name;
  // Window ids are assigned sequentially from the creation counter.
  window_id_ = nr_created_windows_;
  // Set up polygon buffering.
  points_ = new SVPolyLineBuffer;
  points_->empty = true;
  svmap_mu->lock();
  svmap[window_id_] = this;
  svmap_mu->unlock();
  for (auto &i : event_table_) {
    i = nullptr;
  }
  semaphore_ = new SVSemaphore();
  // Set up an actual Window on the client side.
  char message[kMaxMsgSize];
  snprintf(message, sizeof(message),
           "w%u = luajava.newInstance('com.google.scrollview.ui"
           ".SVWindow','%s',%u,%u,%u,%u,%u,%u,%u)\n",
           window_id_, window_name_, window_id_, x_pos, y_pos, x_size, y_size, x_canvas_size,
           y_canvas_size);
  SendRawMessage(message);
  // Each window gets its own detached event-dispatch thread.
  std::thread t(&ScrollView::StartEventHandler, this);
  t.detach();
}
/// Sits and waits for events on this window.
void ScrollView::StartEventHandler() {
  for (;;) {
    stream_->Flush();
    // Block until Signal() indicates that an event may be available.
    semaphore_->Wait();
    int serial = -1;
    int k = -1;
    mutex_.lock();
    // Check every table entry if it is valid and not already processed.
    // Pick the pending event with the smallest counter, i.e. the oldest.
    for (int i = 0; i < SVET_COUNT; i++) {
      if (event_table_[i] != nullptr && (serial < 0 || event_table_[i]->counter < serial)) {
        serial = event_table_[i]->counter;
        k = i;
      }
    }
    // If we didn't find anything we had an old alarm and just sleep again.
    if (k != -1) {
      // Take ownership of the event and release the lock before notifying,
      // so the handler cannot deadlock against SetEvent.
      auto new_event = std::move(event_table_[k]);
      mutex_.unlock();
      if (event_handler_ != nullptr) {
        event_handler_->Notify(new_event.get());
      }
      if (new_event->type == SVET_DESTROY) {
        // Signal the destructor that it is safe to terminate.
        event_handler_ended_ = true;
        return;
      }
    } else {
      mutex_.unlock();
    }
    // The thread should run as long as its associated window is alive.
  }
}
#endif // !GRAPHICS_DISABLED
ScrollView::~ScrollView() {
#ifndef GRAPHICS_DISABLED
  svmap_mu->lock();
  // Only tear down the remote window if it is still registered.
  if (svmap[window_id_] != nullptr) {
    svmap_mu->unlock();
    // So the event handling thread can quit.
    SendMsg("destroy()");
    AwaitEvent(SVET_DESTROY);
    svmap_mu->lock();
    svmap[window_id_] = nullptr;
    svmap_mu->unlock();
    // The event handler thread for this window *must* receive the
    // destroy event and set its pointer to this to nullptr before we allow
    // the destructor to exit.
    while (!event_handler_ended_) {
      Update();
    }
  } else {
    svmap_mu->unlock();
  }
  delete semaphore_;
  delete points_;
#endif // !GRAPHICS_DISABLED
}
#ifndef GRAPHICS_DISABLED
/// Send a message to the server, attaching the window id.
void ScrollView::SendMsg(const char *format, ...) {
  // Flush buffered polyline points first so drawing order is preserved.
  if (!points_->empty) {
    SendPolygon();
  }
  va_list args;
  char message[kMaxMsgSize - 4]; // Leave headroom for the "w<id>:" prefix.
  va_start(args, format); // variable list
  vsnprintf(message, sizeof(message), format, args);
  va_end(args);
  // Prepend the window id so the server routes the command to this window.
  char form[kMaxMsgSize];
  snprintf(form, sizeof(form), "w%u:%s\n", window_id_, message);
  stream_->Send(form);
}
/// Send a message to the server without a
/// window id. Used for global events like exit().
void ScrollView::SendRawMessage(const char *msg) {
  stream_->Send(msg);
}
/// Add an Event Listener to this ScrollView Window.
/// Only one listener is kept; a new call replaces the previous one.
void ScrollView::AddEventHandler(SVEventHandler *listener) {
  event_handler_ = listener;
}
// Wakes up the event-dispatch thread of this window (see StartEventHandler).
void ScrollView::Signal() {
  semaphore_->Signal();
}
// Stores two copies of the event: one under its specific type and one under
// SVET_ANY, so both kinds of waiters can pick it up.
void ScrollView::SetEvent(const SVEvent *svevent) {
  // Copy event
  auto specific = svevent->copy();
  auto any = svevent->copy();
  // The ANY copy gets the next counter value so it is processed after the
  // specific copy.
  any->counter = specific->counter + 1;
  // Place both events into the queue.
  std::lock_guard<std::mutex> guard(mutex_);
  event_table_[specific->type] = std::move(specific);
  event_table_[SVET_ANY] = std::move(any);
}
/// Block until an event of the given type is received.
/// Ownership of the returned SVEvent is transferred to the caller via the
/// std::unique_ptr; no manual deletion is required. (May be null if no
/// event was handed over before the semaphore fired.)
std::unique_ptr<SVEvent> ScrollView::AwaitEvent(SVEventType type) {
  // Initialize the waiting semaphore.
  auto *sem = new SVSemaphore();
  // Register this (window, type) pair so MessageReceiver can hand the
  // matching event over and wake us.
  std::pair<ScrollView *, SVEventType> ea(this, type);
  waiting_for_events_mu->lock();
  waiting_for_events[ea] = {sem, nullptr};
  waiting_for_events_mu->unlock();
  // Wait on it, but first flush.
  stream_->Flush();
  sem->Wait();
  // Process the event we got woken up for (its in waiting_for_events pair).
  waiting_for_events_mu->lock();
  auto ret = std::move(waiting_for_events[ea].second);
  waiting_for_events.erase(ea);
  delete sem;
  waiting_for_events_mu->unlock();
  return ret;
}
// Send the current buffered polygon (if any) and clear it.
void ScrollView::SendPolygon() {
  if (!points_->empty) {
    points_->empty = true; // Allows us to use SendMsg.
    int length = points_->xcoords.size();
    // length == 1 corresponds to 2 SetCursors in a row and only the
    // last setCursor has any effect.
    if (length == 2) {
      // An isolated line!
      SendMsg("drawLine(%d,%d,%d,%d)", points_->xcoords[0], points_->ycoords[0],
              points_->xcoords[1], points_->ycoords[1]);
    } else if (length > 2) {
      // A polyline: announce the point count, stream all coordinate pairs
      // as one raw comma-separated message, then issue the draw command.
      SendMsg("createPolyline(%d)", length);
      char coordpair[kMaxIntPairSize];
      std::string decimal_coords;
      for (int i = 0; i < length; ++i) {
        snprintf(coordpair, kMaxIntPairSize, "%d,%d,", points_->xcoords[i], points_->ycoords[i]);
        decimal_coords += coordpair;
      }
      decimal_coords += '\n';
      SendRawMessage(decimal_coords.c_str());
      SendMsg("drawPolyline()");
    }
    points_->xcoords.clear();
    points_->ycoords.clear();
  }
}
/*******************************************************************************
 * LUA "API" functions.
 *******************************************************************************/
// Sets the position from which to draw to (x,y).
// Flushes any pending polyline first, then starts a new one at (x,y).
void ScrollView::SetCursor(int x, int y) {
  SendPolygon();
  DrawTo(x, y);
}
// Draws from the current position to (x,y) and sets the new position to it.
// Points are only buffered here; SendPolygon transmits them.
void ScrollView::DrawTo(int x, int y) {
  points_->xcoords.push_back(x);
  points_->ycoords.push_back(TranslateYCoordinate(y));
  points_->empty = false;
}
// Draw a line using the current pen color.
void ScrollView::Line(int x1, int y1, int x2, int y2) {
  const bool have_points = !points_->xcoords.empty();
  if (have_points && x1 == points_->xcoords.back() &&
      TranslateYCoordinate(y1) == points_->ycoords.back()) {
    // The buffered polyline already ends at (x1, y1): just extend it.
    DrawTo(x2, y2);
  } else if (have_points && x2 == points_->xcoords.back() &&
             TranslateYCoordinate(y2) == points_->ycoords.back()) {
    // The buffered polyline already ends at (x2, y2): extend the other way.
    DrawTo(x1, y1);
  } else {
    // Start a fresh line segment.
    SetCursor(x1, y1);
    DrawTo(x2, y2);
  }
}
// Set the visibility of the window.
void ScrollView::SetVisible(bool visible) {
  // Select the complete command up front; the transmitted bytes are
  // identical to the original two-branch version.
  SendMsg(visible ? "setVisible(true)" : "setVisible(false)");
}
// Set the alwaysOnTop flag.
void ScrollView::AlwaysOnTop(bool b) {
  // Single call with the full command string chosen by the flag.
  SendMsg(b ? "setAlwaysOnTop(true)" : "setAlwaysOnTop(false)");
}
// Adds a message entry to the message box.
void ScrollView::AddMessage(const char *message) {
  char form[kMaxMsgSize];
  snprintf(form, sizeof(form), "w%u:%s", window_id_, message);
  // AddEscapeChars returns a heap-allocated string; own it so that it is
  // released on every path.
  std::unique_ptr<char[]> esc(AddEscapeChars(form));
  SendMsg("addMessage(\"%s\")", esc.get());
}
// Adds a printf-style formatted message entry to the message box.
void ScrollView::AddMessageF(const char *format, ...) {
  va_list args;
  char message[kMaxMsgSize - 4]; // Same headroom convention as SendMsg.
  va_start(args, format); // variable list
  vsnprintf(message, sizeof(message), format, args);
  va_end(args);
  AddMessage(message);
}
// Set a messagebox.
void ScrollView::AddMessageBox() {
  SendMsg("addMessageBox()");
}
// Exit the client completely (and notify the server of it).
// Note: terminates the whole process via exit(0).
void ScrollView::Exit() {
  SendRawMessage("svmain:exit()");
  exit(0);
}
// Clear the canvas.
void ScrollView::Clear() {
  SendMsg("clear()");
}
// Set the stroke width.
void ScrollView::Stroke(float width) {
  SendMsg("setStrokeWidth(%f)", width);
}
// Draw a rectangle using the current pen color.
// The rectangle is filled with the current brush color.
void ScrollView::Rectangle(int x1, int y1, int x2, int y2) {
  // A degenerate (zero-area) rectangle locks up the viewer, so skip it.
  if (x1 == x2 && y1 == y2) {
    return;
  }
  const int ty1 = TranslateYCoordinate(y1);
  const int ty2 = TranslateYCoordinate(y2);
  SendMsg("drawRectangle(%d,%d,%d,%d)", x1, ty1, x2, ty2);
}
// Draw an ellipse using the current pen color.
// The ellipse is filled with the current brush color.
// NOTE(review): width/height are ints formatted with %u — fine for
// non-negative values; confirm callers never pass negatives.
void ScrollView::Ellipse(int x1, int y1, int width, int height) {
  SendMsg("drawEllipse(%d,%d,%u,%u)", x1, TranslateYCoordinate(y1), width, height);
}
// Set the pen color to the given RGB values.
void ScrollView::Pen(int red, int green, int blue) {
  SendMsg("pen(%d,%d,%d)", red, green, blue);
}
// Set the pen color to the given RGBA values.
void ScrollView::Pen(int red, int green, int blue, int alpha) {
  SendMsg("pen(%d,%d,%d,%d)", red, green, blue, alpha);
}
// Set the brush color to the given RGB values.
void ScrollView::Brush(int red, int green, int blue) {
  SendMsg("brush(%d,%d,%d)", red, green, blue);
}
// Set the brush color to the given RGBA values.
void ScrollView::Brush(int red, int green, int blue, int alpha) {
  SendMsg("brush(%d,%d,%d,%d)", red, green, blue, alpha);
}
// Set the attributes for future Text(..) calls.
void ScrollView::TextAttributes(const char *font, int pixel_size, bool bold, bool italic,
                                bool underlined) {
  // Translate each boolean into the literal expected by the server.
  const char *b = bold ? "true" : "false";
  const char *i = italic ? "true" : "false";
  const char *u = underlined ? "true" : "false";
  SendMsg("textAttributes('%s',%u,%s,%s,%s)", font, pixel_size, b, i, u);
}
// Draw text at the given coordinates, using the attributes previously set
// by TextAttributes().
void ScrollView::Text(int x, int y, const char *mystring) {
  SendMsg("drawText(%d,%d,'%s')", x, TranslateYCoordinate(y), mystring);
}
// Open and draw an image given a file name at (x,y).
void ScrollView::Draw(const char *image, int x_pos, int y_pos) {
  SendMsg("openImage('%s')", image);
  SendMsg("drawImage('%s',%d,%d)", image, x_pos, TranslateYCoordinate(y_pos));
}
// Add new checkboxmenuentry to menubar.
void ScrollView::MenuItem(const char *parent, const char *name, int cmdEvent, bool flag) {
  if (parent == nullptr) {
    parent = "";
  }
  // Pick the format string according to the checkbox state; the transmitted
  // bytes are identical to the original two-branch version.
  const char *fmt =
      flag ? "addMenuBarItem('%s','%s',%d,true)" : "addMenuBarItem('%s','%s',%d,false)";
  SendMsg(fmt, parent, name, cmdEvent);
}
// Add new menuentry to menubar.
void ScrollView::MenuItem(const char *parent, const char *name, int cmdEvent) {
  if (parent == nullptr) {
    parent = ""; // Treat a null parent as the top level.
  }
  SendMsg("addMenuBarItem('%s','%s',%d)", parent, name, cmdEvent);
}
// Add new submenu to menubar.
void ScrollView::MenuItem(const char *parent, const char *name) {
  if (parent == nullptr) {
    parent = ""; // Treat a null parent as the top level.
  }
  SendMsg("addMenuBarItem('%s','%s')", parent, name);
}
// Add new submenu to popupmenu.
void ScrollView::PopupItem(const char *parent, const char *name) {
  if (parent == nullptr) {
    parent = ""; // Treat a null parent as the top level.
  }
  SendMsg("addPopupMenuItem('%s','%s')", parent, name);
}
// Add new submenuentry to popupmenu.
void ScrollView::PopupItem(const char *parent, const char *name, int cmdEvent, const char *value,
                           const char *desc) {
  if (parent == nullptr) {
    parent = "";
  }
  // Own the escaped heap copies so they are released on every path.
  std::unique_ptr<char[]> esc(AddEscapeChars(value));
  std::unique_ptr<char[]> esc2(AddEscapeChars(desc));
  SendMsg("addPopupMenuItem('%s','%s',%d,'%s','%s')", parent, name, cmdEvent, esc.get(),
          esc2.get());
}
// Send an update message for a single window.
void ScrollView::UpdateWindow() {
  SendMsg("update()");
}
// Note: this is an update to all windows.
// Walks the global window map under its mutex and sends an update
// message to every still-open window.
void ScrollView::Update() {
  std::lock_guard<std::mutex> guard(*svmap_mu);
  for (auto &iter : svmap) {
    if (iter.second != nullptr) {
      iter.second->UpdateWindow();
    }
  }
}
// Set the pen color, using an enum value (e.g. ScrollView::ORANGE),
// by looking up its RGBA components in table_colors.
void ScrollView::Pen(Color color) {
  Pen(table_colors[color][0], table_colors[color][1], table_colors[color][2],
      table_colors[color][3]);
}
// Set the brush color, using an enum value (e.g. ScrollView::ORANGE),
// by looking up its RGBA components in table_colors.
void ScrollView::Brush(Color color) {
  Brush(table_colors[color][0], table_colors[color][1], table_colors[color][2],
        table_colors[color][3]);
}
// Shows a modal Input Dialog which can return any kind of String.
// The returned buffer is heap-allocated; the caller must delete[] it.
char *ScrollView::ShowInputDialog(const char *msg) {
  SendMsg("showInputDialog(\"%s\")", msg);
  // Discard all events until the input event carrying the answer arrives.
  auto ev = AwaitEvent(SVET_INPUT);
  const size_t len = strlen(ev->parameter);
  char *answer = new char[len + 1];
  memcpy(answer, ev->parameter, len + 1);
  return answer;
}
// Shows a modal Yes/No Dialog which will return 'y' or 'n'.
int ScrollView::ShowYesNoDialog(const char *msg) {
  SendMsg("showYesNoDialog(\"%s\")", msg);
  // Discard all events until the answering input event arrives, then
  // report its first character.
  return AwaitEvent(SVET_INPUT)->parameter[0];
}
// Zoom the window to the rectangle given upper left corner and
// lower right corner.
void ScrollView::ZoomToRectangle(int x1, int y1, int x2, int y2) {
  y1 = TranslateYCoordinate(y1);
  y2 = TranslateYCoordinate(y2);
  // Normalize the corners so (left, top) <= (right, bottom) before sending.
  const int left = std::min(x1, x2);
  const int top = std::min(y1, y2);
  const int right = std::max(x1, x2);
  const int bottom = std::max(y1, y2);
  SendMsg("zoomRectangle(%d,%d,%d,%d)", left, top, right, bottom);
}
// Send an image of type Pix.
// The image is PNG-encoded in memory, announced to the server with a
// readImage message, then transmitted as a base64 string.
void ScrollView::Draw(Image image, int x_pos, int y_pos) {
  l_uint8 *data;
  size_t size;
  pixWriteMem(&data, &size, image, IFF_PNG);
  // Base64 output is 4 characters per 3 input bytes, rounded up.
  int base64_len = (size + 2) / 3 * 4;
  y_pos = TranslateYCoordinate(y_pos);
  SendMsg("readImage(%d,%d,%d)", x_pos, y_pos, base64_len);
  // Base64 encode the data.
  const char kBase64Table[64] = {
      'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
      'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
      'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
      'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/',
  };
  char *base64 = new char[base64_len + 1];
  // Prefill with '=' so any unused trailing positions become base64 padding.
  memset(base64, '=', base64_len);
  base64[base64_len] = '\0';
  int remainder = 0; // Bits carried over from the previous byte.
  int bits_left = 0; // Number of carried bits (0, 2 or 4).
  int code_len = 0;
  for (size_t i = 0; i < size; ++i) {
    // Combine carried bits with the top bits of the current byte to form
    // the next 6-bit code.
    int code = (data[i] >> (bits_left + 2)) | remainder;
    base64[code_len++] = kBase64Table[code & 63];
    bits_left += 2;
    remainder = data[i] << (6 - bits_left);
    if (bits_left == 6) {
      // A full extra 6-bit code has accumulated; emit it and reset the carry.
      base64[code_len++] = kBase64Table[remainder & 63];
      bits_left = 0;
      remainder = 0;
    }
  }
  if (bits_left > 0) {
    // Flush the final partial code; the prefilled '=' supplies the padding.
    base64[code_len++] = kBase64Table[remainder & 63];
  }
  SendRawMessage(base64);
  delete[] base64;
  lept_free(data);
}
// Escapes the ' character with a \, so it can be processed by LUA.
// Note: The caller will have to make sure it deletes the newly allocated item.
// Fix: the output buffer is now sized from the input (worst case every
// character is a quote and doubles in size) instead of the previous
// fixed kMaxMsgSize allocation, which could overflow on long inputs.
char *ScrollView::AddEscapeChars(const char *input) {
  const char *nextptr = strchr(input, '\'');
  const char *lastptr = input;
  // Worst case: every input character gets an escape, plus the terminator.
  char *message = new char[2 * strlen(input) + 1];
  int pos = 0;
  while (nextptr != nullptr) {
    // Copy everything up to (but not including) the quote, then insert
    // the escaping backslash in front of it.
    strncpy(message + pos, lastptr, nextptr - lastptr);
    pos += nextptr - lastptr;
    message[pos] = '\\';
    pos += 1;
    lastptr = nextptr;
    nextptr = strchr(nextptr + 1, '\'');
  }
  // Copy the tail (including the final quote, if any, and the terminator).
  strcpy(message + pos, lastptr);
  return message;
}
// Inverse the Y axis if the coordinates are actually inversed.
// Flips y around the window height when the y axis is reversed.
int ScrollView::TranslateYCoordinate(int y) {
  return y_axis_is_reversed_ ? y_size_ - y : y;
}
// Blocks until the user either clicks in the window or provides keyboard
// input; returns the first character of the input, or '\0' for a click.
char ScrollView::Wait() {
  // Wait till an input or click event (all others are thrown away)
  char ret = '\0';
  SVEventType ev_type = SVET_ANY;
  do {
    std::unique_ptr<SVEvent> ev(AwaitEvent(SVET_ANY));
    ev_type = ev->type;
    if (ev_type == SVET_INPUT) {
      ret = ev->parameter[0];
    }
  } while (ev_type != SVET_INPUT && ev_type != SVET_CLICK);
  return ret;
}
#endif // !GRAPHICS_DISABLED
} // namespace tesseract
|
2301_81045437/tesseract
|
src/viewer/scrollview.cpp
|
C++
|
apache-2.0
| 26,985
|
///////////////////////////////////////////////////////////////////////
// File: scrollview.h
// Description: ScrollView
// Author: Joern Wanke
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// ScrollView is designed as an UI which can be run remotely. This is the
// client code for it, the server part is written in java. The client consists
// mainly of 2 parts:
// The "core" ScrollView which sets up the remote connection,
// takes care of event handling etc.
// The other part of ScrollView consists of predefined API calls through LUA,
// which can basically be used to get a zoomable canvas in which it is possible
// to draw lines, text etc.
// Technically, thanks to LUA, its even possible to bypass the here defined LUA
// API calls at all and generate a java user interface from scratch (or
// basically generate any kind of java program, possibly even dangerous ones).
#ifndef TESSERACT_VIEWER_SCROLLVIEW_H_
#define TESSERACT_VIEWER_SCROLLVIEW_H_
#include "image.h"
#include <tesseract/export.h>
#include <cstdio>
#include <memory>
#include <mutex>
namespace tesseract {
#if !defined(__GNUC__) && !defined(__attribute__)
# define __attribute__(attr) // compiler without support for __attribute__
#endif
class ScrollView;
class SVNetwork;
class SVSemaphore;
struct SVPolyLineBuffer;
// Kinds of events the ScrollView server can deliver to the client.
enum SVEventType {
  SVET_DESTROY, // Window has been destroyed by user.
  SVET_EXIT, // User has destroyed the last window by clicking on the 'X'.
  SVET_CLICK, // Left button pressed.
  SVET_SELECTION, // Left button selection.
  SVET_INPUT, // There is some input (single key or a whole string).
  SVET_MOUSE, // The mouse has moved with a button pressed.
  SVET_MOTION, // The mouse has moved with no button pressed.
  SVET_HOVER, // The mouse has stayed still for a second.
  SVET_POPUP, // A command selected through a popup menu.
  SVET_MENU, // A command selected through the menubar.
  SVET_ANY, // Any of the above.
  SVET_COUNT // Array sizing.
};
// A single event delivered to the client (click, selection, input,
// menu command, ...). Owns its parameter string.
struct SVEvent {
  ~SVEvent() {
    delete[] parameter;
  }
  // Returns a copy of this event.
  std::unique_ptr<SVEvent> copy() const;
  SVEventType type = SVET_DESTROY; // What kind of event.
  ScrollView *window = nullptr; // Window event relates to.
  char *parameter = nullptr; // Any string that might have been passed as argument.
  int x = 0; // Coords of click or selection.
  int y = 0;
  int x_size = 0; // Size of selection.
  int y_size = 0;
  int command_id = 0; // The ID of the possibly associated event (e.g. MENU)
  int counter = 0; // Used to detect which kind of event to process next.
  SVEvent() = default;
  SVEvent(const SVEvent &);
  SVEvent &operator=(const SVEvent &);
};
// The SVEventHandler class is used for Event handling: If you register your
// class as SVEventHandler to a ScrollView Window, the SVEventHandler will be
// called whenever an appropriate event occurs.
class TESS_API SVEventHandler {
 public:
  virtual ~SVEventHandler();
  // Gets called by the SV Window. Does nothing on default, overwrite this
  // to implement the desired behaviour.
  virtual void Notify(const SVEvent *sve) {
    (void)sve;
  }
};
// The ScrollView class provides the external API to the scrollviewer process.
// The scrollviewer process manages windows and displays images, graphics and
// text while allowing the user to zoom and scroll the windows arbitrarily.
// Each ScrollView class instance represents one window, and stuff is drawn in
// the window through method calls on the class. The constructor is used to
// create the class instance (and the window).
class TESS_API ScrollView {
 public:
  // Color enum for pens and brushes.
  enum Color {
    NONE,
    BLACK,
    WHITE,
    RED,
    YELLOW,
    GREEN,
    CYAN,
    BLUE,
    MAGENTA,
    AQUAMARINE,
    DARK_SLATE_BLUE,
    LIGHT_BLUE,
    MEDIUM_BLUE,
    MIDNIGHT_BLUE,
    NAVY_BLUE,
    SKY_BLUE,
    SLATE_BLUE,
    STEEL_BLUE,
    CORAL,
    BROWN,
    SANDY_BROWN,
    GOLD,
    GOLDENROD,
    DARK_GREEN,
    DARK_OLIVE_GREEN,
    FOREST_GREEN,
    LIME_GREEN,
    PALE_GREEN,
    YELLOW_GREEN,
    LIGHT_GREY,
    DARK_SLATE_GREY,
    DIM_GREY,
    GREY,
    KHAKI,
    MAROON,
    ORANGE,
    ORCHID,
    PINK,
    PLUM,
    INDIAN_RED,
    ORANGE_RED,
    VIOLET_RED,
    SALMON,
    TAN,
    TURQUOISE,
    DARK_TURQUOISE,
    VIOLET,
    WHEAT,
    GREEN_YELLOW // Make sure this one is last.
  };
  // Destroys the window.
  ~ScrollView();
#ifndef GRAPHICS_DISABLED
  // Create a window. The pixel size of the window may be 0,0, in which case
  // a default size is selected based on the size of your canvas.
  // The canvas may not be 0,0 in size!
  ScrollView(const char *name, int x_pos, int y_pos, int x_size, int y_size, int x_canvas_size,
             int y_canvas_size);
  // With a flag whether the y axis is reversed.
  ScrollView(const char *name, int x_pos, int y_pos, int x_size, int y_size, int x_canvas_size,
             int y_canvas_size, bool y_axis_reversed);
  // Connect to a server other than localhost.
  ScrollView(const char *name, int x_pos, int y_pos, int x_size, int y_size, int x_canvas_size,
             int y_canvas_size, bool y_axis_reversed, const char *server_name);
  /*******************************************************************************
   * Event handling
   * To register as listener, the class has to derive from the SVEventHandler
   * class, which consists of a notifyMe(SVEvent*) function that should be
   * overwritten to process the event the way you want.
   *******************************************************************************/
  // Add an Event Listener to this ScrollView Window.
  void AddEventHandler(SVEventHandler *listener);
  // Block until an event of the given type is received.
  std::unique_ptr<SVEvent> AwaitEvent(SVEventType type);
  /*******************************************************************************
   * Getters and Setters
   *******************************************************************************/
  // Returns the title of the window.
  const char *GetName() {
    return window_name_;
  }
  // Returns the unique ID of the window.
  int GetId() {
    return window_id_;
  }
  /*******************************************************************************
   * API functions for LUA calls
   * the implementations for these can be found in svapi.cc
   * (keep in mind that the window is actually created through the ScrollView
   * constructor, so this is not listed here)
   *******************************************************************************/
  // Draw an image on (x,y).
  void Draw(Image image, int x_pos, int y_pos);
  // Flush buffers and update display of all windows.
  static void Update();
  // Exit the program.
  static void Exit();
  // Update the contents of a specific window.
  void UpdateWindow();
  // Erase all content from the window, but do not destroy it.
  void Clear();
  // Set pen color with an enum.
  void Pen(Color color);
  // Set pen color to RGB (0-255).
  void Pen(int red, int green, int blue);
  // Set pen color to RGBA (0-255).
  void Pen(int red, int green, int blue, int alpha);
  // Set brush color with an enum.
  void Brush(Color color);
  // Set brush color to RGB (0-255).
  void Brush(int red, int green, int blue);
  // Set brush color to RGBA (0-255).
  void Brush(int red, int green, int blue, int alpha);
  // Set attributes for future text, like font name (e.g.
  // "Times New Roman"), font size etc..
  // Note: The underlined flag is currently not supported
  void TextAttributes(const char *font, int pixel_size, bool bold, bool italic, bool underlined);
  // Draw line from (x1,y1) to (x2,y2) with the current pencolor.
  void Line(int x1, int y1, int x2, int y2);
  // Set the stroke width of the pen.
  void Stroke(float width);
  // Draw a rectangle given upper left corner and lower right corner.
  // The current pencolor is used as outline, the brushcolor to fill the shape.
  void Rectangle(int x1, int y1, int x2, int y2);
  // Draw an ellipse centered on (x,y).
  // The current pencolor is used as outline, the brushcolor to fill the shape.
  void Ellipse(int x, int y, int width, int height);
  // Draw text with the current pencolor
  void Text(int x, int y, const char *mystring);
  // Draw an image from a local filename. This should be faster than
  // createImage. WARNING: This only works on a local machine. This also only
  // works image types supported by java (like bmp,jpeg,gif,png) since the image
  // is opened by the server.
  void Draw(const char *image, int x_pos, int y_pos);
  // Set the current position to draw from (x,y). In conjunction with...
  void SetCursor(int x, int y);
  // ...this function, which draws a line from the current to (x,y) and then
  // sets the new position to the new (x,y), this can be used to easily draw
  // polygons using vertices
  void DrawTo(int x, int y);
  // Set the SVWindow visible/invisible.
  void SetVisible(bool visible);
  // Set the SVWindow always on top or not always on top.
  void AlwaysOnTop(bool b);
  // Shows a modal dialog with "msg" as question and returns 'y' or 'n'.
  int ShowYesNoDialog(const char *msg);
  // Shows a modal dialog with "msg" as question and returns a char* string.
  // Constraint: As return, only words (e.g. no whitespaces etc.) are allowed.
  char *ShowInputDialog(const char *msg);
  // Adds a messagebox to the SVWindow. This way, it can show the messages...
  void AddMessageBox();
  // ...which can be added by this command.
  // This is intended as an "debug" output window.
  void AddMessage(const char *message);
  // printf-style convenience wrapper for AddMessage.
  void AddMessageF(const char *format, ...) __attribute__((format(printf, 2, 3)));
  // Zoom the window to the rectangle given upper left corner and
  // lower right corner.
  void ZoomToRectangle(int x1, int y1, int x2, int y2);
  // Custom messages (manipulating java code directly) can be sent through this.
  // Send a message to the server and attach the Id of the corresponding window.
  // Note: This should only be called if you know what you are doing, since
  // you are fiddling with the Java objects on the server directly. Calling
  // this just for fun will likely break your application!
  // It is public so you can actually take use of the LUA functionalities, but
  // be careful!
  void SendMsg(const char* msg, ...) __attribute__((format(printf, 2, 3)));
  // Custom messages (manipulating java code directly) can be sent through this.
  // Send a message to the server without adding the
  // window id. Used for global events like Exit().
  // Note: This should only be called if you know what you are doing, since
  // you are fiddling with the Java objects on the server directly. Calling
  // this just for fun will likely break your application!
  // It is public so you can actually take use of the LUA functionalities, but
  // be careful!
  static void SendRawMessage(const char *msg);
  /*******************************************************************************
   * Add new menu entries to parent. If parent is "", the entry gets added to
   *the main menubar (toplevel).
   *******************************************************************************/
  // This adds a new submenu to the menubar.
  void MenuItem(const char *parent, const char *name);
  // This adds a new (normal) menu entry with an associated eventID, which
  // should be unique among menubar eventIDs.
  void MenuItem(const char *parent, const char *name, int cmdEvent);
  // This adds a new checkbox entry, which might initially be flagged.
  void MenuItem(const char *parent, const char *name, int cmdEvent, bool flagged);
  // This adds a new popup submenu to the popup menu. If parent is "", the entry
  // gets added at "toplevel" popupmenu.
  void PopupItem(const char *parent, const char *name);
  // This adds a new popup entry with the associated eventID, which should be
  // unique among popup eventIDs.
  // If value and desc are given, on a click the server will ask you to modify
  // the value and return the new value.
  void PopupItem(const char *parent, const char *name, int cmdEvent, const char *value,
                 const char *desc);
  // Returns the correct Y coordinate for a window, depending on whether it
  // might have to be flipped (by ySize).
  int TranslateYCoordinate(int y);
  // Blocks until an input or click event arrives; returns the first input
  // character, or '\0' for a click.
  char Wait();
 private:
  // Transfers a binary Image.
  void TransferBinaryImage(Image image);
  // Transfers a gray scale Image.
  void TransferGrayImage(Image image);
  // Transfers a 32-Bit Image.
  void Transfer32bppImage(Image image);
  // Sets up ScrollView, depending on the variables from the constructor.
  void Initialize(const char *name, int x_pos, int y_pos, int x_size, int y_size, int x_canvas_size,
                  int y_canvas_size, bool y_axis_reversed, const char *server_name);
  // Send the current buffered polygon (if any) and clear it.
  void SendPolygon();
  // Start the message receiving thread.
  static void MessageReceiver();
  // Place an event into the event_table (synchronized).
  void SetEvent(const SVEvent *svevent);
  // Wake up the semaphore.
  void Signal();
  // Returns the unique, shared network stream.
  static SVNetwork *GetStream() {
    return stream_;
  }
  // Starts a new event handler.
  // Called asynchronously whenever a new window is created.
  void StartEventHandler();
  // Escapes the ' character with a \, so it can be processed by LUA.
  char *AddEscapeChars(const char *input);
  // The event handler for this window.
  SVEventHandler *event_handler_;
  // The name of the window.
  const char *window_name_;
  // The id of the window.
  int window_id_;
  // The points of the currently under-construction polyline.
  SVPolyLineBuffer *points_;
  // Whether the axis is reversed.
  bool y_axis_is_reversed_;
  // Set to true only after the event handler has terminated.
  bool event_handler_ended_;
  // If the y axis is reversed, flip all y values by ySize.
  int y_size_;
  // # of created windows (used to assign an id to each ScrollView* for svmap).
  static int nr_created_windows_;
  // Serial number of sent images to ensure that the viewer knows they
  // are distinct.
  static int image_index_;
  // The stream through which the c++ client is connected to the server.
  static SVNetwork *stream_;
  // Table of all the currently queued events.
  std::unique_ptr<SVEvent> event_table_[SVET_COUNT];
  // Mutex to access the event_table_ in a synchronized fashion.
  std::mutex mutex_;
  // Semaphore to the thread belonging to this window.
  SVSemaphore *semaphore_;
#endif // !GRAPHICS_DISABLED
};
} // namespace tesseract
#endif // TESSERACT_VIEWER_SCROLLVIEW_H_
|
2301_81045437/tesseract
|
src/viewer/scrollview.h
|
C++
|
apache-2.0
| 15,338
|
///////////////////////////////////////////////////////////////////////
// File: svmnode.cpp
// description_: ScrollView Menu Node
// Author: Joern Wanke
// Created: Thu Nov 29 2007
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// A SVMenuNode is an entity which contains the mapping from a menu entry on
// the server side to the corresponding associated commands on the client.
// It is designed to be a tree structure with a root node, which can then be
// used to generate the appropriate messages to the server to display the
// menu structure there.
// A SVMenuNode can both be used in the context_ of popup menus as well as
// menu bars.
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#ifndef GRAPHICS_DISABLED
#include "svmnode.h"
#include <cstring>
#include <iostream>
#include "scrollview.h"
namespace tesseract {
// Create the empty root menu node with just a caption. All other nodes
// should be added to this or one of the submenus.
SVMenuNode::SVMenuNode()
    : parent_(nullptr)
    , child_(nullptr)
    , next_(nullptr)
    , is_check_box_entry_(false)
    , toggle_value_(false)
    , cmd_event_(-1) {}
// Destructor for every node. Children and siblings are deleted by
// BuildMenu, not here.
SVMenuNode::~SVMenuNode() = default;
// Create a new sub menu node with just a caption. This is used to create
// nodes which act as parent nodes to other nodes (e.g. submenus).
// Returns the new node so further entries can be attached to it.
SVMenuNode *SVMenuNode::AddChild(const char *txt) {
  auto *node = new SVMenuNode(-1, txt, false, false);
  AddChild(node);
  return node;
}
// Create a "normal" menu node which is associated with a command event.
void SVMenuNode::AddChild(const char *txt, int command_event) {
  this->AddChild(new SVMenuNode(command_event, txt, false, false));
}
// Create a menu node with an associated value (which might be changed
// through the gui).
void SVMenuNode::AddChild(const char *txt, int command_event, const char *val) {
  this->AddChild(new SVMenuNode(command_event, txt, false, false, val));
}
// Create a menu node with an associated value and description.
void SVMenuNode::AddChild(const char *txt, int command_event, const char *val, const char *desc) {
  this->AddChild(new SVMenuNode(command_event, txt, false, false, val, desc));
}
// Create a flag (checkbox) menu node with initial toggle value tv.
void SVMenuNode::AddChild(const char *txt, int command_event, int tv) {
  this->AddChild(new SVMenuNode(command_event, txt, tv, true));
}
// Convenience constructor called from the different AddChild overloads to
// initialize all the values of a menu node in one place.
SVMenuNode::SVMenuNode(int command_event, const char *txt, int tv, bool check_box_entry,
                       const char *val, const char *desc)
    : parent_(nullptr)
    , child_(nullptr)
    , next_(nullptr)
    , is_check_box_entry_(check_box_entry)
    , toggle_value_(tv != 0)
    , cmd_event_(command_event)
    , text_(txt)
    , value_(val)
    , description_(desc) {}
// Add a child node to this menu node, appending it at the end of the
// sibling chain.
void SVMenuNode::AddChild(SVMenuNode *svmn) {
  svmn->parent_ = this;
  // Find the link that should receive the new node: either child_ itself
  // (no children yet) or the next_ pointer of the last sibling.
  SVMenuNode **link = &child_;
  while (*link != nullptr) {
    link = &(*link)->next_;
  }
  *link = svmn;
}
// Build a menu structure for the server and send the necessary messages.
// Should be called on the root node. If menu_bar is true, a menu_bar menu
// is built (e.g. on top of the window), if it is false a popup menu is
// built which gets shown by right clicking on the window.
// Recursively deletes its children and siblings after sending their
// messages; the root node itself remains owned by the caller.
void SVMenuNode::BuildMenu(ScrollView *sv, bool menu_bar) {
  // The root node (parent_ == nullptr) emits nothing itself.
  if ((parent_ != nullptr) && (menu_bar)) {
    if (is_check_box_entry_) {
      sv->MenuItem(parent_->text_.c_str(), text_.c_str(), cmd_event_, toggle_value_);
    } else {
      sv->MenuItem(parent_->text_.c_str(), text_.c_str(), cmd_event_);
    }
  } else if ((parent_ != nullptr) && (!menu_bar)) {
    // Popup entries with a description are editable value entries.
    if (description_.length() > 0) {
      sv->PopupItem(parent_->text_.c_str(), text_.c_str(), cmd_event_, value_.c_str(),
                    description_.c_str());
    } else {
      sv->PopupItem(parent_->text_.c_str(), text_.c_str());
    }
  }
  // Depth-first: children before siblings, freeing each subtree after use.
  if (child_ != nullptr) {
    child_->BuildMenu(sv, menu_bar);
    delete child_;
  }
  if (next_ != nullptr) {
    next_->BuildMenu(sv, menu_bar);
    delete next_;
  }
}
} // namespace tesseract
#endif // !GRAPHICS_DISABLED
|
2301_81045437/tesseract
|
src/viewer/svmnode.cpp
|
C++
|
apache-2.0
| 4,969
|
///////////////////////////////////////////////////////////////////////
// File: svmnode.h
// description_: ScrollView Menu Node
// Author: Joern Wanke
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// A SVMenuNode is an entity which contains the mapping from a menu entry on
// the server side to the corresponding associated commands on the client.
// It is designed to be a tree structure with a root node, which can then be
// used to generate the appropriate messages to the server to display the
// menu structure there.
// A SVMenuNode can both be used in the context_ of popup menus as well as
// menu bars.
#ifndef TESSERACT_VIEWER_SVMNODE_H_
#define TESSERACT_VIEWER_SVMNODE_H_
#ifndef GRAPHICS_DISABLED
#include <tesseract/export.h>
#include <string>
namespace tesseract {
class ScrollView;
class TESS_API SVMenuNode {
 public:
  // Creating the (empty) root menu node.
  SVMenuNode();
  // Destructor for every node.
  ~SVMenuNode();
  // Create a new sub menu node with just a caption. This is used to create
  // nodes which act as parent nodes to other nodes (e.g. submenus).
  SVMenuNode *AddChild(const char *txt);
  // Create a "normal" menu node which is associated with a command event.
  void AddChild(const char *txt, int command_event);
  // Create a flag (checkbox) menu node.
  void AddChild(const char *txt, int command_event, int tv);
  // Create a menu node with an associated value (which might be changed
  // through the gui).
  void AddChild(const char *txt, int command_event, const char *val);
  // Create a menu node with an associated value and description.
  void AddChild(const char *txt, int command_event, const char *val, const char *desc);
  // Build a menu structure for the server and send the necessary messages.
  // Should be called on the root node. If menu_bar is true, a menu_bar menu
  // is built (e.g. on top of the window), if it is false a popup menu is
  // built which gets shown by right clicking on the window.
  void BuildMenu(ScrollView *sv, bool menu_bar = true);
 private:
  // Constructor holding the actual node data.
  SVMenuNode(int command_event, const char *txt, int tv, bool check_box_entry, const char *val = "",
             const char *desc = "");
  // Adds a new menu node to the current node.
  void AddChild(SVMenuNode *svmn);
  // The parent node of this node.
  SVMenuNode *parent_;
  // The first child of this node.
  SVMenuNode *child_;
  // The next "sibling" of this node (e.g. same parent).
  SVMenuNode *next_;
  // Whether this menu node actually is a flag.
  bool is_check_box_entry_;
  // The value of the flag (if this menu node is a flag).
  bool toggle_value_;
  // The command event associated with a specific menu node. Should be unique.
  int cmd_event_;
  // The caption associated with a specific menu node.
  std::string text_;
  // The value of the menu node. (optional)
  std::string value_;
  // A description of the value. (optional)
  std::string description_;
};
} // namespace tesseract
#endif // !GRAPHICS_DISABLED
#endif // TESSERACT_VIEWER_SVMNODE_H_
|
2301_81045437/tesseract
|
src/viewer/svmnode.h
|
C++
|
apache-2.0
| 3,697
|
///////////////////////////////////////////////////////////////////////
// File: svutil.cpp
// Description: ScrollView Utilities
// Author: Joern Wanke
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// SVUtil contains the SVSync and SVNetwork classes, which are used for
// thread/process creation & synchronization and network connection.
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "svutil.h"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <memory>
#include <string>
#include <thread> // for std::this_thread
#include <vector>
#ifdef _WIN32
# pragma comment(lib, "Ws2_32.lib")
# include <winsock2.h> // for fd_set, send, ..
# include <ws2tcpip.h> // for addrinfo
#else
# include <arpa/inet.h>
# include <netdb.h>
# include <netinet/in.h>
# include <semaphore.h>
# include <sys/select.h>
# include <sys/socket.h>
# include <csignal>
# ifdef __linux__
# include <sys/prctl.h>
# endif
# include <unistd.h>
#endif
#if defined(_WIN32) && !defined(__GNUC__)
# define strtok_r(str, delim, saveptr) strtok_s(str, delim, saveptr)
#endif /* _WIN32 && !__GNUC__ */
#ifndef GRAPHICS_DISABLED
namespace tesseract {
// Maximum size in bytes of a single message exchanged with the server;
// bounds the receive buffer used in SVNetwork::Receive.
const int kMaxMsgSize = 4096;
// Starts a new process running "executable args".
// On Windows this uses CreateProcess; elsewhere it forks and execs,
// splitting args on unquoted spaces into an argv array in the child.
void SVSync::StartProcess(const char *executable, const char *args) {
  std::string proc;
  proc.append(executable);
  proc.append(" ");
  proc.append(args);
  std::cout << "Starting " << proc << std::endl;
# ifdef _WIN32
  STARTUPINFO start_info;
  PROCESS_INFORMATION proc_info;
  GetStartupInfo(&start_info);
  if (!CreateProcess(nullptr, const_cast<char *>(proc.c_str()), nullptr,
                     nullptr, FALSE, CREATE_NO_WINDOW | DETACHED_PROCESS,
                     nullptr, nullptr, &start_info, &proc_info))
    return;
# else
  int pid = fork();
  if (pid != 0) { // The father process returns
  } else {
#  ifdef __linux__
    // Make sure the java process terminates on exit, since its
    // broken socket detection seems to be useless.
    prctl(PR_SET_PDEATHSIG, 2, 0, 0, 0);
#  endif
    std::string mutable_args(args);
    // Upper bound on the number of arguments: one more than the spaces.
    int argc = 1;
    for (auto ch : mutable_args) {
      if (ch == ' ') {
        ++argc;
      }
    }
    std::unique_ptr<char *[]> argv(new char *[argc + 2]);
    std::string argv0(executable);
    argv[0] = &argv0[0];
    argv[1] = &mutable_args[0];
    argc = 2;
    // Split the args string in place on unquoted spaces; quotes are
    // stripped (replaced by spaces) and protect embedded spaces.
    bool inquote = false;
    for (int i = 0; mutable_args[i]; ++i) {
      if (!inquote && mutable_args[i] == ' ') {
        mutable_args[i] = '\0';
        argv[argc++] = &mutable_args[i + 1];
      } else if (mutable_args[i] == '"') {
        inquote = !inquote;
        mutable_args[i] = ' ';
      }
    }
    argv[argc] = nullptr;
    // Replace the child process image; only returns on failure.
    execvp(executable, argv.get());
  }
# endif
}
// Creates a semaphore with an initial count of zero.
// macOS has no unnamed POSIX semaphores, so a randomly named one is
// created (and immediately unlinked) there.
SVSemaphore::SVSemaphore() {
# ifdef _WIN32
  semaphore_ = CreateSemaphore(0, 0, 10, 0);
# elif defined(__APPLE__)
  auto name = std::to_string(random());
  sem_unlink(name.c_str());
  semaphore_ = sem_open(name.c_str(), O_CREAT, S_IWUSR, 0);
  if (semaphore_ == SEM_FAILED) {
    perror("sem_open");
  }
# else
  sem_init(&semaphore_, 0, 0);
# endif
}
// Releases the platform semaphore handle.
SVSemaphore::~SVSemaphore() {
# ifdef _WIN32
  CloseHandle(semaphore_);
# elif defined(__APPLE__)
  sem_close(semaphore_);
# else
  sem_close(&semaphore_);
# endif
}
// Increments the semaphore, waking one waiter if any.
void SVSemaphore::Signal() {
# ifdef _WIN32
  ReleaseSemaphore(semaphore_, 1, nullptr);
# elif defined(__APPLE__)
  sem_post(semaphore_);
# else
  sem_post(&semaphore_);
# endif
}
// Blocks until the semaphore count is positive, then decrements it.
void SVSemaphore::Wait() {
# ifdef _WIN32
  WaitForSingleObject(semaphore_, INFINITE);
# elif defined(__APPLE__)
  sem_wait(semaphore_);
# else
  sem_wait(&semaphore_);
# endif
}
// Place a message in the outgoing message buffer.
// Note: nothing is transmitted here; the actual send happens in Flush().
void SVNetwork::Send(const char *msg) {
  std::lock_guard<std::mutex> guard(mutex_send_);
  msg_buffer_out_.append(msg);
}
// Send the whole buffer.
// Loops because send() may transmit only part of the buffer at a time.
// Fix: a failed send() used to pass -1 to string::erase, whose size_t
// conversion silently discarded the entire buffer and treated the error
// as success. Now the loop stops on error/zero progress and keeps the
// unsent data so a later Flush() can retry.
void SVNetwork::Flush() {
  std::lock_guard<std::mutex> guard(mutex_send_);
  while (!msg_buffer_out_.empty()) {
    int i = send(stream_, msg_buffer_out_.c_str(), msg_buffer_out_.length(), 0);
    if (i <= 0) {
      // Socket error or no progress: give up without losing the buffer.
      return;
    }
    msg_buffer_out_.erase(0, i);
  }
}
// Receive a message from the server.
// This will always return one line of char* (denoted by \n).
// Returns nullptr on timeout, on a select()/recv() failure, or when the
// server closed the connection.
char *SVNetwork::Receive() {
  char *result = nullptr;
  if (buffer_ptr_ != nullptr) {
    result = strtok_r(nullptr, "\n", &buffer_ptr_);
  }
  // This means there is something left in the buffer and we return it.
  if (result != nullptr) {
    return result;
    // Otherwise, we read from the stream_.
  } else {
    buffer_ptr_ = nullptr;
    // The timeout length is not really important since we are looping anyway
    // until a new message is delivered.
    struct timeval tv;
    tv.tv_sec = 10;
    tv.tv_usec = 0;
    // Set the flags to return when the stream_ is ready to be read.
    fd_set readfds;
    FD_ZERO(&readfds);
    FD_SET(stream_, &readfds);
    int i = select(stream_ + 1, &readfds, nullptr, nullptr, &tv);
    // Timed out (0) or select() failed (-1): nothing to read.
    // Fix: the original only checked == 0 and went on to call recv()
    // after a select() error.
    if (i <= 0) {
      return nullptr;
    }
    // Read the message buffer.
    // NOTE(review): assumes msg_buffer_in_ has room for kMaxMsgSize + 1
    // bytes for the terminator written below - verify the allocation.
    i = recv(stream_, msg_buffer_in_, kMaxMsgSize, 0);
    // Server quit (0) or error (-1).
    if (i <= 0) {
      return nullptr;
    }
    msg_buffer_in_[i] = '\0';
    // Setup a new string tokenizer.
    return strtok_r(msg_buffer_in_, "\n", &buffer_ptr_);
  }
}
// Close the connection to the server.
void SVNetwork::Close() {
# ifdef _WIN32
  closesocket(stream_);
# else
  close(stream_);
# endif
  // Mark stream_ as invalid so later calls do not touch a stale descriptor.
  stream_ = -1;
}
// The program to invoke to start ScrollView.
// On Windows the JVM is launched directly; elsewhere a shell wrapper is
// used (see ScrollViewCommand for the script it runs).
static const char *ScrollViewProg() {
# ifdef _WIN32
  return "java -Xms512m -Xmx1024m";
# else
  return "sh";
# endif
}
// The arguments to the program to invoke to start ScrollView.
// Expands the given installation path into a complete argument string
// for ScrollViewProg().
static std::string ScrollViewCommand(const std::string &scrollview_path) {
  // Quote our paths on Windows to deal with spaces
# ifdef _WIN32
  const char kTemplate[] =
      "-Djava.library.path=\"%s\" -jar \"%s/ScrollView.jar\"";
# else
  const char kTemplate[] =
      "-c \"trap 'kill %%1' 0 1 2 ; java "
      "-Xms1024m -Xmx2048m -jar %s/ScrollView.jar"
      " & wait\"";
# endif
  const char *path = scrollview_path.c_str();
  // Worst case: the template plus (up to) two copies of the path.
  const size_t buffer_size = sizeof(kTemplate) + 2 * scrollview_path.size() + 1;
  std::vector<char> buffer(buffer_size);
# ifdef _WIN32
  snprintf(buffer.data(), buffer_size, kTemplate, path, path);
# else
  snprintf(buffer.data(), buffer_size, kTemplate, path);
# endif
  return std::string(buffer.data());
}
// Set up a connection to a ScrollView on hostname:port.
// If nothing is listening there, spawns a local ScrollView server process
// and loops (with 1s sleeps) until the connection is accepted.
SVNetwork::SVNetwork(const char *hostname, int port) {
  msg_buffer_in_ = new char[kMaxMsgSize + 1];
  msg_buffer_in_[0] = '\0';
  buffer_ptr_ = nullptr;
  struct addrinfo *addr_info = nullptr;
  // Restrict resolution to IPv4 stream (TCP) sockets.
  struct addrinfo hints = {0, PF_INET, SOCK_STREAM};
  auto port_string = std::to_string(port);
# ifdef _WIN32
  // Initialize Winsock
  WSADATA wsaData;
  int iResult = WSAStartup(MAKEWORD(2, 2), &wsaData);
  if (iResult != 0) {
    std::cerr << "WSAStartup failed: " << iResult << std::endl;
  }
# endif // _WIN32
  if (getaddrinfo(hostname, port_string.c_str(), &hints, &addr_info) != 0) {
    std::cerr << "Error resolving name for ScrollView host "
              << std::string(hostname) << ":" << port << std::endl;
# ifdef _WIN32
    WSACleanup();
# endif // _WIN32
  }
  if (addr_info == nullptr) {
    // Mark stream_ as invalid.
    stream_ = -1;
  } else {
    stream_ = socket(addr_info->ai_family, addr_info->ai_socktype,
                     addr_info->ai_protocol);
  }
  if (stream_ < 0) {
    std::cerr << "Failed to open socket" << std::endl;
  } else if (connect(stream_, addr_info->ai_addr, addr_info->ai_addrlen) < 0) {
    // If server is not there, we will start a new server as local child
    // process.
    const char *scrollview_path = getenv("SCROLLVIEW_PATH");
    if (scrollview_path == nullptr) {
# ifdef SCROLLVIEW_PATH
      // Stringize the compile-time SCROLLVIEW_PATH macro value.
# define _STR(a) # a
# define _XSTR(a) _STR(a)
      scrollview_path = _XSTR(SCROLLVIEW_PATH);
# undef _XSTR
# undef _STR
# else
      scrollview_path = ".";
# endif
    }
    const char *prog = ScrollViewProg();
    std::string command = ScrollViewCommand(scrollview_path);
    SVSync::StartProcess(prog, command.c_str());
    // Wait for server to show up.
    // Note: There is no exception handling in case the server never turns up.
    Close();
    for (;;) {
      stream_ = socket(addr_info->ai_family, addr_info->ai_socktype,
                       addr_info->ai_protocol);
      if (stream_ >= 0) {
        if (connect(stream_, addr_info->ai_addr, addr_info->ai_addrlen) == 0) {
          break;
        }
        // Not accepting yet: drop this socket and retry after a pause.
        Close();
        std::cout << "ScrollView: Waiting for server...\n";
        std::this_thread::sleep_for(std::chrono::seconds(1));
      }
    }
  }
# ifdef _WIN32
  // WSACleanup(); // This cause ScrollView windows is not displayed
# endif // _WIN32
  freeaddrinfo(addr_info);
}
// Destructor: closes the socket and frees the receive buffer.
SVNetwork::~SVNetwork() {
  Close();
  delete[] msg_buffer_in_;
}
} // namespace tesseract
#endif // !GRAPHICS_DISABLED
|
2301_81045437/tesseract
|
src/viewer/svutil.cpp
|
C++
|
apache-2.0
| 9,780
|
///////////////////////////////////////////////////////////////////////
// File: svutil.h
// Description: ScrollView Utilities
// Author: Joern Wanke
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
// SVUtil contains the SVSync, SVSemaphore and SVNetwork
// classes, which are used for thread/process creation & synchronization
// and network connection.
#ifndef TESSERACT_VIEWER_SVUTIL_H_
#define TESSERACT_VIEWER_SVUTIL_H_
#ifdef _WIN32
# include "host.h" // also includes windows.h
#else
# include <semaphore.h>
#endif
#include <mutex>
#include <string>
namespace tesseract {
/// The SVSync class provides functionality for Thread & Process Creation
class SVSync {
public:
  /// Starts a new process running the given executable with the given
  /// space-separated (optionally double-quoted) argument string.
  static void StartProcess(const char *executable, const char *args);
};
/// A semaphore class which encapsulates the main signaling
/// and wait abilities of semaphores for windows and unix.
class SVSemaphore {
public:
  /// Sets up a semaphore.
  SVSemaphore();
  /// Cleans up the semaphore.
  ~SVSemaphore();
  /// Signal a semaphore.
  void Signal();
  /// Wait on a semaphore.
  void Wait();

private:
#ifdef _WIN32
  HANDLE semaphore_; // Win32 semaphore handle
#elif defined(__APPLE__)
  sem_t *semaphore_; // named POSIX semaphore (macOS lacks unnamed ones)
#else
  sem_t semaphore_; // unnamed POSIX semaphore
#endif
};
/// The SVNetwork class takes care of the remote connection for ScrollView
/// This means setting up and maintaining a remote connection, sending and
/// receiving messages and closing the connection.
/// It is designed to work on both Linux and Windows.
class SVNetwork {
public:
  /// Set up a connection to hostname on port.
  SVNetwork(const char *hostname, int port);
  /// Destructor.
  ~SVNetwork();
  /// Put a message in the messagebuffer to the server and try to send it.
  void Send(const char *msg);
  /// Receive a message from the server.
  /// This will always return one line of char* (denoted by \\n).
  /// NOTE(review): unlike Send()/Flush(), Receive() is not guarded by
  /// mutex_send_ -- presumably it is only called from one thread; confirm.
  char *Receive();
  /// Close the connection to the server.
  void Close();
  /// Flush the buffer.
  void Flush();

private:
  /// The mutex for access to Send() and Flush().
  std::mutex mutex_send_;
  /// The actual stream_ to the server (-1 when not connected).
  int stream_;
  /// Stores the last received message-chunk from the server.
  char *msg_buffer_in_;
  /// Stores the messages which are supposed to go out.
  std::string msg_buffer_out_;
  /// Where we are at in our msg_buffer_in_
  char *buffer_ptr_; // strtok_r, strtok_s
};
} // namespace tesseract
#endif // TESSERACT_VIEWER_SVUTIL_H_
|
2301_81045437/tesseract
|
src/viewer/svutil.h
|
C++
|
apache-2.0
| 3,069
|
///////////////////////////////////////////////////////////////////////
// File: associate.cpp
// Description: Functions for scoring segmentation paths according to
// their character widths, gap widths and seam cuts.
// Author: Daria Antonova
// Created: Mon Mar 8 11:26:43 PDT 2010
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include <cmath>
#include <cstdio>
#include "associate.h"
#include "normalis.h"
#include "pageres.h"
namespace tesseract {
const float AssociateUtils::kMaxFixedPitchCharAspectRatio = 2.0f;
const float AssociateUtils::kMinGap = 0.03f;
// Computes character width, gap and seam statistics for the blob bundle
// covering matrix entries [col, row] of word_res and fills *stats.
// parent_stats/parent_path_length describe the path explored so far (may
// be nullptr/0 for the first blob). word_res and stats must not be null.
void AssociateUtils::ComputeStats(int col, int row, const AssociateStats *parent_stats,
                                  int parent_path_length, bool fixed_pitch, float max_char_wh_ratio,
                                  WERD_RES *word_res, bool debug, AssociateStats *stats) {
  stats->Clear();
  ASSERT_HOST(word_res != nullptr);
  if (word_res->blob_widths.empty()) {
    return;
  }
  if (debug) {
    tprintf("AssociateUtils::ComputeStats() for col=%d, row=%d%s\n", col, row,
            fixed_pitch ? " (fixed pitch)" : "");
  }
  float normalizing_height = kBlnXHeight;
  ROW *blob_row = word_res->blob_row;
  // TODO(rays/daria) Can unicharset.script_has_xheight be useful here?
  if (fixed_pitch && blob_row != nullptr) {
    // For fixed pitch language like CJK, we use the full text height
    // as the normalizing factor so we are not dependent on xheight
    // calculation.
    if (blob_row->body_size() > 0.0f) {
      normalizing_height = word_res->denorm.y_scale() * blob_row->body_size();
    } else {
      normalizing_height =
          word_res->denorm.y_scale() * (blob_row->x_height() + blob_row->ascenders());
    }
    if (debug) {
      tprintf("normalizing height = %g (scale %g xheight %g ascenders %g)\n", normalizing_height,
              word_res->denorm.y_scale(), blob_row->x_height(), blob_row->ascenders());
    }
  }
  // Width-to-height ratio of the combined blob; too wide means bad shape.
  float wh_ratio = word_res->GetBlobsWidth(col, row) / normalizing_height;
  if (wh_ratio > max_char_wh_ratio) {
    stats->bad_shape = true;
  }
  // Compute the gap sum for this shape. If there are only negative or only
  // positive gaps, record their sum in stats->gap_sum. However, if there is
  // a mixture, record only the sum of the positive gaps.
  // TODO(antonova): explain fragment.
  int negative_gap_sum = 0;
  for (int c = col; c < row; ++c) {
    int gap = word_res->GetBlobsGap(c);
    (gap > 0) ? stats->gap_sum += gap : negative_gap_sum += gap;
  }
  if (stats->gap_sum == 0) {
    stats->gap_sum = negative_gap_sum;
  }
  if (debug) {
    tprintf("wh_ratio=%g (max_char_wh_ratio=%g) gap_sum=%d %s\n", wh_ratio, max_char_wh_ratio,
            stats->gap_sum, stats->bad_shape ? "bad_shape" : "");
  }
  // Compute shape_cost (for fixed pitch mode).
  if (fixed_pitch) {
    bool end_row = (row == (word_res->ratings->dimension() - 1));
    // Ensure that the blob has gaps on the left and the right sides
    // (except for beginning and ending punctuation) and that there is
    // no cutting through ink at the blob boundaries.
    if (col > 0) {
      float left_gap = word_res->GetBlobsGap(col - 1) / normalizing_height;
      SEAM *left_seam = word_res->seam_array[col - 1];
      // A positive seam priority means the seam cuts through ink.
      if ((!end_row && left_gap < kMinGap) || left_seam->priority() > 0.0f) {
        stats->bad_shape = true;
      }
      if (debug) {
        tprintf("left_gap %g, left_seam %g %s\n", left_gap, left_seam->priority(),
                stats->bad_shape ? "bad_shape" : "");
      }
    }
    float right_gap = 0.0f;
    if (!end_row) {
      right_gap = word_res->GetBlobsGap(row) / normalizing_height;
      SEAM *right_seam = word_res->seam_array[row];
      if (right_gap < kMinGap || right_seam->priority() > 0.0f) {
        stats->bad_shape = true;
        if (right_gap < kMinGap) {
          stats->bad_fixed_pitch_right_gap = true;
        }
      }
      if (debug) {
        tprintf("right_gap %g right_seam %g %s\n", right_gap, right_seam->priority(),
                stats->bad_shape ? "bad_shape" : "");
      }
    }
    // Impose additional segmentation penalties if blob widths or gaps
    // distribution don't fit a fixed-pitch model.
    // Since we only know the widths and gaps of the path explored so far,
    // the means and variances are computed for the path so far (not
    // considering characters to the right of the last character on the path).
    stats->full_wh_ratio = wh_ratio + right_gap;
    if (parent_stats != nullptr) {
      stats->full_wh_ratio_total = (parent_stats->full_wh_ratio_total + stats->full_wh_ratio);
      float mean = stats->full_wh_ratio_total / static_cast<float>(parent_path_length + 1);
      stats->full_wh_ratio_var =
          parent_stats->full_wh_ratio_var + pow(mean - stats->full_wh_ratio, 2);
    } else {
      stats->full_wh_ratio_total = stats->full_wh_ratio;
    }
    if (debug) {
      tprintf("full_wh_ratio %g full_wh_ratio_total %g full_wh_ratio_var %g\n",
              stats->full_wh_ratio, stats->full_wh_ratio_total, stats->full_wh_ratio_var);
    }
    stats->shape_cost = FixedPitchWidthCost(wh_ratio, right_gap, end_row, max_char_wh_ratio);
    // For some reason Tesseract prefers to treat the whole CJ words
    // as one blob when the initial segmentation is particularly bad.
    // This hack is to avoid favoring such states.
    if (col == 0 && end_row && wh_ratio > max_char_wh_ratio) {
      stats->shape_cost += 10;
    }
    stats->shape_cost += stats->full_wh_ratio_var;
    if (debug) {
      tprintf("shape_cost %g\n", stats->shape_cost);
    }
  }
}
// Returns the shape cost for a blob of the given normalized width in
// fixed-pitch mode. right_gap is the normalized gap to the right of the
// blob; end_pos is true for the last blob position of the word.
float AssociateUtils::FixedPitchWidthCost(float norm_width, float right_gap, bool end_pos,
                                          float max_char_wh_ratio) {
  float cost = 0.0f;
  // Penalize blobs that are too wide for a single character cell.
  if (norm_width > max_char_wh_ratio) {
    cost += norm_width;
  }
  // Extra quadratic penalty for merging CJK chars.
  if (norm_width > kMaxFixedPitchCharAspectRatio) {
    cost += norm_width * norm_width;
  }
  // Penalize skinny blobs, except for punctuation in the last position.
  const float occupied = norm_width + right_gap;
  if (!end_pos && occupied < 0.5f) {
    cost += 1.0f - occupied;
  }
  return cost;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/associate.cpp
|
C++
|
apache-2.0
| 6,818
|
///////////////////////////////////////////////////////////////////////
// File: associate.h
// Description: Structs, classes, typedefs useful for the segmentation
// search. Functions for scoring segmentation paths according
// to their character widths, gap widths and seam cuts.
// Author: Daria Antonova
// Created: Mon Mar 8 11:26:43 PDT 2010
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef ASSOCIATE_H
#define ASSOCIATE_H
#include "blobs.h"
#include "elst.h"
#include "ratngs.h"
#include "seam.h"
#include "split.h"
namespace tesseract {
class WERD_RES;
// Statistics about character widths, gaps and seams.
struct AssociateStats {
  AssociateStats() {
    Clear();
  }

  // Resets all statistics to their neutral starting values.
  void Clear() {
    shape_cost = 0.0f;
    bad_shape = false;
    full_wh_ratio = 0.0f;
    full_wh_ratio_total = 0.0f;
    full_wh_ratio_var = 0.0f;
    bad_fixed_pitch_right_gap = false;
    bad_fixed_pitch_wh_ratio = false;
    gap_sum = 0;
  }

  // Dumps shape_cost and bad_shape for debugging.
  void Print() {
    tprintf("AssociateStats: s(%g %d)\n", shape_cost, bad_shape);
  }

  float shape_cost;               // cost of blob shape
  bool bad_shape;                 // true if the shape of the blob is unacceptable
  float full_wh_ratio;            // width-to-height ratio + gap on the right
  float full_wh_ratio_total;      // sum of width-to-height ratios
                                  // on the path terminating at this blob
  float full_wh_ratio_var;        // variance of full_wh_ratios on the path
  bool bad_fixed_pitch_right_gap; // true if there is no gap before
                                  // the blob on the right
  bool bad_fixed_pitch_wh_ratio;  // true if the blobs has width-to-height
                                  // ratio > kMaxFixedPitchCharAspectRatio
  int gap_sum;                    // sum of gaps within the blob
};
// Utility functions for scoring segmentation paths according to their
// character widths, gap widths, seam characteristics.
class AssociateUtils {
public:
  // Hard limit on the aspect ratio of a fixed-pitch character cell.
  static const float kMaxFixedPitchCharAspectRatio;
  // Minimum acceptable normalized gap between adjacent blobs.
  static const float kMinGap;

  // Returns outline length of the given blob is computed as:
  // rating_cert_scale * rating / certainty
  // Since from Wordrec::SegSearch() in segsearch.cpp
  // rating_cert_scale = -1.0 * getDict().certainty_scale / rating_scale
  // And from Classify::ConvertMatchesToChoices() in adaptmatch.cpp
  // Rating = Certainty = next.rating
  // Rating *= rating_scale * Results->BlobLength
  // Certainty *= -(getDict().certainty_scale)
  static inline float ComputeOutlineLength(float rating_cert_scale, const BLOB_CHOICE &b) {
    return rating_cert_scale * b.rating() / b.certainty();
  }
  // Inverse of ComputeOutlineLength: recovers a rating from certainty
  // and blob width.
  static inline float ComputeRating(float rating_cert_scale, float cert, int width) {
    return static_cast<float>(width) * cert / rating_cert_scale;
  }

  // Computes character widths, gaps and seams stats given the
  // AssociateStats of the path so far, col, row of the blob that
  // is being added to the path, and WERD_RES containing information
  // about character widths, gaps and seams.
  // Fills associate_cost with the combined shape, gap and seam cost
  // of adding a unichar from (col, row) to the path (note that since
  // this function could be used to compute the prioritization for
  // pain points, (col, row) entry might not be classified yet; thus
  // information in the (col, row) entry of the ratings matrix is not used).
  //
  // Note: the function assumes that word_res, stats and
  // associate_cost pointers are not nullptr.
  static void ComputeStats(int col, int row, const AssociateStats *parent_stats,
                           int parent_path_length, bool fixed_pitch, float max_char_wh_ratio,
                           WERD_RES *word_res, bool debug, AssociateStats *stats);

  // Returns the width cost for fixed-pitch text.
  static float FixedPitchWidthCost(float norm_width, float right_gap, bool end_pos,
                                   float max_char_wh_ratio);

  // Returns the gap cost for fixed-pitch text (penalizes vertically
  // overlapping components).
  static inline float FixedPitchGapCost(float norm_gap, bool end_pos) {
    return (norm_gap < 0.05 && !end_pos) ? 5.0f : 0.0f;
  }
};
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/wordrec/associate.h
|
C++
|
apache-2.0
| 4,870
|
/******************************************************************************
*
* File: chop.cpp (Formerly chop.c)
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#define _USE_MATH_DEFINES // for M_PI
#include "chop.h"
#include <cmath> // for M_PI
#include "outlines.h"
#include "plotedges.h"
#include "wordrec.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
namespace tesseract {
// Show if the line is going in the positive or negative X direction.
static int direction(const EDGEPT *point) {
//* direction to return
int dir = 0;
//* prev point
const EDGEPT *prev = point->prev;
//* next point
const EDGEPT *next = point->next;
if (((prev->pos.x <= point->pos.x) && (point->pos.x < next->pos.x)) ||
((prev->pos.x < point->pos.x) && (point->pos.x <= next->pos.x))) {
dir = 1;
}
if (((prev->pos.x >= point->pos.x) && (point->pos.x > next->pos.x)) ||
((prev->pos.x > point->pos.x) && (point->pos.x >= next->pos.x))) {
dir = -1;
}
return dir;
}
/**
 * @name point_priority
 *
 * Assign a priority to an edge point that might be used as part of a
 * split. The argument should be of type EDGEPT. Negative priorities
 * (concave corners) are the interesting split candidates -- see
 * new_min_point()/new_max_point().
 */
PRIORITY Wordrec::point_priority(EDGEPT *point) {
  // The priority is the signed angular change (in degrees) at the point.
  return static_cast<PRIORITY>(angle_change(point->prev, point, point->next));
}
/**
 * @name add_point_to_list
 *
 * Add an edge point to a POINT_GROUP containing a list of other points.
 * Points are silently dropped once the heap is nearly full.
 */
void Wordrec::add_point_to_list(PointHeap *point_heap, EDGEPT *point) {
  if (point_heap->size() < MAX_NUM_POINTS - 2) {
    // Heap entries pair the point with its priority for ordered retrieval.
    PointPair pair(point_priority(point), point);
    point_heap->Push(&pair);
  }
#ifndef GRAPHICS_DISABLED
  // Visual debugging: mark the candidate point on the displayed outline.
  if (chop_debug > 2) {
    mark_outline(point);
  }
#endif
}
// Returns true if the edgept supplied as input is an inside angle. This
// is determined by the angular change of the vectors from point to point.
// The chop_inside_angle parameter provides the threshold (in degrees).
bool Wordrec::is_inside_angle(EDGEPT *pt) {
  return angle_change(pt->prev, pt, pt->next) < chop_inside_angle;
}
/**
 * @name angle_change
 *
 * Return the change in angle (degrees) of the line segments between
 * points one and two, and two and three. The result is adjusted into
 * the range (-180, 180].
 */
int Wordrec::angle_change(EDGEPT *point1, EDGEPT *point2, EDGEPT *point3) {
  VECTOR vector1;
  VECTOR vector2;
  int angle;
  /* Compute angle */
  vector1.x = point2->pos.x - point1->pos.x;
  vector1.y = point2->pos.y - point1->pos.y;
  vector2.x = point3->pos.x - point2->pos.x;
  vector2.y = point3->pos.y - point2->pos.y;
  /* Use cross product */
  float length = std::sqrt(static_cast<float>(vector1.length2()) * vector2.length2());
  if (static_cast<int>(length) == 0) {
    // Degenerate (zero-length) segment: no meaningful angle.
    return (0);
  }
  // sin(angle) = cross / (|v1| * |v2|).
  auto f = vector1.cross(vector2) / length;
  // Avoid FP exception in std::asin caused by illegal values of f
  // (caused by rounding errors).
  if (f <= -1.0f) {
    angle = -90;
  } else if (f >= 1.0f) {
    angle = 90;
  } else {
    angle = static_cast<int>(floor(std::asin(f) / M_PI * 180.0 + 0.5));
    // Use dot product.
    if (vector1.dot(vector2) < 0) {
      // Obtuse turn: reflect into the other half-plane.
      angle = 180 - angle;
    }
    // Adjust angle.
    if (angle > 180) {
      angle -= 360;
    } else if (angle <= -180) {
      angle += 360;
    }
  }
  return angle;
}
/**
 * @name pick_close_point
 *
 * Choose the edge point that is closest to the critical point. This
 * point may not be exactly vertical from the critical point.
 * *best_dist is both an input threshold and an output (updated to the
 * distance of the returned point). Returns nullptr if no acceptable
 * point at most *best_dist away is found.
 */
EDGEPT *Wordrec::pick_close_point(EDGEPT *critical_point, EDGEPT *vertical_point, int *best_dist) {
  EDGEPT *best_point = nullptr;
  int this_distance;
  bool found_better;
  do {
    found_better = false;
    this_distance = edgept_dist(critical_point, vertical_point);
    if (this_distance <= *best_dist) {
      // Reject candidates coinciding with the critical point, its
      // successor, or the current best, and exterior points.
      if (!(same_point(critical_point->pos, vertical_point->pos) ||
            same_point(critical_point->pos, vertical_point->next->pos) ||
            (best_point && same_point(best_point->pos, vertical_point->pos)) ||
            is_exterior_point(critical_point, vertical_point))) {
        *best_dist = this_distance;
        best_point = vertical_point;
        // With chop_vertical_creep set, keep walking forward as long as
        // the distance keeps improving.
        if (chop_vertical_creep) {
          found_better = true;
        }
      }
    }
    vertical_point = vertical_point->next;
  } while (found_better == true);
  return (best_point);
}
/**
 * @name prioritize_points
 *
 * Find a list of edge points from the outer outline of this blob. For
 * each of these points assign a priority. Sort these points using a
 * heap structure so that they can be visited in order.
 */
void Wordrec::prioritize_points(TESSLINE *outline, PointHeap *points) {
  EDGEPT *this_point;
  EDGEPT *local_min = nullptr;
  EDGEPT *local_max = nullptr;
  this_point = outline->loop;
  local_min = this_point;
  local_max = this_point;
  // Walk the loop tracking runs of descending/ascending y. The end of a
  // run is a local extremum in y and therefore a split candidate.
  do {
    if (this_point->vec.y < 0) {
      /* Look for minima */
      if (local_max != nullptr) {
        // An ascending run just ended: emit its maximum.
        new_max_point(local_max, points);
      } else if (is_inside_angle(this_point)) {
        // Not at an extremum, but a concave corner is still a candidate.
        add_point_to_list(points, this_point);
      }
      local_max = nullptr;
      local_min = this_point->next;
    } else if (this_point->vec.y > 0) {
      /* Look for maxima */
      if (local_min != nullptr) {
        // A descending run just ended: emit its minimum.
        new_min_point(local_min, points);
      } else if (is_inside_angle(this_point)) {
        add_point_to_list(points, this_point);
      }
      local_min = nullptr;
      local_max = this_point->next;
    } else {
      /* Flat area */
      if (local_max != nullptr) {
        // Only emit if the flat run follows genuine ascent.
        if (local_max->prev->vec.y != 0) {
          new_max_point(local_max, points);
        }
        local_max = this_point->next;
        local_min = nullptr;
      } else {
        if (local_min->prev->vec.y != 0) {
          new_min_point(local_min, points);
        }
        local_min = this_point->next;
        local_max = nullptr;
      }
    }
    /* Next point */
    this_point = this_point->next;
  } while (this_point != outline->loop);
}
/**
 * @name new_min_point
 *
 * Found a new minimum point try to decide whether to save it or not.
 * Return the new value for the local minimum. If a point is saved then
 * the local minimum is reset to nullptr.
 */
void Wordrec::new_min_point(EDGEPT *local_min, PointHeap *points) {
  const int16_t dir = direction(local_min);
  // Keep the point if the outline is heading in -X here, or if it is
  // locally flat in X but forms a concave (negative priority) corner.
  if (dir < 0 || (dir == 0 && point_priority(local_min) < 0)) {
    add_point_to_list(points, local_min);
  }
}
/**
 * @name new_max_point
 *
 * Found a new maximum point try to decide whether to save it or not.
 * Return the new value for the local maximum. If a point is saved then
 * the local maximum is reset to nullptr.
 */
void Wordrec::new_max_point(EDGEPT *local_max, PointHeap *points) {
  const int16_t dir = direction(local_max);
  // Keep the point if the outline is heading in +X here, or if it is
  // locally flat in X but forms a concave (negative priority) corner.
  if (dir > 0 || (dir == 0 && point_priority(local_max) < 0)) {
    add_point_to_list(points, local_max);
  }
}
/**
 * @name vertical_projection_point
 *
 * For one point on the outline, find the corresponding point on the
 * other side of the outline that is a likely projection for a split
 * point. This is done by iterating through the edge points until the
 * X value of the point being looked at is greater than the X value of
 * the split point. Ensure that the point being returned is not right
 * next to the split point. Return the edge point in *best_point as
 * a result, and any points that were newly created are also saved on
 * the new_points list.
 */
void Wordrec::vertical_projection_point(EDGEPT *split_point, EDGEPT *target_point,
                                        EDGEPT **best_point, EDGEPT_CLIST *new_points) {
  EDGEPT *p;           /* Iterator */
  EDGEPT *this_edgept; /* Iterator */
  EDGEPT_C_IT new_point_it(new_points);
  int x = split_point->pos.x;     /* X value of vertical */
  int best_dist = LARGE_DISTANCE; /* Best point found */
  if (*best_point != nullptr) {
    best_dist = edgept_dist(split_point, *best_point);
  }
  p = target_point;
  /* Look at each edge point */
  do {
    // Consider only segments whose x-range straddles the split point's x,
    // excluding points coincident with the split point or the current best.
    if (((p->pos.x <= x && x <= p->next->pos.x) || (p->next->pos.x <= x && x <= p->pos.x)) &&
        !same_point(split_point->pos, p->pos) && !same_point(split_point->pos, p->next->pos) &&
        !p->IsChopPt() && (*best_point == nullptr || !same_point((*best_point)->pos, p->pos))) {
      // NOTE(review): this_edgept appears to be assumed set by near_point()
      // even when it returns false -- confirm against near_point's contract.
      if (near_point(split_point, p, p->next, &this_edgept)) {
        // A new point was interpolated on the segment; record it.
        new_point_it.add_before_then_move(this_edgept);
      }
      if (*best_point == nullptr) {
        best_dist = edgept_dist(split_point, this_edgept);
      }
      this_edgept = pick_close_point(split_point, this_edgept, &best_dist);
      if (this_edgept) {
        *best_point = this_edgept;
      }
    }
    p = p->next;
  } while (p != target_point);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/chop.cpp
|
C++
|
apache-2.0
| 9,661
|
/******************************************************************************
*
* File: chop.h
* Author: Mark Seaman, SW Productivity
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#ifndef CHOP_H
#define CHOP_H
#include "genericheap.h"
#include "kdpair.h"
#include "seam.h"
namespace tesseract {
// Upper bound on the number of candidate split points kept per outline.
#define MAX_NUM_POINTS 50
// A (priority, point) pair used to order candidate split points.
// The PointPair elements do NOT own the EDGEPTs.
using PointPair = KDPairInc<float, EDGEPT *>;
// Heap of candidate split points keyed by priority.
using PointHeap = GenericHeap<PointPair>;
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/wordrec/chop.h
|
C++
|
apache-2.0
| 1,157
|
/******************************************************************************
*
* File: chopper.cpp (Formerly chopper.c)
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "blamer.h" // for BlamerBundle, IRR_CORRECT
#include "blobs.h" // for TPOINT, TBLOB, EDGEPT, TESSLINE, divisible_blob
#include "dict.h" // for Dict
#include "lm_pain_points.h" // for LMPainPoints
#include "lm_state.h" // for BestChoiceBundle
#include "matrix.h" // for MATRIX
#include "normalis.h" // for DENORM
#include "pageres.h" // for WERD_RES
#include "params.h" // for IntParam, BoolParam
#include "ratngs.h" // for BLOB_CHOICE (ptr only), BLOB_CHOICE_LIST (ptr ...
#include "rect.h" // for TBOX
#include "render.h" // for display_blob
#include "seam.h" // for SEAM
#include "split.h" // for remove_edgept
#include "stopper.h" // for DANGERR
#include "tprintf.h" // for tprintf
#include "wordrec.h" // for Wordrec, SegSearchPending (ptr only)
namespace tesseract {
// Even though the limit on the number of chunks may now be removed, keep
// the same limit for repeatable behavior, and it may be a speed advantage.
static const int kMaxNumChunks = 64;
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**
 * @name check_blob
 *
 * @return true if blob has a non whole outline.
 */
static int check_blob(TBLOB *blob) {
  for (TESSLINE *outline = blob->outlines; outline != nullptr; outline = outline->next) {
    // Walk the point loop; a null link means the outline is broken.
    EDGEPT *edgept = outline->loop;
    do {
      if (edgept == nullptr) {
        return 1;
      }
      edgept = edgept->next;
    } while (edgept != outline->loop);
  }
  return 0;
}
/**
 * @name any_shared_split_points
 *
 * Return true if any of the splits share a point with this one.
 */
static int any_shared_split_points(const std::vector<SEAM *> &seams, SEAM *seam) {
  // Range-for replaces the old C-style index loop; behavior is unchanged.
  for (SEAM *other : seams) {
    if (seam->SharesPosition(*other)) {
      return true;
    }
  }
  return false;
}
/**
 * @name preserve_outline
 *
 * Mark every point on the loop as original (runlength 1) and the start
 * point as the anchor (runlength 2) so restore_outline() can later strip
 * any points added by chopping.
 */
static void preserve_outline(EDGEPT *start) {
  if (start == nullptr) {
    return;
  }
  EDGEPT *pt = start;
  do {
    pt->runlength = 1;
    pt = pt->next;
  } while (pt != start);
  // The loop leaves pt == start; mark it as the anchor.
  pt->runlength = 2;
}
// Marks every outline in the list so that restore_outline_tree() can
// later undo the effects of chopping.
static void preserve_outline_tree(TESSLINE *srcline) {
  for (TESSLINE *line = srcline; line != nullptr; line = line->next) {
    preserve_outline(line->loop);
  }
}
/**
 * @name restore_outline_tree
 *
 * Restore an outline to its preserved state by removing all points
 * added since preserve_outline() marked it. Returns the anchor point
 * (runlength 2) as the new loop start.
 */
static EDGEPT *restore_outline(EDGEPT *start) {
  EDGEPT *srcpt;
  EDGEPT *real_start;
  if (start == nullptr) {
    return nullptr;
  }
  // Find the anchor point marked with runlength 2 by preserve_outline().
  srcpt = start;
  do {
    if (srcpt->runlength == 2) {
      break;
    }
    srcpt = srcpt->next;
  } while (srcpt != start);
  real_start = srcpt;
  // Remove every point that was added after preservation (runlength 0).
  do {
    srcpt = srcpt->next;
    if (srcpt->prev->runlength == 0) {
      remove_edgept(srcpt->prev);
    }
  } while (srcpt != real_start);
  return real_start;
}
// Restores every outline in the list to its state before the chop attempt,
// resetting each loop start pointer and the outline's start position.
static void restore_outline_tree(TESSLINE *srcline) {
  TESSLINE *outline;
  for (outline = srcline; outline != nullptr; outline = outline->next) {
    outline->loop = restore_outline(outline->loop);
    outline->start = outline->loop->pos;
  }
}
/**********************************************************************
 * total_containment
 *
 * Check to see if one of these outlines is totally contained within
 * the bounding box of the other.
 **********************************************************************/
static int16_t total_containment(TBLOB *blob1, TBLOB *blob2) {
  const TBOX first_box = blob1->bounding_box();
  const TBOX second_box = blob2->bounding_box();
  if (first_box.contains(second_box)) {
    return true;
  }
  return second_box.contains(first_box);
}
// Helper runs all the checks on a seam to make sure it is valid.
// Returns the seam if OK, otherwise deletes the seam and returns nullptr.
// On failure the speculatively inserted other_blob is removed from the word
// again; if a seam had been applied its effect is undone first, otherwise
// other_blob is deleted outright.
static SEAM *CheckSeam(int debug_level, int32_t blob_number, TWERD *word, TBLOB *blob,
                       TBLOB *other_blob, const std::vector<SEAM *> &seams, SEAM *seam) {
  // Reject the seam if any of the following hold:
  // - no seam was found, or either blob has lost all its outlines,
  // - one blob's bounding box entirely contains the other's,
  // - other_blob has a broken (non-closed) outline,
  // - the seam is not contained within both blobs,
  // - the seam shares a split point with an existing seam,
  // - the seam cannot be inserted into the word's seam list.
  if (seam == nullptr || blob->outlines == nullptr || other_blob->outlines == nullptr ||
      total_containment(blob, other_blob) || check_blob(other_blob) ||
      !seam->ContainedByBlob(*blob) || !seam->ContainedByBlob(*other_blob) ||
      any_shared_split_points(seams, seam) ||
      !seam->PrepareToInsertSeam(seams, word->blobs, blob_number, false)) {
    // Take the speculative other_blob back out of the word.
    word->blobs.erase(word->blobs.begin() + blob_number + 1);
    if (seam) {
      // The seam had been applied: reverse the chop before discarding it.
      seam->UndoSeam(blob, other_blob);
      delete seam;
      seam = nullptr;
#ifndef GRAPHICS_DISABLED
      if (debug_level) {
        if (debug_level > 2) {
          display_blob(blob, ScrollView::RED);
        }
        tprintf("\n** seam being removed ** \n");
      }
#endif
    } else {
      // No seam was applied, so other_blob is still an unused shallow copy.
      delete other_blob;
    }
    return nullptr;
  }
  return seam;
}
/**
 * @name attempt_blob_chop
 *
 * Try to split the this blob after this one. Check to make sure that
 * it was successful.
 *
 * @param word the word being chopped; a new blob is inserted on success.
 * @param blob the blob to split.
 * @param blob_number index of blob within word->blobs.
 * @param italic_blob true to split along an italic (slanted) direction.
 * @param seams existing seams, used to validate the new one.
 * @return the finalized SEAM on success, or nullptr if no valid chop
 *         could be made (the word is then left unchanged).
 */
SEAM *Wordrec::attempt_blob_chop(TWERD *word, TBLOB *blob, int32_t blob_number, bool italic_blob,
                                 const std::vector<SEAM *> &seams) {
  if (repair_unchopped_blobs) {
    // Mark the outline points so a failed chop can be undone exactly.
    preserve_outline_tree(blob->outlines);
  }
  TBLOB *other_blob = TBLOB::ShallowCopy(*blob); /* Make new blob */
  // Insert it into the word.
  word->blobs.insert(word->blobs.begin() + blob_number + 1, other_blob);
  SEAM *seam = nullptr;
  if (prioritize_division) {
    // Prefer dividing the blob into its separate outlines when possible.
    TPOINT location;
    if (divisible_blob(blob, italic_blob, &location)) {
      seam = new SEAM(0.0f, location);
    }
  }
  if (seam == nullptr) {
    // Otherwise search the outline for the best split-point pair.
    seam = pick_good_seam(blob);
  }
  if (chop_debug) {
    if (seam != nullptr) {
      seam->Print("Good seam picked=");
    } else {
      tprintf("\n** no seam picked *** \n");
    }
  }
  if (seam) {
    // Move the outlines on the far side of the seam into other_blob.
    seam->ApplySeam(italic_blob, blob, other_blob);
  }
  // Validate; on failure CheckSeam undoes the insertion and returns nullptr.
  seam = CheckSeam(chop_debug, blob_number, word, blob, other_blob, seams, seam);
  if (seam == nullptr) {
    if (repair_unchopped_blobs) {
      restore_outline_tree(blob->outlines);
    }
    if (allow_blob_division && !prioritize_division) {
      // If the blob can simply be divided into outlines, then do that.
      TPOINT location;
      if (divisible_blob(blob, italic_blob, &location)) {
        other_blob = TBLOB::ShallowCopy(*blob); /* Make new blob */
        word->blobs.insert(word->blobs.begin() + blob_number + 1, other_blob);
        seam = new SEAM(0.0f, location);
        seam->ApplySeam(italic_blob, blob, other_blob);
        seam = CheckSeam(chop_debug, blob_number, word, blob, other_blob, seams, seam);
      }
    }
  }
  if (seam != nullptr) {
    // Make sure this seam doesn't get chopped again.
    seam->Finalize();
  }
  return seam;
}
// Chops the blob at the given index in the word, returning the seam used on
// success or nullptr on failure. Thin wrapper around attempt_blob_chop().
SEAM *Wordrec::chop_numbered_blob(TWERD *word, int32_t blob_number, bool italic_blob,
                                  const std::vector<SEAM *> &seams) {
  return attempt_blob_chop(word, word->blobs[blob_number], blob_number, italic_blob, seams);
}
// Scans the chopped word for a blob that is either divisible into multiple
// outlines or overlaps more than one of the given (image-space) boxes while
// closely matching none of them, and chops it. On success returns the seam
// and sets *blob_number to the chopped blob's index; otherwise returns
// nullptr and sets *blob_number to UINT_MAX.
SEAM *Wordrec::chop_overlapping_blob(const std::vector<TBOX> &boxes, bool italic_blob,
                                     WERD_RES *word_res, unsigned *blob_number) {
  TWERD *word = word_res->chopped_word;
  for (*blob_number = 0; *blob_number < word->NumBlobs(); ++*blob_number) {
    TBLOB *blob = word->blobs[*blob_number];
    TPOINT topleft, botright;
    topleft.x = blob->bounding_box().left();
    topleft.y = blob->bounding_box().top();
    botright.x = blob->bounding_box().right();
    botright.y = blob->bounding_box().bottom();
    // Transform the blob's box back to original image coordinates so it can
    // be compared against the provided boxes.
    TPOINT original_topleft, original_botright;
    word_res->denorm.DenormTransform(nullptr, topleft, &original_topleft);
    word_res->denorm.DenormTransform(nullptr, botright, &original_botright);
    TBOX original_box =
        TBOX(original_topleft.x, original_botright.y, original_botright.x, original_topleft.y);
    bool almost_equal_box = false;
    int num_overlap = 0;
    for (auto &&boxe : boxes) {
      // Count boxes with significant (> 12.5%) overlap with this blob.
      if (original_box.overlap_fraction(boxe) > 0.125) {
        num_overlap++;
      }
      // Note if the blob already matches a box to within 3 units.
      if (original_box.almost_equal(boxe, 3)) {
        almost_equal_box = true;
      }
    }
    TPOINT location;
    if (divisible_blob(blob, italic_blob, &location) || (!almost_equal_box && num_overlap > 1)) {
      SEAM *seam = attempt_blob_chop(word, blob, *blob_number, italic_blob, word_res->seam_array);
      if (seam != nullptr) {
        return seam;
      }
    }
  }
  *blob_number = UINT_MAX;
  return nullptr;
}
/**
 * @name improve_one_blob
 *
 * Finds the best place to chop, based on the worst blob, fixpt, or next to
 * a fragment, according to the input. Returns the SEAM corresponding to the
 * chop point, if any is found, and the index in the ratings_matrix of the
 * chopped blob. Note that blob_choices is just a copy of the pointers in the
 * leading diagonal of the ratings MATRIX.
 * Although the blob is chopped, the returned SEAM is yet to be inserted into
 * word->seam_array and the resulting blobs are unclassified, so this function
 * can be used by ApplyBox as well as during recognition.
 */
SEAM *Wordrec::improve_one_blob(const std::vector<BLOB_CHOICE *> &blob_choices, DANGERR *fixpt,
                                bool split_next_to_fragment, bool italic_blob, WERD_RES *word,
                                unsigned *blob_number) {
  float rating_ceiling = FLT_MAX;
  SEAM *seam = nullptr;
  do {
    // First preference: a blob the dictionary flagged as dangerous.
    auto blob = select_blob_to_split_from_fixpt(fixpt);
    if (chop_debug) {
      tprintf("blob_number from fixpt = %d\n", blob);
    }
    bool split_point_from_dict = (blob != -1);
    if (split_point_from_dict) {
      fixpt->clear();
    } else {
      // Otherwise pick the worst-rated blob below the current ceiling.
      blob = select_blob_to_split(blob_choices, rating_ceiling, split_next_to_fragment);
    }
    if (chop_debug) {
      tprintf("blob_number = %d\n", blob);
    }
    *blob_number = blob;
    if (blob == -1) {
      // Nothing left that is worth splitting.
      return nullptr;
    }
    // TODO(rays) it may eventually help to allow italic_blob to be true,
    seam = chop_numbered_blob(word->chopped_word, *blob_number, italic_blob, word->seam_array);
    if (seam != nullptr) {
      break; // Success!
    }
    if (blob_choices[*blob_number] == nullptr) {
      return nullptr;
    }
    if (!split_point_from_dict) {
      // We chopped the worst rated blob, try something else next time.
      rating_ceiling = blob_choices[*blob_number]->rating();
    }
  } while (true);
  return seam;
}
/**
 * @name chop_one_blob
 *
 * Start with the current one-blob word and its classification. Find
 * the worst blobs and try to divide it up to improve the ratings.
 * Used for testing chopper.
 */
SEAM *Wordrec::chop_one_blob(const std::vector<TBOX> &boxes,
                             const std::vector<BLOB_CHOICE *> &blob_choices, WERD_RES *word_res,
                             unsigned *blob_number) {
  // Dispatch on the division-priority mode; both paths report the chopped
  // blob's index through *blob_number.
  return prioritize_division
             ? chop_overlapping_blob(boxes, true, word_res, blob_number)
             : improve_one_blob(blob_choices, nullptr, false, true, word_res, blob_number);
}
/**
 * @name chop_word_main
 *
 * Classify the blobs in this word and permute the results. Find the
 * worst blob in the word and chop it up. Continue this process until
 * a good answer has been found or all the blobs have been chopped up
 * enough. The results are returned in the WERD_RES.
 */
void Wordrec::chop_word_main(WERD_RES *word) {
  int num_blobs = word->chopped_word->NumBlobs();
  if (word->ratings == nullptr) {
    word->ratings = new MATRIX(num_blobs, wordrec_max_join_chunks);
  }
  if (word->ratings->get(0, 0) == nullptr) {
    // Run initial classification.
    for (int b = 0; b < num_blobs; ++b) {
      BLOB_CHOICE_LIST *choices = classify_piece(
          word->seam_array, b, b, "Initial:", word->chopped_word, word->blamer_bundle);
      word->ratings->put(b, b, choices);
    }
  } else {
    // Blobs have been pre-classified. Set matrix cell for all blob choices
    // within the matrix band, so each choice knows its own (col, row).
    for (int col = 0; col < word->ratings->dimension(); ++col) {
      for (int row = col;
           row < word->ratings->dimension() && row < col + word->ratings->bandwidth(); ++row) {
        BLOB_CHOICE_LIST *choices = word->ratings->get(col, row);
        if (choices != nullptr) {
          BLOB_CHOICE_IT bc_it(choices);
          for (bc_it.mark_cycle_pt(); !bc_it.cycled_list(); bc_it.forward()) {
            bc_it.data()->set_matrix_cell(col, row);
          }
        }
      }
    }
  }
  // Run Segmentation Search.
  BestChoiceBundle best_choice_bundle(word->ratings->dimension());
  SegSearch(word, &best_choice_bundle, word->blamer_bundle);
  if (word->best_choice == nullptr) {
    // SegSearch found no valid paths, so just use the leading diagonal.
    word->FakeWordFromRatings(TOP_CHOICE_PERM);
  }
  word->RebuildBestState();
  // If we finished without a hyphen at the end of the word, let the next word
  // be found in the dictionary.
  if (word->word->flag(W_EOL) && !getDict().has_hyphen_end(*word->best_choice)) {
    getDict().reset_hyphen_vars(true);
  }
  // Hand the lattice to the registered callback (training), if any.
  if (word->blamer_bundle != nullptr && this->fill_lattice_ != nullptr) {
    CallFillLattice(*word->ratings, word->best_choices, *word->uch_set, word->blamer_bundle);
  }
  if (wordrec_debug_level > 0) {
    tprintf("Final Ratings Matrix:\n");
    word->ratings->print(getDict().getUnicharset());
  }
  word->FilterWordChoices(getDict().stopper_debug_level);
}
/**
 * @name improve_by_chopping
 *
 * Repeatedly chops the worst blob, classifying the new blobs fixing up all
 * the data, and incrementally runs the segmentation search until a good word
 * is found, or no more chops can be found.
 *
 * @param rating_cert_scale scale factor forwarded to UpdateSegSearchNodes.
 * @param word the word being recognized; ratings matrix and seam array are
 *             updated in place after each chop.
 * @param best_choice_bundle current best choice plus beam and fixpt state.
 * @param blamer_bundle training-time blame accounting (may be nullptr).
 * @param pain_points priority queue of matrix cells still to classify.
 * @param pending per-column pending segmentation-search work.
 */
void Wordrec::improve_by_chopping(float rating_cert_scale, WERD_RES *word,
                                  BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle,
                                  LMPainPoints *pain_points,
                                  std::vector<SegSearchPending> *pending) {
  unsigned blob_number;
  do { // improvement loop.
    // Make a simple vector of BLOB_CHOICEs to make it easy to pick which
    // one to chop.
    std::vector<BLOB_CHOICE *> blob_choices;
    int num_blobs = word->ratings->dimension();
    for (int i = 0; i < num_blobs; ++i) {
      BLOB_CHOICE_LIST *choices = word->ratings->get(i, i);
      if (choices == nullptr || choices->empty()) {
        blob_choices.push_back(nullptr);
      } else {
        BLOB_CHOICE_IT bc_it(choices);
        blob_choices.push_back(bc_it.data());
      }
    }
    SEAM *seam = improve_one_blob(blob_choices, &best_choice_bundle->fixpt, false, false, word,
                                  &blob_number);
    if (seam == nullptr) {
      break;
    }
    // A chop has been made. We have to correct all the data structures to
    // take into account the extra bottom-level blob.
    // Put the seam into the seam_array and correct everything else on the
    // word: ratings matrix (including matrix location in the BLOB_CHOICES),
    // states in WERD_CHOICEs, and blob widths.
    word->InsertSeam(blob_number, seam);
    // Insert a new entry in the beam array.
    best_choice_bundle->beam.insert(best_choice_bundle->beam.begin() + blob_number, new LanguageModelState);
    // Fixpts are outdated, but will get recalculated.
    best_choice_bundle->fixpt.clear();
    // Remap existing pain points.
    pain_points->RemapForSplit(blob_number);
    // Insert a new pending at the chop point.
    pending->insert(pending->begin() + blob_number, SegSearchPending());
    // Classify the two newly created blobs using ProcessSegSearchPainPoint,
    // as that updates the pending correctly and adds new pain points.
    MATRIX_COORD pain_point(blob_number, blob_number);
    ProcessSegSearchPainPoint(0.0f, pain_point, "Chop1", pending, word, pain_points, blamer_bundle);
    pain_point.col = blob_number + 1;
    pain_point.row = blob_number + 1;
    ProcessSegSearchPainPoint(0.0f, pain_point, "Chop2", pending, word, pain_points, blamer_bundle);
    if (language_model_->language_model_ngram_on) {
      // N-gram evaluation depends on the number of blobs in a chunk, so we
      // have to re-evaluate everything in the word.
      ResetNGramSearch(word, best_choice_bundle, *pending);
      blob_number = 0;
    }
    // Run language model incrementally. (Except with the n-gram model on.)
    UpdateSegSearchNodes(rating_cert_scale, blob_number, pending, word, pain_points,
                         best_choice_bundle, blamer_bundle);
  } while (!language_model_->AcceptableChoiceFound() && word->ratings->dimension() < kMaxNumChunks);
  // If after running only the chopper best_choice is incorrect and no blame
  // has been yet set, blame the classifier if best_choice is classifier's
  // top choice and is a dictionary word (i.e. language model could not have
  // helped). Otherwise blame the tradeoff between the classifier and
  // the old language model (permuters).
  if (word->blamer_bundle != nullptr &&
      word->blamer_bundle->incorrect_result_reason() == IRR_CORRECT &&
      !word->blamer_bundle->ChoiceIsCorrect(word->best_choice)) {
    bool valid_permuter = word->best_choice != nullptr &&
                          Dict::valid_word_permuter(word->best_choice->permuter(), false);
    word->blamer_bundle->BlameClassifierOrLangModel(word, getDict().getUnicharset(), valid_permuter,
                                                    wordrec_debug_blamer);
  }
}
/**********************************************************************
 * select_blob_to_split
 *
 * These are the results of the last classification. Find a likely
 * place to apply splits. If none, return -1.
 *
 * Returns the index of the worst-rated blob whose rating is below
 * rating_ceiling and whose certainty is below the threshold. When
 * split_next_to_fragment is set, a qualifying blob adjacent to a
 * character fragment is preferred over the globally worst one.
 **********************************************************************/
int Wordrec::select_blob_to_split(const std::vector<BLOB_CHOICE *> &blob_choices,
                                  float rating_ceiling, bool split_next_to_fragment) {
  BLOB_CHOICE *blob_choice;
  float worst = -FLT_MAX;
  int worst_index = -1;
  float worst_near_fragment = -FLT_MAX;
  int worst_index_near_fragment = -1;
  // Lazily filled, one entry ahead of the scan position (see loop below).
  std::vector<const CHAR_FRAGMENT *> fragments;
  if (chop_debug) {
    if (rating_ceiling < FLT_MAX) {
      tprintf("rating_ceiling = %8.4f\n", rating_ceiling);
    } else {
      tprintf("rating_ceiling = No Limit\n");
    }
  }
  if (split_next_to_fragment && blob_choices.size() > 0) {
    fragments.resize(blob_choices.size());
    if (blob_choices[0] != nullptr) {
      fragments[0] = getDict().getUnicharset().get_fragment(blob_choices[0]->unichar_id());
    } else {
      fragments[0] = nullptr;
    }
  }
  for (unsigned x = 0; x < blob_choices.size(); ++x) {
    if (blob_choices[x] == nullptr) {
      // An unclassified blob is always an immediate candidate for splitting.
      return x;
    } else {
      blob_choice = blob_choices[x];
      // Populate fragments for the following position.
      if (split_next_to_fragment && x + 1 < blob_choices.size()) {
        if (blob_choices[x + 1] != nullptr) {
          fragments[x + 1] =
              getDict().getUnicharset().get_fragment(blob_choices[x + 1]->unichar_id());
        } else {
          fragments[x + 1] = nullptr;
        }
      }
      if (blob_choice->rating() < rating_ceiling &&
          blob_choice->certainty() < tessedit_certainty_threshold) {
        // Update worst and worst_index.
        if (blob_choice->rating() > worst) {
          worst_index = x;
          worst = blob_choice->rating();
        }
        if (split_next_to_fragment) {
          // Update worst_near_fragment and worst_index_near_fragment.
          // A neighbor fragment that is not a beginning (following) or not
          // an ending (preceding) suggests this blob should be expanded.
          bool expand_following_fragment =
              (x + 1 < blob_choices.size() && fragments[x + 1] != nullptr &&
               !fragments[x + 1]->is_beginning());
          bool expand_preceding_fragment =
              (x > 0 && fragments[x - 1] != nullptr && !fragments[x - 1]->is_ending());
          if ((expand_following_fragment || expand_preceding_fragment) &&
              blob_choice->rating() > worst_near_fragment) {
            worst_index_near_fragment = x;
            worst_near_fragment = blob_choice->rating();
            if (chop_debug) {
              tprintf(
                  "worst_index_near_fragment=%d"
                  " expand_following_fragment=%d"
                  " expand_preceding_fragment=%d\n",
                  worst_index_near_fragment, expand_following_fragment, expand_preceding_fragment);
            }
          }
        }
      }
    }
  }
  // TODO(daria): maybe a threshold of badness for
  // worst_near_fragment would be useful.
  return worst_index_near_fragment != -1 ? worst_index_near_fragment : worst_index;
}
/**********************************************************************
 * select_blob_to_split_from_fixpt
 *
 * Given the fix point from a dictionary search, if there is a single
 * dangerous blob that maps to multiple characters, return that blob
 * index as a place we need to split. If none, return -1.
 **********************************************************************/
int Wordrec::select_blob_to_split_from_fixpt(DANGERR *fixpt) {
  if (fixpt == nullptr) {
    return -1;
  }
  for (auto &entry : *fixpt) {
    // Only a one-blob (begin+1 == end) dangerous n-gram ambiguity
    // identifies a definite split point.
    const bool single_blob = (entry.begin + 1 == entry.end);
    if (single_blob && entry.dangerous && entry.correct_is_ngram) {
      return entry.begin;
    }
  }
  return -1;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/chopper.cpp
|
C++
|
apache-2.0
| 22,305
|
/**********************************************************************
* File: drawfx.cpp
* Description: Draw things to do with feature extraction.
* Author: Ray Smith
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "drawfx.h"
#include "normalis.h"
#include "werd.h"
namespace tesseract {
#ifndef GRAPHICS_DISABLED
# define FXDEMOWIN "FXDemo"
# define FXDEMOXPOS 250
# define FXDEMOYPOS 0
# define FXDEMOXSIZE 600
# define FXDEMOYSIZE 256
# define BLN_MAX 512 // max coord for bln
# define WERDWIDTH (BLN_MAX * 20)
// title of window
# define DEBUG_WIN_NAME "FXDebug"
ScrollView *fx_win = nullptr;
/**********************************************************************
 * create_fx_win
 *
 * Create the fx window used to show the fit.
 **********************************************************************/
void create_fx_win() { // make features win
  // Allocates the global demo window with a canvas twice the nominal
  // word width and bln coordinate limit.
  fx_win = new ScrollView(FXDEMOWIN, FXDEMOXPOS, FXDEMOYPOS, FXDEMOXSIZE, FXDEMOYSIZE,
                          WERDWIDTH * 2, BLN_MAX * 2, true);
}
/**********************************************************************
 * clear_fx_win
 *
 * Clear the fx window and draw on the base/mean lines.
 **********************************************************************/
void clear_fx_win() { // make features win
  fx_win->Clear();
  // Draw the baseline and the x-height line in dark grey as references.
  fx_win->Pen(64, 64, 64);
  fx_win->Line(-WERDWIDTH, kBlnBaselineOffset, WERDWIDTH, kBlnBaselineOffset);
  fx_win->Line(-WERDWIDTH, kBlnXHeight + kBlnBaselineOffset, WERDWIDTH,
               kBlnXHeight + kBlnBaselineOffset);
}
#endif // !GRAPHICS_DISABLED
/**********************************************************************
 * create_fxdebug_win
 *
 * Create the fx window used to show the fit.
 **********************************************************************/
void create_fxdebug_win() { // make gradients win
  // Intentionally a no-op: no debug window is created here.
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/drawfx.cpp
|
C++
|
apache-2.0
| 2,569
|
/**********************************************************************
* File: drawfx.h
* Description: Draw things to do with feature extraction.
* Author: Ray Smith
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef DRAWFX_H
#define DRAWFX_H
#include "params.h"
#include "scrollview.h"
namespace tesseract {
#ifndef GRAPHICS_DISABLED
extern ScrollView *fx_win;
#endif // !GRAPHICS_DISABLED
void create_fx_win(); // make features win
void clear_fx_win(); // make features win
void create_fxdebug_win(); // make gradients win
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/wordrec/drawfx.h
|
C++
|
apache-2.0
| 1,224
|
/******************************************************************************
*
* File: findseam.cpp (Formerly findseam.c)
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "findseam.h"
#include "outlines.h"
#include "plotedges.h"
#include "seam.h"
#include "wordrec.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
/**********************************************************************
* partial_split_priority
*
* Assign a priority to this split based on the features that it has.
* Grade it according to the different rating schemes and return the
* value of its goodness.
**********************************************************************/
#define partial_split_priority(split) (grade_split_length(split) + grade_sharpness(split))
/*----------------------------------------------------------------------
T y p e s
----------------------------------------------------------------------*/
#define SPLIT_CLOSENESS 20 /* Difference in x value */
/* How many to keep */
#define MAX_NUM_SEAMS 150
/* How many to keep */
#define NO_FULL_PRIORITY (-1) // Special marker for pri.
/* Evaluate right away */
#define BAD_PRIORITY 9999.0
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
namespace tesseract {
/**********************************************************************
 * add_seam_to_queue
 *
 * Adds the given new_seam to the seams priority queue, unless it is full
 * and the new seam is worse than the worst.
 * Takes ownership of new_seam: it is either pushed onto the queue or
 * deleted here.
 **********************************************************************/
void Wordrec::add_seam_to_queue(float new_priority, SEAM *new_seam, SeamQueue *seams) {
  if (new_seam == nullptr) {
    return;
  }
  if (chop_debug) {
    tprintf("Pushing new seam with priority %g :", new_priority);
    new_seam->Print("seam: ");
  }
  if (seams->size() >= MAX_NUM_SEAMS) {
    // Queue is full: keep the new seam only if it beats the current worst.
    SeamPair old_pair(0, nullptr);
    if (seams->PopWorst(&old_pair) && old_pair.key() <= new_priority) {
      if (chop_debug) {
        tprintf("Old seam staying with priority %g\n", old_pair.key());
      }
      delete new_seam;
      // Put the surviving worst entry back on the queue.
      seams->Push(&old_pair);
      return;
    } else if (chop_debug) {
      tprintf("New seam with priority %g beats old worst seam with %g\n", new_priority,
              old_pair.key());
    }
  }
  SeamPair new_pair(new_priority, new_seam);
  seams->Push(&new_pair);
}
/**********************************************************************
 * choose_best_seam
 *
 * Choose the best seam that can be created by assembling this a
 * collection of splits. A queue of all the possible seams is
 * maintained. Each new split received is placed in that queue with
 * its partial priority value. These values in the seam queue are
 * evaluated and combined until a good enough seam is found. If no
 * further good seams are being found then this function returns to the
 * caller, who will send more splits. If this function is called with
 * a split of nullptr, then no further splits can be supplied by the
 * caller.
 **********************************************************************/
void Wordrec::choose_best_seam(SeamQueue *seam_queue, const SPLIT *split, PRIORITY priority,
                               SEAM **seam_result, TBLOB *blob, SeamPile *seam_pile) {
  SEAM *seam;
  float my_priority;
  /* Add seam of split */
  my_priority = priority;
  if (split != nullptr) {
    // Wrap the new split in a seam located at the split's midpoint and
    // queue it with its partial (split-only) priority.
    TPOINT split_point = split->point1->pos;
    split_point += split->point2->pos;
    split_point /= 2;
    seam = new SEAM(my_priority, split_point, *split);
    if (chop_debug > 1) {
      seam->Print("Partial priority ");
    }
    add_seam_to_queue(my_priority, seam, seam_queue);
    if (my_priority > chop_good_split) {
      return;
    }
  }
  TBOX bbox = blob->bounding_box();
  /* Queue loop */
  while (!seam_queue->empty()) {
    SeamPair seam_pair;
    seam_queue->Pop(&seam_pair);
    // We now own the popped seam and must delete or re-home it below.
    seam = seam_pair.extract_data();
    /* Set full priority */
    my_priority =
        seam->FullPriority(bbox.left(), bbox.right(), chop_overlap_knob, chop_centered_maxwidth,
                           chop_center_knob, chop_width_change_knob);
    if (chop_debug) {
      char str[80];
      snprintf(str, sizeof(str), "Full my_priority %0.0f, ", my_priority);
      seam->Print(str);
    }
    if ((*seam_result == nullptr || (*seam_result)->priority() > my_priority) &&
        my_priority < chop_ok_split) {
      /* No crossing */
      // This seam beats the best so far; keep a copy if it is healthy.
      if (seam->IsHealthy(*blob, chop_min_outline_points, chop_min_outline_area)) {
        delete *seam_result;
        *seam_result = new SEAM(*seam);
        (*seam_result)->set_priority(my_priority);
      } else {
        delete seam;
        seam = nullptr;
        my_priority = BAD_PRIORITY;
      }
    }
    if (my_priority < chop_good_split) {
      delete seam;
      return; /* Made good answer */
    }
    if (seam) {
      /* Combine with others */
      // Keep the seam in the bounded pile so later seams can be combined
      // with it, evicting the worst pile entry when necessary.
      if (seam_pile->size() < chop_seam_pile_size) {
        combine_seam(*seam_pile, seam, seam_queue);
        SeamDecPair pair(seam_pair.key(), seam);
        seam_pile->Push(&pair);
      } else if (chop_new_seam_pile && seam_pile->size() == chop_seam_pile_size &&
                 seam_pile->PeekTop().key() > seam_pair.key()) {
        combine_seam(*seam_pile, seam, seam_queue);
        SeamDecPair pair;
        seam_pile->Pop(&pair); // pop the worst.
        // Replace the seam in pair (deleting the old one) with
        // the new seam and score, then push back into the heap.
        pair.set_key(seam_pair.key());
        pair.set_data(seam);
        seam_pile->Push(&pair);
      } else {
        delete seam;
      }
    }
    // Stop early when the best remaining queue entry cannot do better.
    my_priority = seam_queue->empty() ? NO_FULL_PRIORITY : seam_queue->PeekTop().key();
    if ((my_priority > chop_ok_split) || (my_priority > chop_good_split && split)) {
      return;
    }
  }
}
/**********************************************************************
* combine_seam
*
* Find other seams to combine with this one. The new seams that result
* from this union should be added to the seam queue. The return value
* tells whether or not any additional seams were added to the queue.
**********************************************************************/
void Wordrec::combine_seam(const SeamPile &seam_pile, const SEAM *seam, SeamQueue *seam_queue) {
for (int x = 0; x < seam_pile.size(); ++x) {
const SEAM *this_one = seam_pile.get(x).data();
if (seam->CombineableWith(*this_one, SPLIT_CLOSENESS, chop_ok_split)) {
SEAM *new_one = new SEAM(*seam);
new_one->CombineWith(*this_one);
if (chop_debug > 1) {
new_one->Print("Combo priority ");
}
add_seam_to_queue(new_one->priority(), new_one, seam_queue);
}
}
}
/**********************************************************************
 * pick_good_seam
 *
 * Find and return a good seam that will split this blob into two pieces.
 * Work from the outlines provided.
 * Returns nullptr if no seam of acceptable priority can be found; the
 * caller owns the returned SEAM.
 **********************************************************************/
SEAM *Wordrec::pick_good_seam(TBLOB *blob) {
  SeamPile seam_pile(chop_seam_pile_size);
  EDGEPT *points[MAX_NUM_POINTS];
  EDGEPT_CLIST new_points;
  SEAM *seam = nullptr;
  TESSLINE *outline;
  int16_t num_points = 0;
#ifndef GRAPHICS_DISABLED
  if (chop_debug > 2) {
    wordrec_display_splits.set_value(true);
  }
  draw_blob_edges(blob);
#endif
  // Gather up to MAX_NUM_POINTS candidate split points, best first.
  PointHeap point_heap(MAX_NUM_POINTS);
  for (outline = blob->outlines; outline; outline = outline->next) {
    prioritize_points(outline, &point_heap);
  }
  while (!point_heap.empty() && num_points < MAX_NUM_POINTS) {
    points[num_points++] = point_heap.PeekTop().data();
    point_heap.Pop(nullptr);
  }
  /* Initialize queue */
  SeamQueue seam_queue(MAX_NUM_SEAMS);
  try_point_pairs(points, num_points, &seam_queue, &seam_pile, &seam, blob);
  try_vertical_splits(points, num_points, &new_points, &seam_queue, &seam_pile, &seam, blob);
  // If no good seam yet, let the queue be drained for the best remaining one.
  if (seam == nullptr) {
    choose_best_seam(&seam_queue, nullptr, BAD_PRIORITY, &seam, blob, &seam_pile);
  } else if (seam->priority() > chop_good_split) {
    choose_best_seam(&seam_queue, nullptr, seam->priority(), &seam, blob, &seam_pile);
  }
  // Remove points inserted by try_vertical_splits that the chosen seam does
  // not use, taking care not to leave an outline loop pointer dangling.
  EDGEPT_C_IT it(&new_points);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    EDGEPT *inserted_point = it.data();
    if (seam == nullptr || !seam->UsesPoint(inserted_point)) {
      for (outline = blob->outlines; outline; outline = outline->next) {
        if (outline->loop == inserted_point) {
          outline->loop = outline->loop->next;
        }
      }
      remove_edgept(inserted_point);
    }
  }
  if (seam) {
    // Discard seams that are not good enough to act on.
    if (seam->priority() > chop_ok_split) {
      delete seam;
      seam = nullptr;
    }
#ifndef GRAPHICS_DISABLED
    else if (wordrec_display_splits) {
      seam->Mark(edge_window);
      if (chop_debug > 2) {
        edge_window->Update();
        edge_window->Wait();
      }
    }
#endif
  }
  if (chop_debug) {
    wordrec_display_splits.set_value(false);
  }
  return (seam);
}
/**********************************************************************
 * try_point_pairs
 *
 * Try all the splits that are produced by pairing critical points
 * together. See if any of them are suitable for use. Use a seam
 * queue and seam pile that have already been initialized and used.
 **********************************************************************/
void Wordrec::try_point_pairs(EDGEPT *points[MAX_NUM_POINTS], int16_t num_points,
                              SeamQueue *seam_queue, SeamPile *seam_pile, SEAM **seam,
                              TBLOB *blob) {
  for (int16_t i = 0; i < num_points; i++) {
    for (int16_t j = i + 1; j < num_points; j++) {
      if (points[j] == nullptr) {
        continue;
      }
      // Reject pairs that are too far apart, adjacent on the outline, or
      // whose connecting split would lie outside the outline.
      if (points[i]->WeightedDistance(*points[j], chop_x_y_weight) >= chop_split_length ||
          points[i] == points[j]->next || points[j] == points[i]->next ||
          is_exterior_point(points[i], points[j]) || is_exterior_point(points[j], points[i])) {
        continue;
      }
      SPLIT split(points[i], points[j]);
      PRIORITY priority = partial_split_priority(&split);
      choose_best_seam(seam_queue, &split, priority, seam, blob, seam_pile);
    }
  }
}
/**********************************************************************
 * try_vertical_splits
 *
 * Try all the splits that are produced by vertical projection to see
 * if any of them are suitable for use. Use a seam queue and seam pile
 * that have already been initialized and used.
 * Return in new_points a collection of points that were inserted into
 * the blob while examining vertical splits and which may safely be
 * removed once a seam is chosen if they are not part of the seam.
 **********************************************************************/
void Wordrec::try_vertical_splits(EDGEPT *points[MAX_NUM_POINTS], int16_t num_points,
                                  EDGEPT_CLIST *new_points, SeamQueue *seam_queue,
                                  SeamPile *seam_pile, SEAM **seam, TBLOB *blob) {
  EDGEPT *vertical_point = nullptr;
  int16_t x;
  PRIORITY priority;
  TESSLINE *outline;
  for (x = 0; x < num_points; x++) {
    // Find the point vertically opposite points[x] on any outline,
    // inserting a new edge point into the blob if necessary.
    vertical_point = nullptr;
    for (outline = blob->outlines; outline; outline = outline->next) {
      vertical_projection_point(points[x], outline->loop, &vertical_point, new_points);
    }
    // Only try pairs that are non-adjacent on the outline and close enough.
    if (vertical_point && points[x] != vertical_point->next && vertical_point != points[x]->next &&
        points[x]->WeightedDistance(*vertical_point, chop_x_y_weight) < chop_split_length) {
      SPLIT split(points[x], vertical_point);
      priority = partial_split_priority(&split);
      choose_best_seam(seam_queue, &split, priority, seam, blob, seam_pile);
    }
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/findseam.cpp
|
C++
|
apache-2.0
| 12,836
|
/******************************************************************************
*
* File: findseam.h
* Author: Mark Seaman, SW Productivity
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#ifndef FINDSEAM_H
#define FINDSEAM_H
#include "chop.h"
#include "genericheap.h"
#include "kdpair.h"
#include "seam.h"
namespace tesseract {
// Heap typedefs used by the seam search in findseam.cpp.
// KDPtrPairInc/KDPtrPairDec (kdpair.h) select opposite key orderings, so the
// two heaps below prioritize in opposite directions.
// The SeamPair elements own their SEAMs and delete them upon destruction.
using SeamPair = KDPtrPairInc<float, SEAM>;
using SeamQueue = GenericHeap<SeamPair>;
// NOTE(review): SeamDecPair appears to be used for the pile of
// already-considered seams -- confirm against findseam.cpp.
using SeamDecPair = KDPtrPairDec<float, SEAM>;
using SeamPile = GenericHeap<SeamDecPair>;
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/wordrec/findseam.h
|
C++
|
apache-2.0
| 1,273
|
/******************************************************************************
*
* File: gradechop.cpp (Formerly gradechop.c)
* Description:
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include <algorithm>
#include <cmath>
#include "chop.h"
#include "wordrec.h"
/*----------------------------------------------------------------------
M a c r o s
----------------------------------------------------------------------*/
namespace tesseract {
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**********************************************************************
* grade_split_length
*
* Return a grade for the length of this split.
* 0 = "perfect"
* 100 = "no way jay"
**********************************************************************/
PRIORITY Wordrec::grade_split_length(SPLIT *split) {
  // Grade the weighted length of the split: 0 is "perfect", larger values
  // are worse. Non-positive lengths grade as perfect.
  const float length = split->point1->WeightedDistance(*split->point2, chop_x_y_weight);
  if (length <= 0) {
    return std::max(0.0f, static_cast<PRIORITY>(0));
  }
  const PRIORITY graded = std::sqrt(length) * chop_split_dist_knob;
  return std::max(0.0f, graded);
}
/**********************************************************************
* grade_sharpness
*
* Return a grade for the sharpness of this split.
* 0 = "perfect"
* 100 = "no way jay"
**********************************************************************/
PRIORITY Wordrec::grade_sharpness(SPLIT *split) {
  // Sum the angle priorities of both endpoints (values range 0 to -360),
  // then shift into the non-negative range; anything below -360 grades as 0.
  PRIORITY total = point_priority(split->point1) + point_priority(split->point2);
  if (total < -360.0) {
    total = 0;
  } else {
    total += 360.0;
  }
  return total * chop_sharpness_knob;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/gradechop.cpp
|
C++
|
apache-2.0
| 2,704
|
///////////////////////////////////////////////////////////////////////
// File: language_model.cpp
// Description: Functions that utilize the knowledge about the properties,
// structure and statistics of the language to help recognition.
// Author: Daria Antonova
//
// (C) Copyright 2009, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "language_model.h"
#include <tesseract/unichar.h> // for UNICHAR_ID, INVALID_UNICHAR_ID
#include <cassert> // for assert
#include <cmath> // for log2, pow
#include "blamer.h" // for BlamerBundle
#include "ccutil.h" // for CCUtil
#include "dawg.h" // for NO_EDGE, Dawg, Dawg::kPatternUn...
#include "errcode.h" // for ASSERT_HOST
#include "lm_state.h" // for ViterbiStateEntry, ViterbiState...
#include "matrix.h" // for MATRIX_COORD
#include "pageres.h" // for WERD_RES
#include "params.h" // for IntParam, BoolParam, DoubleParam
#include "params_training_featdef.h" // for ParamsTrainingHypothesis, PTRAI...
#include "tprintf.h" // for tprintf
#include "unicharset.h" // for UNICHARSET
#include "unicity_table.h" // for UnicityTable
// Forward declaration only; the full definition lives in unicity_table.h.
template <typename T>
class UnicityTable;
namespace tesseract {
class LMPainPoints;
struct FontInfo;
#if defined(ANDROID)
// Some Android toolchains lack log2(); emulate it via log2(n) = ln(n)/ln(2).
static inline double log2(double n) {
  return log(n) / log(2.0);
}
#endif // ANDROID
// Out-of-class definition of the constant declared in language_model.h.
const float LanguageModel::kMaxAvgNgramCost = 25.0f;
// Constructs the language model and registers every tunable parameter with
// the Dict's CCUtil parameter list so they can be set from config files.
// Does not take ownership of fontinfo_table; dict must be non-null.
LanguageModel::LanguageModel(const UnicityTable<FontInfo> *fontinfo_table, Dict *dict)
    : INT_MEMBER(language_model_debug_level, 0, "Language model debug level",
                 dict->getCCUtil()->params())
    , BOOL_INIT_MEMBER(language_model_ngram_on, false,
                       "Turn on/off the use of character ngram model", dict->getCCUtil()->params())
    , INT_MEMBER(language_model_ngram_order, 8, "Maximum order of the character ngram model",
                 dict->getCCUtil()->params())
    , INT_MEMBER(language_model_viterbi_list_max_num_prunable, 10,
                 "Maximum number of prunable (those for which"
                 " PrunablePath() is true) entries in each viterbi list"
                 " recorded in BLOB_CHOICEs",
                 dict->getCCUtil()->params())
    , INT_MEMBER(language_model_viterbi_list_max_size, 500,
                 "Maximum size of viterbi lists recorded in BLOB_CHOICEs",
                 dict->getCCUtil()->params())
    , double_MEMBER(language_model_ngram_small_prob, 0.000001,
                    "To avoid overly small denominators use this as the "
                    "floor of the probability returned by the ngram model.",
                    dict->getCCUtil()->params())
    , double_MEMBER(language_model_ngram_nonmatch_score, -40.0,
                    "Average classifier score of a non-matching unichar.",
                    dict->getCCUtil()->params())
    , BOOL_MEMBER(language_model_ngram_use_only_first_uft8_step, false,
                  "Use only the first UTF8 step of the given string"
                  " when computing log probabilities.",
                  dict->getCCUtil()->params())
    , double_MEMBER(language_model_ngram_scale_factor, 0.03,
                    "Strength of the character ngram model relative to the"
                    " character classifier ",
                    dict->getCCUtil()->params())
    , double_MEMBER(language_model_ngram_rating_factor, 16.0,
                    "Factor to bring log-probs into the same range as ratings"
                    " when multiplied by outline length ",
                    dict->getCCUtil()->params())
    , BOOL_MEMBER(language_model_ngram_space_delimited_language, true,
                  "Words are delimited by space", dict->getCCUtil()->params())
    , INT_MEMBER(language_model_min_compound_length, 3, "Minimum length of compound words",
                 dict->getCCUtil()->params())
    , double_MEMBER(language_model_penalty_non_freq_dict_word, 0.1,
                    "Penalty for words not in the frequent word dictionary",
                    dict->getCCUtil()->params())
    , double_MEMBER(language_model_penalty_non_dict_word, 0.15, "Penalty for non-dictionary words",
                    dict->getCCUtil()->params())
    , double_MEMBER(language_model_penalty_punc, 0.2, "Penalty for inconsistent punctuation",
                    dict->getCCUtil()->params())
    , double_MEMBER(language_model_penalty_case, 0.1, "Penalty for inconsistent case",
                    dict->getCCUtil()->params())
    , double_MEMBER(language_model_penalty_script, 0.5, "Penalty for inconsistent script",
                    dict->getCCUtil()->params())
    , double_MEMBER(language_model_penalty_chartype, 0.3, "Penalty for inconsistent character type",
                    dict->getCCUtil()->params())
    ,
    // TODO(daria, rays): enable font consistency checking
    // after improving font analysis.
    double_MEMBER(language_model_penalty_font, 0.00, "Penalty for inconsistent font",
                  dict->getCCUtil()->params())
    , double_MEMBER(language_model_penalty_spacing, 0.05, "Penalty for inconsistent spacing",
                    dict->getCCUtil()->params())
    , double_MEMBER(language_model_penalty_increment, 0.01, "Penalty increment",
                    dict->getCCUtil()->params())
    , INT_MEMBER(wordrec_display_segmentations, 0, "Display Segmentations (ScrollView)",
                 dict->getCCUtil()->params())
    , BOOL_INIT_MEMBER(language_model_use_sigmoidal_certainty, false,
                       "Use sigmoidal score for certainty", dict->getCCUtil()->params())
    // dawg_args_.updated_dawgs is heap-allocated here and released in
    // the destructor.
    , dawg_args_(nullptr, new DawgPositionVector(), NO_PERM)
    , fontinfo_table_(fontinfo_table)
    , dict_(dict) {
  ASSERT_HOST(dict_ != nullptr);
}
LanguageModel::~LanguageModel() {
  // Owned: allocated with new in the constructor's dawg_args_ initializer.
  delete dawg_args_.updated_dawgs;
}
// Resets per-word state before recognizing a new word: pitch/aspect
// parameters, dawg starting positions, and (when the ngram model is on)
// the previous-word string used as ngram context.
void LanguageModel::InitForWord(const WERD_CHOICE *prev_word, bool fixed_pitch,
                                float max_char_wh_ratio, float rating_cert_scale) {
  fixed_pitch_ = fixed_pitch;
  max_char_wh_ratio_ = max_char_wh_ratio;
  rating_cert_scale_ = rating_cert_scale;
  acceptable_choice_found_ = false;
  correct_segmentation_explored_ = false;
  // Seed the active dawg vectors for the beginning of the word.
  very_beginning_active_dawgs_.clear();
  dict_->init_active_dawgs(&very_beginning_active_dawgs_, false);
  beginning_active_dawgs_.clear();
  dict_->default_dawgs(&beginning_active_dawgs_, false);
  if (!language_model_ngram_on) {
    return;
  }
  // Record the previous word (or a single space) as ngram context,
  // appending a separating space for space-delimited languages.
  if (prev_word != nullptr && !prev_word->unichar_string().empty()) {
    prev_word_str_ = prev_word->unichar_string();
    if (language_model_ngram_space_delimited_language) {
      prev_word_str_ += ' ';
    }
  } else {
    prev_word_str_ = " ";
  }
  // Count the UTF-8 characters in the recorded context; utf8_step()
  // returns 0 on malformed input, which the final assert catches.
  const char *cursor = prev_word_str_.c_str();
  const char *finish = cursor + prev_word_str_.length();
  prev_word_unichar_step_len_ = 0;
  for (int step; cursor != finish && (step = UNICHAR::utf8_step(cursor)) != 0;) {
    cursor += step;
    ++prev_word_unichar_step_len_;
  }
  ASSERT_HOST(cursor == finish);
}
/**
* Helper scans the collection of predecessors for competing siblings that
* have the same letter with the opposite case, setting competing_vse.
*/
static void ScanParentsForCaseMix(const UNICHARSET &unicharset, LanguageModelState *parent_node) {
if (parent_node == nullptr) {
return;
}
ViterbiStateEntry_IT vit(&parent_node->viterbi_state_entries);
for (vit.mark_cycle_pt(); !vit.cycled_list(); vit.forward()) {
ViterbiStateEntry *vse = vit.data();
vse->competing_vse = nullptr;
UNICHAR_ID unichar_id = vse->curr_b->unichar_id();
if (unicharset.get_isupper(unichar_id) || unicharset.get_islower(unichar_id)) {
UNICHAR_ID other_case = unicharset.get_other_case(unichar_id);
if (other_case == unichar_id) {
continue; // Not in unicharset.
}
// Find other case in same list. There could be multiple entries with
// the same unichar_id, but in theory, they should all point to the
// same BLOB_CHOICE, and that is what we will be using to decide
// which to keep.
ViterbiStateEntry_IT vit2(&parent_node->viterbi_state_entries);
for (vit2.mark_cycle_pt();
!vit2.cycled_list() && vit2.data()->curr_b->unichar_id() != other_case; vit2.forward()) {
}
if (!vit2.cycled_list()) {
vse->competing_vse = vit2.data();
}
}
}
}
/**
* Helper returns true if the given choice has a better case variant before
* it in the choice_list that is not distinguishable by size.
*/
static bool HasBetterCaseVariant(const UNICHARSET &unicharset, const BLOB_CHOICE *choice,
BLOB_CHOICE_LIST *choices) {
UNICHAR_ID choice_id = choice->unichar_id();
UNICHAR_ID other_case = unicharset.get_other_case(choice_id);
if (other_case == choice_id || other_case == INVALID_UNICHAR_ID) {
return false; // Not upper or lower or not in unicharset.
}
if (unicharset.SizesDistinct(choice_id, other_case)) {
return false; // Can be separated by size.
}
BLOB_CHOICE_IT bc_it(choices);
for (bc_it.mark_cycle_pt(); !bc_it.cycled_list(); bc_it.forward()) {
BLOB_CHOICE *better_choice = bc_it.data();
if (better_choice->unichar_id() == other_case) {
return true; // Found an earlier instance of other_case.
} else if (better_choice == choice) {
return false; // Reached the original choice.
}
}
return false; // Should never happen, but just in case.
}
/**
* UpdateState has the job of combining the ViterbiStateEntry lists on each
* of the choices on parent_list with each of the blob choices in curr_list,
* making a new ViterbiStateEntry for each sensible path.
*
* This could be a huge set of combinations, creating a lot of work only to
* be truncated by some beam limit, but only certain kinds of paths will
* continue at the next step:
* - paths that are liked by the language model: either a DAWG or the n-gram
* model, where active.
* - paths that represent some kind of top choice. The old permuter permuted
* the top raw classifier score, the top upper case word and the top lower-
* case word. UpdateState now concentrates its top-choice paths on top
* lower-case, top upper-case (or caseless alpha), and top digit sequence,
* with allowance for continuation of these paths through blobs where such
* a character does not appear in the choices list.
*
* GetNextParentVSE enforces some of these models to minimize the number of
* calls to AddViterbiStateEntry, even prior to looking at the language model.
* Thus an n-blob sequence of [l1I] will produce 3n calls to
* AddViterbiStateEntry instead of 3^n.
*
* Of course it isn't quite that simple as Title Case is handled by allowing
* lower case to continue an upper case initial, but it has to be detected
* in the combiner so it knows which upper case letters are initial alphas.
*/
bool LanguageModel::UpdateState(bool just_classified, int curr_col, int curr_row,
BLOB_CHOICE_LIST *curr_list, LanguageModelState *parent_node,
LMPainPoints *pain_points, WERD_RES *word_res,
BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle) {
if (language_model_debug_level > 0) {
tprintf("\nUpdateState: col=%d row=%d %s", curr_col, curr_row,
just_classified ? "just_classified" : "");
if (language_model_debug_level > 5) {
tprintf("(parent=%p)\n", static_cast<void *>(parent_node));
} else {
tprintf("\n");
}
}
// Initialize helper variables.
bool word_end = (curr_row + 1 >= word_res->ratings->dimension());
bool new_changed = false;
float denom = (language_model_ngram_on) ? ComputeDenom(curr_list) : 1.0f;
const UNICHARSET &unicharset = dict_->getUnicharset();
BLOB_CHOICE *first_lower = nullptr;
BLOB_CHOICE *first_upper = nullptr;
BLOB_CHOICE *first_digit = nullptr;
bool has_alnum_mix = false;
if (parent_node != nullptr) {
int result = SetTopParentLowerUpperDigit(parent_node);
if (result < 0) {
if (language_model_debug_level > 0) {
tprintf("No parents found to process\n");
}
return false;
}
if (result > 0) {
has_alnum_mix = true;
}
}
if (!GetTopLowerUpperDigit(curr_list, &first_lower, &first_upper, &first_digit)) {
has_alnum_mix = false;
};
ScanParentsForCaseMix(unicharset, parent_node);
if (language_model_debug_level > 3 && parent_node != nullptr) {
parent_node->Print("Parent viterbi list");
}
LanguageModelState *curr_state = best_choice_bundle->beam[curr_row];
// Call AddViterbiStateEntry() for each parent+child ViterbiStateEntry.
ViterbiStateEntry_IT vit;
BLOB_CHOICE_IT c_it(curr_list);
for (c_it.mark_cycle_pt(); !c_it.cycled_list(); c_it.forward()) {
BLOB_CHOICE *choice = c_it.data();
// TODO(antonova): make sure commenting this out if ok for ngram
// model scoring (I think this was introduced to fix ngram model quirks).
// Skip nullptr unichars unless it is the only choice.
// if (!curr_list->singleton() && c_it.data()->unichar_id() == 0) continue;
UNICHAR_ID unichar_id = choice->unichar_id();
if (unicharset.get_fragment(unichar_id)) {
continue; // Skip fragments.
}
// Set top choice flags.
LanguageModelFlagsType blob_choice_flags = kXhtConsistentFlag;
if (c_it.at_first() || !new_changed) {
blob_choice_flags |= kSmallestRatingFlag;
}
if (first_lower == choice) {
blob_choice_flags |= kLowerCaseFlag;
}
if (first_upper == choice) {
blob_choice_flags |= kUpperCaseFlag;
}
if (first_digit == choice) {
blob_choice_flags |= kDigitFlag;
}
if (parent_node == nullptr) {
// Process the beginning of a word.
// If there is a better case variant that is not distinguished by size,
// skip this blob choice, as we have no choice but to accept the result
// of the character classifier to distinguish between them, even if
// followed by an upper case.
// With words like iPoc, and other CamelBackWords, the lower-upper
// transition can only be achieved if the classifier has the correct case
// as the top choice, and leaving an initial I lower down the list
// increases the chances of choosing IPoc simply because it doesn't
// include such a transition. iPoc will beat iPOC and ipoc because
// the other words are baseline/x-height inconsistent.
if (HasBetterCaseVariant(unicharset, choice, curr_list)) {
continue;
}
// Upper counts as lower at the beginning of a word.
if (blob_choice_flags & kUpperCaseFlag) {
blob_choice_flags |= kLowerCaseFlag;
}
new_changed |= AddViterbiStateEntry(blob_choice_flags, denom, word_end, curr_col, curr_row,
choice, curr_state, nullptr, pain_points, word_res,
best_choice_bundle, blamer_bundle);
} else {
// Get viterbi entries from each parent ViterbiStateEntry.
vit.set_to_list(&parent_node->viterbi_state_entries);
int vit_counter = 0;
vit.mark_cycle_pt();
ViterbiStateEntry *parent_vse = nullptr;
LanguageModelFlagsType top_choice_flags;
while ((parent_vse =
GetNextParentVSE(just_classified, has_alnum_mix, c_it.data(), blob_choice_flags,
unicharset, word_res, &vit, &top_choice_flags)) != nullptr) {
// Skip pruned entries and do not look at prunable entries if already
// examined language_model_viterbi_list_max_num_prunable of those.
if (PrunablePath(*parent_vse) &&
(++vit_counter > language_model_viterbi_list_max_num_prunable ||
(language_model_ngram_on && parent_vse->ngram_info->pruned))) {
continue;
}
// If the parent has no alnum choice, (ie choice is the first in a
// string of alnum), and there is a better case variant that is not
// distinguished by size, skip this blob choice/parent, as with the
// initial blob treatment above.
if (!parent_vse->HasAlnumChoice(unicharset) &&
HasBetterCaseVariant(unicharset, choice, curr_list)) {
continue;
}
// Create a new ViterbiStateEntry if BLOB_CHOICE in c_it.data()
// looks good according to the Dawgs or character ngram model.
new_changed |= AddViterbiStateEntry(top_choice_flags, denom, word_end, curr_col, curr_row,
c_it.data(), curr_state, parent_vse, pain_points,
word_res, best_choice_bundle, blamer_bundle);
}
}
}
return new_changed;
}
/**
* Finds the first lower and upper case letter and first digit in curr_list.
* For non-upper/lower languages, alpha counts as upper.
* Uses the first character in the list in place of empty results.
* Returns true if both alpha and digits are found.
*/
bool LanguageModel::GetTopLowerUpperDigit(BLOB_CHOICE_LIST *curr_list, BLOB_CHOICE **first_lower,
                                          BLOB_CHOICE **first_upper,
                                          BLOB_CHOICE **first_digit) const {
  const UNICHARSET &unicharset = dict_->getUnicharset();
  BLOB_CHOICE *fallback = nullptr; // First non-fragment choice in the list.
  BLOB_CHOICE_IT it(curr_list);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    BLOB_CHOICE *candidate = it.data();
    const UNICHAR_ID id = candidate->unichar_id();
    if (unicharset.get_fragment(id)) {
      continue; // Fragments never qualify.
    }
    if (fallback == nullptr) {
      fallback = candidate;
    }
    if (*first_lower == nullptr && unicharset.get_islower(id)) {
      *first_lower = candidate;
    }
    // Non-lower alpha counts as upper, covering caseless scripts.
    if (*first_upper == nullptr && unicharset.get_isalpha(id) && !unicharset.get_islower(id)) {
      *first_upper = candidate;
    }
    if (*first_digit == nullptr && unicharset.get_isdigit(id)) {
      *first_digit = candidate;
    }
  }
  ASSERT_HOST(fallback != nullptr);
  // Mixed means both some alpha (either case) and a digit were present.
  const bool mixed =
      (*first_lower != nullptr || *first_upper != nullptr) && *first_digit != nullptr;
  if (*first_lower == nullptr) {
    *first_lower = fallback;
  }
  if (*first_upper == nullptr) {
    *first_upper = fallback;
  }
  if (*first_digit == nullptr) {
    *first_digit = fallback;
  }
  return mixed;
}
/**
* Forces there to be at least one entry in the overall set of the
* viterbi_state_entries of each element of parent_node that has the
* top_choice_flag set for lower, upper and digit using the same rules as
* GetTopLowerUpperDigit, setting the flag on the first found suitable
* candidate, whether or not the flag is set on some other parent.
* Returns 1 if both alpha and digits are found among the parents, -1 if no
* parents are found at all (a legitimate case), and 0 otherwise.
*/
int LanguageModel::SetTopParentLowerUpperDigit(LanguageModelState *parent_node) const {
  if (parent_node == nullptr) {
    return -1;
  }
  // Best (lowest-rating) entry found so far per category, plus the overall
  // best entry regardless of category.
  UNICHAR_ID top_id = INVALID_UNICHAR_ID;
  ViterbiStateEntry *top_lower = nullptr;
  ViterbiStateEntry *top_upper = nullptr;
  ViterbiStateEntry *top_digit = nullptr;
  ViterbiStateEntry *top_choice = nullptr;
  float lower_rating = 0.0f;
  float upper_rating = 0.0f;
  float digit_rating = 0.0f;
  float top_rating = 0.0f;
  const UNICHARSET &unicharset = dict_->getUnicharset();
  ViterbiStateEntry_IT vit(&parent_node->viterbi_state_entries);
  for (vit.mark_cycle_pt(); !vit.cycled_list(); vit.forward()) {
    ViterbiStateEntry *vse = vit.data();
    // INVALID_UNICHAR_ID should be treated like a zero-width joiner, so scan
    // back to the real character if needed.
    ViterbiStateEntry *unichar_vse = vse;
    UNICHAR_ID unichar_id = unichar_vse->curr_b->unichar_id();
    float rating = unichar_vse->curr_b->rating();
    while (unichar_id == INVALID_UNICHAR_ID && unichar_vse->parent_vse != nullptr) {
      unichar_vse = unichar_vse->parent_vse;
      unichar_id = unichar_vse->curr_b->unichar_id();
      rating = unichar_vse->curr_b->rating();
    }
    if (unichar_id != INVALID_UNICHAR_ID) {
      // Categorize as lower, (non-lower) alpha = upper, or digit; lower
      // ratings are better, so keep the minimum per category.
      if (unicharset.get_islower(unichar_id)) {
        if (top_lower == nullptr || lower_rating > rating) {
          top_lower = vse;
          lower_rating = rating;
        }
      } else if (unicharset.get_isalpha(unichar_id)) {
        if (top_upper == nullptr || upper_rating > rating) {
          top_upper = vse;
          upper_rating = rating;
        }
      } else if (unicharset.get_isdigit(unichar_id)) {
        if (top_digit == nullptr || digit_rating > rating) {
          top_digit = vse;
          digit_rating = rating;
        }
      }
    }
    // Track the overall lowest-rating entry (any category, even non-alnum).
    if (top_choice == nullptr || top_rating > rating) {
      top_choice = vse;
      top_rating = rating;
      top_id = unichar_id;
    }
  }
  if (top_choice == nullptr) {
    return -1;
  }
  // Mixed means both alpha (either case) and digit parents exist.
  bool mixed = (top_lower != nullptr || top_upper != nullptr) && top_digit != nullptr;
  // For any empty category, fall back to the overall top choice so every
  // flag is guaranteed to be set on some parent.
  if (top_lower == nullptr) {
    top_lower = top_choice;
  }
  top_lower->top_choice_flags |= kLowerCaseFlag;
  if (top_upper == nullptr) {
    top_upper = top_choice;
  }
  top_upper->top_choice_flags |= kUpperCaseFlag;
  if (top_digit == nullptr) {
    top_digit = top_choice;
  }
  top_digit->top_choice_flags |= kDigitFlag;
  top_choice->top_choice_flags |= kSmallestRatingFlag;
  if (top_id != INVALID_UNICHAR_ID && dict_->compound_marker(top_id) &&
      (top_choice->top_choice_flags & (kLowerCaseFlag | kUpperCaseFlag | kDigitFlag))) {
    // If the compound marker top choice carries any of the top alnum flags,
    // then give it all of them, allowing words like I-295 to be chosen.
    top_choice->top_choice_flags |= kLowerCaseFlag | kUpperCaseFlag | kDigitFlag;
  }
  return mixed ? 1 : 0;
}
/**
* Finds the next ViterbiStateEntry with which the given unichar_id can
* combine sensibly, taking into account any mixed alnum/mixed case
* situation, and whether this combination has been inspected before.
*/
ViterbiStateEntry *LanguageModel::GetNextParentVSE(bool just_classified, bool mixed_alnum,
                                                   const BLOB_CHOICE *bc,
                                                   LanguageModelFlagsType blob_choice_flags,
                                                   const UNICHARSET &unicharset, WERD_RES *word_res,
                                                   ViterbiStateEntry_IT *vse_it,
                                                   LanguageModelFlagsType *top_choice_flags) const {
  // Resumes from the caller-owned iterator, so successive calls enumerate
  // distinct parents; returns nullptr when the list is exhausted.
  for (; !vse_it->cycled_list(); vse_it->forward()) {
    ViterbiStateEntry *parent_vse = vse_it->data();
    // Only consider the parent if it has been updated or
    // if the current ratings cell has just been classified.
    if (!just_classified && !parent_vse->updated) {
      continue;
    }
    if (language_model_debug_level > 2) {
      parent_vse->Print("Considering");
    }
    // If the parent is non-alnum, then upper counts as lower.
    *top_choice_flags = blob_choice_flags;
    if ((blob_choice_flags & kUpperCaseFlag) && !parent_vse->HasAlnumChoice(unicharset)) {
      *top_choice_flags |= kLowerCaseFlag;
    }
    // Only flags shared with the parent survive along the path.
    *top_choice_flags &= parent_vse->top_choice_flags;
    UNICHAR_ID unichar_id = bc->unichar_id();
    const BLOB_CHOICE *parent_b = parent_vse->curr_b;
    UNICHAR_ID parent_id = parent_b->unichar_id();
    // Digits do not bind to alphas if there is a mix in both parent and current
    // or if the alpha is not the top choice.
    if (unicharset.get_isdigit(unichar_id) && unicharset.get_isalpha(parent_id) &&
        (mixed_alnum || *top_choice_flags == 0)) {
      continue; // Digits don't bind to alphas.
    }
    // Likewise alphas do not bind to digits if there is a mix in both or if
    // the digit is not the top choice.
    if (unicharset.get_isalpha(unichar_id) && unicharset.get_isdigit(parent_id) &&
        (mixed_alnum || *top_choice_flags == 0)) {
      continue; // Alphas don't bind to digits.
    }
    // If there is a case mix of the same alpha in the parent list, then
    // competing_vse is non-null and will be used to determine whether
    // or not to bind the current blob choice.
    if (parent_vse->competing_vse != nullptr) {
      const BLOB_CHOICE *competing_b = parent_vse->competing_vse->curr_b;
      UNICHAR_ID other_id = competing_b->unichar_id();
      if (language_model_debug_level >= 5) {
        tprintf("Parent %s has competition %s\n", unicharset.id_to_unichar(parent_id),
                unicharset.id_to_unichar(other_id));
      }
      if (unicharset.SizesDistinct(parent_id, other_id)) {
        // If other_id matches bc wrt position and size, and parent_id, doesn't,
        // don't bind to the current parent.
        if (bc->PosAndSizeAgree(*competing_b, word_res->x_height,
                                language_model_debug_level >= 5) &&
            !bc->PosAndSizeAgree(*parent_b, word_res->x_height, language_model_debug_level >= 5)) {
          continue; // Competing blobchoice has a better vertical match.
        }
      }
    }
    // Advance past the accepted parent so the next call resumes after it.
    vse_it->forward();
    return parent_vse; // This one is good!
  }
  return nullptr; // Ran out of possibilities.
}
// Creates a ViterbiStateEntry for blob choice b extending parent_vse (may be
// nullptr at word start), runs the Dawg/ngram/consistency components on it,
// and either records it in curr_state (returning true) or deletes it and
// returns false when the language model rejects or prunes it.
bool LanguageModel::AddViterbiStateEntry(LanguageModelFlagsType top_choice_flags, float denom,
                                         bool word_end, int curr_col, int curr_row, BLOB_CHOICE *b,
                                         LanguageModelState *curr_state,
                                         ViterbiStateEntry *parent_vse, LMPainPoints *pain_points,
                                         WERD_RES *word_res, BestChoiceBundle *best_choice_bundle,
                                         BlamerBundle *blamer_bundle) {
  ViterbiStateEntry_IT vit;
  if (language_model_debug_level > 1) {
    tprintf(
        "AddViterbiStateEntry for unichar %s rating=%.4f"
        " certainty=%.4f top_choice_flags=0x%x",
        dict_->getUnicharset().id_to_unichar(b->unichar_id()), b->rating(), b->certainty(),
        top_choice_flags);
    if (language_model_debug_level > 5) {
      tprintf(" parent_vse=%p\n", static_cast<void *>(parent_vse));
    } else {
      tprintf("\n");
    }
  }
  ASSERT_HOST(curr_state != nullptr);
  // Check whether the list is full.
  if (curr_state->viterbi_state_entries_length >= language_model_viterbi_list_max_size) {
    if (language_model_debug_level > 1) {
      tprintf("AddViterbiStateEntry: viterbi list is full!\n");
    }
    return false;
  }
  // Invoke Dawg language model component.
  LanguageModelDawgInfo *dawg_info = GenerateDawgInfo(word_end, curr_col, curr_row, *b, parent_vse);
  float outline_length = AssociateUtils::ComputeOutlineLength(rating_cert_scale_, *b);
  // Invoke Ngram language model component.
  LanguageModelNgramInfo *ngram_info = nullptr;
  if (language_model_ngram_on) {
    ngram_info =
        GenerateNgramInfo(dict_->getUnicharset().id_to_unichar(b->unichar_id()), b->certainty(),
                          denom, curr_col, curr_row, outline_length, parent_vse);
    ASSERT_HOST(ngram_info != nullptr);
  }
  bool liked_by_language_model =
      dawg_info != nullptr || (ngram_info != nullptr && !ngram_info->pruned);
  // Quick escape if not liked by the language model, can't be consistent
  // xheight, and not top choice. (dawg_info/ngram_info are owned here until
  // handed to the new ViterbiStateEntry, so delete on every early return.)
  if (!liked_by_language_model && top_choice_flags == 0) {
    if (language_model_debug_level > 1) {
      tprintf("Language model components very early pruned this entry\n");
    }
    delete ngram_info;
    delete dawg_info;
    return false;
  }
  // Check consistency of the path and set the relevant consistency_info.
  LMConsistencyInfo consistency_info(parent_vse != nullptr ? &parent_vse->consistency_info
                                                           : nullptr);
  // Start with just the x-height consistency, as it provides significant
  // pruning opportunity.
  consistency_info.ComputeXheightConsistency(
      b, dict_->getUnicharset().get_ispunctuation(b->unichar_id()));
  // Turn off xheight consistent flag if not consistent.
  if (consistency_info.InconsistentXHeight()) {
    top_choice_flags &= ~kXhtConsistentFlag;
  }
  // Quick escape if not liked by the language model, not consistent xheight,
  // and not top choice.
  if (!liked_by_language_model && top_choice_flags == 0) {
    if (language_model_debug_level > 1) {
      tprintf("Language model components early pruned this entry\n");
    }
    delete ngram_info;
    delete dawg_info;
    return false;
  }
  // Compute the rest of the consistency info.
  FillConsistencyInfo(curr_col, word_end, b, parent_vse, word_res, &consistency_info);
  if (dawg_info != nullptr && consistency_info.invalid_punc) {
    consistency_info.invalid_punc = false; // do not penalize dict words
  }
  // Compute cost of associating the blobs that represent the current unichar.
  AssociateStats associate_stats;
  ComputeAssociateStats(curr_col, curr_row, max_char_wh_ratio_, parent_vse, word_res,
                        &associate_stats);
  if (parent_vse != nullptr) {
    associate_stats.shape_cost += parent_vse->associate_stats.shape_cost;
    associate_stats.bad_shape |= parent_vse->associate_stats.bad_shape;
  }
  // Create the new ViterbiStateEntry compute the adjusted cost of the path.
  // The new entry takes ownership of dawg_info and ngram_info.
  auto *new_vse = new ViterbiStateEntry(parent_vse, b, 0.0, outline_length, consistency_info,
                                        associate_stats, top_choice_flags, dawg_info, ngram_info,
                                        (language_model_debug_level > 0)
                                            ? dict_->getUnicharset().id_to_unichar(b->unichar_id())
                                            : nullptr);
  new_vse->cost = ComputeAdjustedPathCost(new_vse);
  if (language_model_debug_level >= 3) {
    tprintf("Adjusted cost = %g\n", new_vse->cost);
  }
  // Invoke Top Choice language model component to make the final adjustments
  // to new_vse->top_choice_flags.
  if (!curr_state->viterbi_state_entries.empty() && new_vse->top_choice_flags) {
    GenerateTopChoiceInfo(new_vse, parent_vse, curr_state);
  }
  // If language model components did not like this unichar - return.
  bool keep = new_vse->top_choice_flags || liked_by_language_model;
  if (!(top_choice_flags & kSmallestRatingFlag) && // no non-top choice paths
      consistency_info.inconsistent_script) {      // with inconsistent script
    keep = false;
  }
  if (!keep) {
    if (language_model_debug_level > 1) {
      tprintf("Language model components did not like this entry\n");
    }
    delete new_vse;
    return false;
  }
  // Discard this entry if it represents a prunable path and
  // language_model_viterbi_list_max_num_prunable such entries with a lower
  // cost have already been recorded.
  if (PrunablePath(*new_vse) &&
      (curr_state->viterbi_state_entries_prunable_length >=
       language_model_viterbi_list_max_num_prunable) &&
      new_vse->cost >= curr_state->viterbi_state_entries_prunable_max_cost) {
    if (language_model_debug_level > 1) {
      tprintf("Discarded ViterbiEntry with high cost %g max cost %g\n", new_vse->cost,
              curr_state->viterbi_state_entries_prunable_max_cost);
    }
    delete new_vse;
    return false;
  }
  // Update best choice if needed.
  if (word_end) {
    UpdateBestChoice(new_vse, pain_points, word_res, best_choice_bundle, blamer_bundle);
    // Discard the entry if UpdateBestChoice() found flaws in it.
    if (new_vse->cost >= WERD_CHOICE::kBadRating && new_vse != best_choice_bundle->best_vse) {
      if (language_model_debug_level > 1) {
        tprintf("Discarded ViterbiEntry with high cost %g\n", new_vse->cost);
      }
      delete new_vse;
      return false;
    }
  }
  // Add the new ViterbiStateEntry to curr_state->viterbi_state_entries.
  curr_state->viterbi_state_entries.add_sorted(ViterbiStateEntry::Compare, false, new_vse);
  curr_state->viterbi_state_entries_length++;
  if (PrunablePath(*new_vse)) {
    curr_state->viterbi_state_entries_prunable_length++;
  }
  // Update lms->viterbi_state_entries_prunable_max_cost and clear
  // top_choice_flags of entries with cost higher than new_vse->cost.
  if ((curr_state->viterbi_state_entries_prunable_length >=
       language_model_viterbi_list_max_num_prunable) ||
      new_vse->top_choice_flags) {
    ASSERT_HOST(!curr_state->viterbi_state_entries.empty());
    int prunable_counter = language_model_viterbi_list_max_num_prunable;
    vit.set_to_list(&(curr_state->viterbi_state_entries));
    for (vit.mark_cycle_pt(); !vit.cycled_list(); vit.forward()) {
      ViterbiStateEntry *curr_vse = vit.data();
      // Clear the appropriate top choice flags of the entries in the
      // list that have cost higher than new_vse->cost
      // (since they will not be top choices any more).
      if (curr_vse->top_choice_flags && curr_vse != new_vse && curr_vse->cost > new_vse->cost) {
        curr_vse->top_choice_flags &= ~(new_vse->top_choice_flags);
      }
      if (prunable_counter > 0 && PrunablePath(*curr_vse)) {
        --prunable_counter;
      }
      // Update curr_state->viterbi_state_entries_prunable_max_cost.
      if (prunable_counter == 0) {
        curr_state->viterbi_state_entries_prunable_max_cost = vit.data()->cost;
        if (language_model_debug_level > 1) {
          tprintf("Set viterbi_state_entries_prunable_max_cost to %g\n",
                  curr_state->viterbi_state_entries_prunable_max_cost);
        }
        prunable_counter = -1; // stop counting
      }
    }
  }
  // Print the newly created ViterbiStateEntry.
  if (language_model_debug_level > 2) {
    new_vse->Print("New");
    if (language_model_debug_level > 5) {
      curr_state->Print("Updated viterbi list");
    }
  }
  return true;
}
// Clears from new_vse->top_choice_flags any top-choice bits already claimed
// by entries in lms->viterbi_state_entries with cost <= new_vse->cost.
// (The list is kept sorted by cost, so the scan stops at the first costlier
// entry or once no flags remain.)
void LanguageModel::GenerateTopChoiceInfo(ViterbiStateEntry *new_vse,
                                          const ViterbiStateEntry *parent_vse,
                                          LanguageModelState *lms) {
  ViterbiStateEntry_IT entry_it(&(lms->viterbi_state_entries));
  entry_it.mark_cycle_pt();
  while (!entry_it.cycled_list() && new_vse->top_choice_flags &&
         new_vse->cost >= entry_it.data()->cost) {
    // A cheaper (or equal-cost) existing entry owns these flags, so the new
    // entry cannot be the top choice for them.
    new_vse->top_choice_flags &= ~(entry_it.data()->top_choice_flags);
    entry_it.forward();
  }
  if (language_model_debug_level > 2) {
    tprintf("GenerateTopChoiceInfo: top_choice_flags=0x%x\n", new_vse->top_choice_flags);
  }
}
// Constructs a LanguageModelDawgInfo for the path extended by the choice b,
// or returns nullptr if the extended path cannot be a dictionary word.
// The active dawgs are inherited from parent_vse (or from
// very_beginning_active_dawgs_ when parent_vse is nullptr) and advanced via
// Dict::LetterIsOkay(). Hyphenated and compound words get special handling.
// Note: curr_row is currently unused. Fix: removed a stray empty statement
// ("};") after the continue-branch inside the word-ending scan.
LanguageModelDawgInfo *LanguageModel::GenerateDawgInfo(bool word_end, int curr_col, int curr_row,
                                                       const BLOB_CHOICE &b,
                                                       const ViterbiStateEntry *parent_vse) {
  // Initialize active_dawgs from parent_vse if it is not nullptr.
  // Otherwise use very_beginning_active_dawgs_.
  if (parent_vse == nullptr) {
    dawg_args_.active_dawgs = &very_beginning_active_dawgs_;
    dawg_args_.permuter = NO_PERM;
  } else {
    if (parent_vse->dawg_info == nullptr) {
      return nullptr; // not a dict word path
    }
    dawg_args_.active_dawgs = &parent_vse->dawg_info->active_dawgs;
    dawg_args_.permuter = parent_vse->dawg_info->permuter;
  }
  // Deal with hyphenated words.
  if (word_end && dict_->has_hyphen_end(&dict_->getUnicharset(), b.unichar_id(), curr_col == 0)) {
    if (language_model_debug_level > 0) {
      tprintf("Hyphenated word found\n");
    }
    return new LanguageModelDawgInfo(dawg_args_.active_dawgs, COMPOUND_PERM);
  }
  // Deal with compound words.
  if (dict_->compound_marker(b.unichar_id()) &&
      (parent_vse == nullptr || parent_vse->dawg_info->permuter != NUMBER_PERM)) {
    if (language_model_debug_level > 0) {
      tprintf("Found compound marker\n");
    }
    // Do not allow compound operators at the beginning and end of the word.
    // Do not allow more than one compound operator per word.
    // Do not allow compounding of words with lengths shorter than
    // language_model_min_compound_length
    if (parent_vse == nullptr || word_end || dawg_args_.permuter == COMPOUND_PERM ||
        parent_vse->length < language_model_min_compound_length) {
      return nullptr;
    }
    // Check that the path terminated before the current character is a word.
    bool has_word_ending = false;
    for (unsigned i = 0; i < parent_vse->dawg_info->active_dawgs.size(); ++i) {
      const DawgPosition &pos = parent_vse->dawg_info->active_dawgs[i];
      const Dawg *pdawg = pos.dawg_index < 0 ? nullptr : dict_->GetDawg(pos.dawg_index);
      if (pdawg == nullptr || pos.back_to_punc) {
        continue;
      }
      if (pdawg->type() == DAWG_TYPE_WORD && pos.dawg_ref != NO_EDGE &&
          pdawg->end_of_word(pos.dawg_ref)) {
        has_word_ending = true;
        break;
      }
    }
    if (!has_word_ending) {
      return nullptr;
    }
    if (language_model_debug_level > 0) {
      tprintf("Compound word found\n");
    }
    // Restart from the beginning-of-word dawgs for the second part.
    return new LanguageModelDawgInfo(&beginning_active_dawgs_, COMPOUND_PERM);
  } // done dealing with compound words
  LanguageModelDawgInfo *dawg_info = nullptr;
  // Call LetterIsOkay().
  // Use the normalized IDs so that all shapes of ' can be allowed in words
  // like don't.
  const auto &normed_ids = dict_->getUnicharset().normed_ids(b.unichar_id());
  DawgPositionVector tmp_active_dawgs;
  for (unsigned i = 0; i < normed_ids.size(); ++i) {
    if (language_model_debug_level > 2) {
      tprintf("Test Letter OK for unichar %d, normed %d\n", b.unichar_id(), normed_ids[i]);
    }
    dict_->LetterIsOkay(&dawg_args_, dict_->getUnicharset(), normed_ids[i],
                        word_end && i == normed_ids.size() - 1);
    if (dawg_args_.permuter == NO_PERM) {
      break;
    } else if (i < normed_ids.size() - 1) {
      // More normed components follow: feed the updated dawgs back in as the
      // active dawgs for the next component.
      tmp_active_dawgs = *dawg_args_.updated_dawgs;
      dawg_args_.active_dawgs = &tmp_active_dawgs;
    }
    if (language_model_debug_level > 2) {
      tprintf("Letter was OK for unichar %d, normed %d\n", b.unichar_id(), normed_ids[i]);
    }
  }
  // active_dawgs may point at the local tmp_active_dawgs; never leave it set.
  dawg_args_.active_dawgs = nullptr;
  if (dawg_args_.permuter != NO_PERM) {
    dawg_info = new LanguageModelDawgInfo(dawg_args_.updated_dawgs, dawg_args_.permuter);
  } else if (language_model_debug_level > 3) {
    tprintf("Letter %s not OK!\n", dict_->getUnicharset().id_to_unichar(b.unichar_id()));
  }
  return dawg_info;
}
// Computes the ngram component of the cost for the path extended by unichar
// and returns a new LanguageModelNgramInfo recording the accumulated costs
// and the (truncated to language_model_ngram_order) context string for the
// next step. certainty/denom feed the classifier part of the combined cost;
// outline_length normalizes the combined cost. curr_col/curr_row are unused
// here. Caller owns the returned object.
LanguageModelNgramInfo *LanguageModel::GenerateNgramInfo(const char *unichar, float certainty,
                                                         float denom, int curr_col, int curr_row,
                                                         float outline_length,
                                                         const ViterbiStateEntry *parent_vse) {
  // Initialize parent context.
  const char *pcontext_ptr = "";
  int pcontext_unichar_step_len = 0;
  if (parent_vse == nullptr) {
    // No parent: the context is the previously recognized word.
    pcontext_ptr = prev_word_str_.c_str();
    pcontext_unichar_step_len = prev_word_unichar_step_len_;
  } else {
    pcontext_ptr = parent_vse->ngram_info->context.c_str();
    pcontext_unichar_step_len = parent_vse->ngram_info->context_unichar_step_len;
  }
  // Compute p(unichar | parent context).
  int unichar_step_len = 0;
  bool pruned = false;
  float ngram_cost;
  float ngram_and_classifier_cost = ComputeNgramCost(unichar, certainty, denom, pcontext_ptr,
                                                     &unichar_step_len, &pruned, &ngram_cost);
  // Normalize just the ngram_and_classifier_cost by outline_length.
  // The ngram_cost is used by the params_model, so it needs to be left as-is,
  // and the params model cost will be normalized by outline_length.
  ngram_and_classifier_cost *= outline_length / language_model_ngram_rating_factor;
  // Add the ngram_cost of the parent.
  if (parent_vse != nullptr) {
    ngram_and_classifier_cost += parent_vse->ngram_info->ngram_and_classifier_cost;
    ngram_cost += parent_vse->ngram_info->ngram_cost;
  }
  // Shorten parent context string by unichar_step_len unichars.
  int num_remove = (unichar_step_len + pcontext_unichar_step_len - language_model_ngram_order);
  if (num_remove > 0) {
    pcontext_unichar_step_len -= num_remove;
  }
  // Advance the context pointer by whole UTF-8 characters so the stored
  // context keeps at most language_model_ngram_order unichar steps.
  while (num_remove > 0 && *pcontext_ptr != '\0') {
    pcontext_ptr += UNICHAR::utf8_step(pcontext_ptr);
    --num_remove;
  }
  // Decide whether to prune this ngram path and update changed accordingly.
  if (parent_vse != nullptr && parent_vse->ngram_info->pruned) {
    pruned = true; // a pruned ancestor keeps the whole path pruned
  }
  // Construct and return the new LanguageModelNgramInfo.
  auto *ngram_info = new LanguageModelNgramInfo(pcontext_ptr, pcontext_unichar_step_len, pruned,
                                                ngram_cost, ngram_and_classifier_cost);
  ngram_info->context += unichar;
  ngram_info->context_unichar_step_len += unichar_step_len;
  assert(ngram_info->context_unichar_step_len <= language_model_ngram_order);
  return ngram_info;
}
// Computes the combined classifier + ngram cost of appending unichar to
// context: -log2(CertaintyScore(certainty)/denom) plus the scaled ngram
// cost -log2 p(unichar | context). The pure ngram cost is returned through
// ngram_cost, the number of UTF-8 steps in unichar through unichar_step_len,
// and found_small_prob is set to true if the averaged probability fell below
// language_model_ngram_small_prob (in which case it is clipped to it).
float LanguageModel::ComputeNgramCost(const char *unichar, float certainty, float denom,
                                      const char *context, int *unichar_step_len,
                                      bool *found_small_prob, float *ngram_cost) {
  const char *context_ptr = context;
  // Lazily-allocated copy of context, extended with the already-consumed
  // UTF-8 steps of unichar; only needed when unichar spans multiple
  // UTF-8 characters. Freed before returning.
  char *modified_context = nullptr;
  char *modified_context_end = nullptr;
  const char *unichar_ptr = unichar;
  const char *unichar_end = unichar_ptr + strlen(unichar_ptr);
  float prob = 0.0f;
  int step = 0;
  // Accumulate p(c | context) over each UTF-8 character c of unichar.
  while (unichar_ptr < unichar_end && (step = UNICHAR::utf8_step(unichar_ptr)) > 0) {
    if (language_model_debug_level > 1) {
      tprintf("prob(%s | %s)=%g\n", unichar_ptr, context_ptr,
              dict_->ProbabilityInContext(context_ptr, -1, unichar_ptr, step));
    }
    prob += dict_->ProbabilityInContext(context_ptr, -1, unichar_ptr, step);
    ++(*unichar_step_len);
    if (language_model_ngram_use_only_first_uft8_step) {
      break;
    }
    unichar_ptr += step;
    // If there are multiple UTF8 characters present in unichar, context is
    // updated to include the previously examined characters from str,
    // unless use_only_first_uft8_step is true.
    if (unichar_ptr < unichar_end) {
      if (modified_context == nullptr) {
        // Buffer is sized for the full context plus the rest of unichar.
        size_t context_len = strlen(context);
        modified_context = new char[context_len + strlen(unichar_ptr) + step + 1];
        memcpy(modified_context, context, context_len);
        modified_context_end = modified_context + context_len;
        context_ptr = modified_context;
      }
      // Append the character just consumed (it starts at unichar_ptr - step).
      strncpy(modified_context_end, unichar_ptr - step, step);
      modified_context_end += step;
      *modified_context_end = '\0';
    }
  }
  prob /= static_cast<float>(*unichar_step_len); // normalize
  if (prob < language_model_ngram_small_prob) {
    if (language_model_debug_level > 0) {
      tprintf("Found small prob %g\n", prob);
    }
    *found_small_prob = true;
    prob = language_model_ngram_small_prob;
  }
  *ngram_cost = -1 * std::log2(prob);
  float ngram_and_classifier_cost = -1 * std::log2(CertaintyScore(certainty) / denom) +
                                    *ngram_cost * language_model_ngram_scale_factor;
  if (language_model_debug_level > 1) {
    tprintf("-log [ p(%s) * p(%s | %s) ] = -log2(%g*%g) = %g\n", unichar, unichar, context_ptr,
            CertaintyScore(certainty) / denom, prob, ngram_and_classifier_cost);
  }
  delete[] modified_context;
  return ngram_and_classifier_cost;
}
// Computes the normalizing denominator for ngram probabilities: the sum of
// CertaintyScore() over every choice in curr_list, plus a crude estimate of
// the scores of all unicharset classes missing from the list. Returns 1.0f
// for an empty list.
float LanguageModel::ComputeDenom(BLOB_CHOICE_LIST *curr_list) {
  if (curr_list->empty()) {
    return 1.0f;
  }
  float total = 0.0f;
  int num_choices = 0;
  BLOB_CHOICE_IT choice_it(curr_list);
  choice_it.mark_cycle_pt();
  while (!choice_it.cycled_list()) {
    ASSERT_HOST(choice_it.data() != nullptr);
    ++num_choices;
    total += CertaintyScore(choice_it.data()->certainty());
    choice_it.forward();
  }
  assert(num_choices != 0);
  // The ideal situation would be to have the classifier scores for
  // classifying each position as each of the characters in the unicharset.
  // Since we cannot do this because of speed, we add a very crude estimate
  // of what these scores for the "missing" classifications would sum up to.
  total +=
      (dict_->getUnicharset().size() - num_choices) * CertaintyScore(language_model_ngram_nonmatch_score);
  return total;
}
// Fills consistency_info with punctuation-, case-, script-, chartype-,
// font- and spacing-consistency measurements for extending the path in
// parent_vse with the choice b at column curr_col of the ratings matrix.
// Fix: the "font2" slot of the debug tprintf previously printed the common
// fontinfo_id name instead of b->fontinfo_id2().
void LanguageModel::FillConsistencyInfo(int curr_col, bool word_end, BLOB_CHOICE *b,
                                        ViterbiStateEntry *parent_vse, WERD_RES *word_res,
                                        LMConsistencyInfo *consistency_info) {
  const UNICHARSET &unicharset = dict_->getUnicharset();
  UNICHAR_ID unichar_id = b->unichar_id();
  BLOB_CHOICE *parent_b = parent_vse != nullptr ? parent_vse->curr_b : nullptr;
  // Check punctuation validity.
  if (unicharset.get_ispunctuation(unichar_id)) {
    consistency_info->num_punc++;
  }
  if (dict_->GetPuncDawg() != nullptr && !consistency_info->invalid_punc) {
    if (dict_->compound_marker(unichar_id) && parent_b != nullptr &&
        (unicharset.get_isalpha(parent_b->unichar_id()) ||
         unicharset.get_isdigit(parent_b->unichar_id()))) {
      // reset punc_ref for compound words
      consistency_info->punc_ref = NO_EDGE;
    } else {
      bool is_apos = dict_->is_apostrophe(unichar_id);
      bool prev_is_numalpha =
          (parent_b != nullptr && (unicharset.get_isalpha(parent_b->unichar_id()) ||
                                   unicharset.get_isdigit(parent_b->unichar_id())));
      // Alnum characters (and apostrophes inside alnum runs) are matched
      // against the generic pattern unichar of the punctuation dawg.
      UNICHAR_ID pattern_unichar_id =
          (unicharset.get_isalpha(unichar_id) || unicharset.get_isdigit(unichar_id) ||
           (is_apos && prev_is_numalpha))
              ? Dawg::kPatternUnicharID
              : unichar_id;
      // Only advance the punc dawg when the edge actually changes (staying
      // on the same pattern edge needs no new lookup).
      if (consistency_info->punc_ref == NO_EDGE || pattern_unichar_id != Dawg::kPatternUnicharID ||
          dict_->GetPuncDawg()->edge_letter(consistency_info->punc_ref) !=
              Dawg::kPatternUnicharID) {
        NODE_REF node = Dict::GetStartingNode(dict_->GetPuncDawg(), consistency_info->punc_ref);
        consistency_info->punc_ref = (node != NO_EDGE) ? dict_->GetPuncDawg()->edge_char_of(
                                                             node, pattern_unichar_id, word_end)
                                                       : NO_EDGE;
        if (consistency_info->punc_ref == NO_EDGE) {
          consistency_info->invalid_punc = true;
        }
      }
    }
  }
  // Update case related counters.
  if (parent_vse != nullptr && !word_end && dict_->compound_marker(unichar_id)) {
    // Reset counters if we are dealing with a compound word.
    consistency_info->num_lower = 0;
    consistency_info->num_non_first_upper = 0;
  } else if (unicharset.get_islower(unichar_id)) {
    consistency_info->num_lower++;
  } else if ((parent_b != nullptr) && unicharset.get_isupper(unichar_id)) {
    // A leading capital is fine; count uppers after the first character or
    // after any lower/upper has already been seen.
    if (unicharset.get_isupper(parent_b->unichar_id()) || consistency_info->num_lower > 0 ||
        consistency_info->num_non_first_upper > 0) {
      consistency_info->num_non_first_upper++;
    }
  }
  // Initialize consistency_info->script_id (use script of unichar_id
  // if it is not Common, use script id recorded by the parent otherwise).
  // Set inconsistent_script to true if the script of the current unichar
  // is not consistent with that of the parent.
  consistency_info->script_id = unicharset.get_script(unichar_id);
  // Hiragana and Katakana can mix with Han.
  if (dict_->getUnicharset().han_sid() != dict_->getUnicharset().null_sid()) {
    if ((unicharset.hiragana_sid() != unicharset.null_sid() &&
         consistency_info->script_id == unicharset.hiragana_sid()) ||
        (unicharset.katakana_sid() != unicharset.null_sid() &&
         consistency_info->script_id == unicharset.katakana_sid())) {
      consistency_info->script_id = dict_->getUnicharset().han_sid();
    }
  }
  if (parent_vse != nullptr &&
      (parent_vse->consistency_info.script_id != dict_->getUnicharset().common_sid())) {
    int parent_script_id = parent_vse->consistency_info.script_id;
    // If script_id is Common, use script id of the parent instead.
    if (consistency_info->script_id == dict_->getUnicharset().common_sid()) {
      consistency_info->script_id = parent_script_id;
    }
    if (consistency_info->script_id != parent_script_id) {
      consistency_info->inconsistent_script = true;
    }
  }
  // Update chartype related counters.
  if (unicharset.get_isalpha(unichar_id)) {
    consistency_info->num_alphas++;
  } else if (unicharset.get_isdigit(unichar_id)) {
    consistency_info->num_digits++;
  } else if (!unicharset.get_ispunctuation(unichar_id)) {
    consistency_info->num_other++;
  }
  // Check font and spacing consistency.
  if (fontinfo_table_->size() > 0 && parent_b != nullptr) {
    // fontinfo_id is the font shared by parent_b and b, or -1 if none.
    int fontinfo_id = -1;
    if (parent_b->fontinfo_id() == b->fontinfo_id() ||
        parent_b->fontinfo_id2() == b->fontinfo_id()) {
      fontinfo_id = b->fontinfo_id();
    } else if (parent_b->fontinfo_id() == b->fontinfo_id2() ||
               parent_b->fontinfo_id2() == b->fontinfo_id2()) {
      fontinfo_id = b->fontinfo_id2();
    }
    if (language_model_debug_level > 1) {
      tprintf(
          "pfont %s pfont %s font %s font2 %s common %s(%d)\n",
          (parent_b->fontinfo_id() >= 0) ? fontinfo_table_->at(parent_b->fontinfo_id()).name : "",
          (parent_b->fontinfo_id2() >= 0) ? fontinfo_table_->at(parent_b->fontinfo_id2()).name
                                          : "",
          (b->fontinfo_id() >= 0) ? fontinfo_table_->at(b->fontinfo_id()).name : "",
          // Fixed: print b's second font for the "font2" slot (previously
          // this repeated the common fontinfo_id name).
          (b->fontinfo_id2() >= 0) ? fontinfo_table_->at(b->fontinfo_id2()).name : "",
          (fontinfo_id >= 0) ? fontinfo_table_->at(fontinfo_id).name : "", fontinfo_id);
    }
    if (!word_res->blob_widths.empty()) { // if we have widths/gaps info
      bool expected_gap_found = false;
      float expected_gap = 0.0f;
      int temp_gap;
      if (fontinfo_id >= 0) { // found a common font
        ASSERT_HOST(fontinfo_id < fontinfo_table_->size());
        if (fontinfo_table_->at(fontinfo_id)
                .get_spacing(parent_b->unichar_id(), unichar_id, &temp_gap)) {
          expected_gap = temp_gap;
          expected_gap_found = true;
        }
      } else {
        consistency_info->inconsistent_font = true;
        // Get an average of the expected gaps in each font
        int num_addends = 0;
        int temp_fid;
        for (int i = 0; i < 4; ++i) {
          if (i == 0) {
            temp_fid = parent_b->fontinfo_id();
          } else if (i == 1) {
            temp_fid = parent_b->fontinfo_id2();
          } else if (i == 2) {
            temp_fid = b->fontinfo_id();
          } else {
            temp_fid = b->fontinfo_id2();
          }
          ASSERT_HOST(temp_fid < 0 || fontinfo_table_->size());
          if (temp_fid >= 0 && fontinfo_table_->at(temp_fid).get_spacing(parent_b->unichar_id(),
                                                                         unichar_id, &temp_gap)) {
            expected_gap += temp_gap;
            num_addends++;
          }
        }
        if (num_addends > 0) {
          expected_gap /= static_cast<float>(num_addends);
          expected_gap_found = true;
        }
      }
      if (expected_gap_found) {
        int actual_gap = word_res->GetBlobsGap(curr_col - 1);
        if (actual_gap == 0) {
          consistency_info->num_inconsistent_spaces++;
        } else {
          float gap_ratio = expected_gap / actual_gap;
          // TODO(rays) The gaps seem to be way off most of the time, saved by
          // the error here that the ratio was compared to 1/2, when it should
          // have been 0.5f. Find the source of the gaps discrepancy and put
          // the 0.5f here in place of 0.0f.
          // Test on 2476595.sj, pages 0 to 6. (In French.)
          if (gap_ratio < 0.0f || gap_ratio > 2.0f) {
            consistency_info->num_inconsistent_spaces++;
          }
        }
        if (language_model_debug_level > 1) {
          tprintf("spacing for %s(%d) %s(%d) col %d: expected %g actual %d\n",
                  unicharset.id_to_unichar(parent_b->unichar_id()), parent_b->unichar_id(),
                  unicharset.id_to_unichar(unichar_id), unichar_id, curr_col, expected_gap,
                  actual_gap);
        }
      }
    }
  }
}
// Returns the adjusted cost of the path in vse: either the trained
// params-model cost scaled by outline length, or a heuristic multiplier
// applied to the ngram cost (ngram mode) / ratings sum (otherwise).
float LanguageModel::ComputeAdjustedPathCost(ViterbiStateEntry *vse) {
  ASSERT_HOST(vse != nullptr);
  if (!params_model_.Initialized()) {
    // Heuristic path: accumulate penalty terms into a multiplier.
    float multiplier = 1.0f;
    // Penalize words that are not frequent dictionary words.
    if (vse->dawg_info == nullptr || vse->dawg_info->permuter != FREQ_DAWG_PERM) {
      multiplier += language_model_penalty_non_freq_dict_word;
    }
    if (vse->dawg_info == nullptr) {
      // Non-dictionary paths pay extra, growing with word length.
      multiplier += language_model_penalty_non_dict_word;
      if (vse->length > language_model_min_compound_length) {
        multiplier +=
            ((vse->length - language_model_min_compound_length) * language_model_penalty_increment);
      }
    }
    if (vse->associate_stats.shape_cost > 0) {
      multiplier += vse->associate_stats.shape_cost / static_cast<float>(vse->length);
    }
    if (language_model_ngram_on) {
      ASSERT_HOST(vse->ngram_info != nullptr);
      return vse->ngram_info->ngram_and_classifier_cost * multiplier;
    }
    // Without ngrams, also penalize consistency violations.
    multiplier += ComputeConsistencyAdjustment(vse->dawg_info, vse->consistency_info);
    return vse->ratings_sum * multiplier;
  }
  // Trained params model: the cost is a learned function of path features,
  // scaled by the outline length of the word.
  float features[PTRAIN_NUM_FEATURE_TYPES];
  ExtractFeaturesFromPath(*vse, features);
  float cost = params_model_.ComputeCost(features);
  if (language_model_debug_level > 3) {
    tprintf("ComputeAdjustedPathCost %g ParamsModel features:\n", cost);
    if (language_model_debug_level >= 5) {
      for (int f = 0; f < PTRAIN_NUM_FEATURE_TYPES; ++f) {
        tprintf("%s=%g\n", kParamsTrainingFeatureTypeName[f], features[f]);
      }
    }
  }
  return cost * vse->outline_length;
}
// Constructs a WERD_CHOICE out of the path in vse, records its features for
// parameter training / blaming, and hands ownership of the word over to
// word_res via LogNewCookedChoice(), updating best_choice_bundle and the
// hyphen state when the word becomes the new best choice.
// (pain_points is accepted but not used in this function.)
void LanguageModel::UpdateBestChoice(ViterbiStateEntry *vse, LMPainPoints *pain_points,
                                     WERD_RES *word_res, BestChoiceBundle *best_choice_bundle,
                                     BlamerBundle *blamer_bundle) {
  bool truth_path;
  WERD_CHOICE *word =
      ConstructWord(vse, word_res, &best_choice_bundle->fixpt, blamer_bundle, &truth_path);
  ASSERT_HOST(word != nullptr);
  if (dict_->stopper_debug_level >= 1) {
    std::string word_str;
    word->string_and_lengths(&word_str, nullptr);
    vse->Print(word_str.c_str());
  }
  if (language_model_debug_level > 0) {
    word->print("UpdateBestChoice() constructed word");
  }
  // Record features from the current path if necessary.
  ParamsTrainingHypothesis curr_hyp;
  if (blamer_bundle != nullptr) {
    if (vse->dawg_info != nullptr) {
      // Use the permuter chosen by ConstructWord() for feature extraction.
      vse->dawg_info->permuter = static_cast<PermuterType>(word->permuter());
    }
    ExtractFeaturesFromPath(*vse, curr_hyp.features);
    word->string_and_lengths(&(curr_hyp.str), nullptr);
    curr_hyp.cost = vse->cost; // record cost for error rate computations
    if (language_model_debug_level > 0) {
      tprintf("Raw features extracted from %s (cost=%g) [ ", curr_hyp.str.c_str(), curr_hyp.cost);
      for (float feature : curr_hyp.features) {
        tprintf("%g ", feature);
      }
      tprintf("]\n");
    }
    // Record the current hypothesis in params_training_bundle.
    blamer_bundle->AddHypothesis(curr_hyp);
    if (truth_path) {
      blamer_bundle->UpdateBestRating(word->rating());
    }
  }
  if (blamer_bundle != nullptr && blamer_bundle->GuidedSegsearchStillGoing()) {
    // The word was constructed solely for blamer_bundle->AddHypothesis, so
    // we no longer need it.
    delete word;
    return;
  }
  if (word_res->chopped_word != nullptr && !word_res->chopped_word->blobs.empty()) {
    word->SetScriptPositions(false, word_res->chopped_word, language_model_debug_level);
  }
  // Update and log new raw_choice if needed.
  if (word_res->raw_choice == nullptr || word->rating() < word_res->raw_choice->rating()) {
    if (word_res->LogNewRawChoice(word) && language_model_debug_level > 0) {
      tprintf("Updated raw choice\n");
    }
  }
  // Set the modified rating for best choice to vse->cost and log best choice.
  word->set_rating(vse->cost);
  // Call LogNewChoice() for best choice from Dict::adjust_word() since it
  // computes adjust_factor that is used by the adaption code (e.g. by
  // ClassifyAdaptableWord() to compute adaption acceptance thresholds).
  // Note: the rating of the word is not adjusted.
  dict_->adjust_word(word, vse->dawg_info == nullptr, vse->consistency_info.xht_decision, 0.0,
                     false, language_model_debug_level > 0);
  // Hand ownership of the word over to the word_res.
  if (!word_res->LogNewCookedChoice(dict_->tessedit_truncate_wordchoice_log,
                                    dict_->stopper_debug_level >= 1, word)) {
    // The word was so bad that it was deleted.
    return;
  }
  if (word_res->best_choice == word) {
    // Word was the new best.
    if (dict_->AcceptableChoice(*word, vse->consistency_info.xht_decision) &&
        AcceptablePath(*vse)) {
      acceptable_choice_found_ = true;
    }
    // Update best_choice_bundle.
    best_choice_bundle->updated = true;
    best_choice_bundle->best_vse = vse;
    if (language_model_debug_level > 0) {
      tprintf("Updated best choice\n");
      word->print_state("New state ");
    }
    // Update hyphen state if we are dealing with a dictionary word.
    if (vse->dawg_info != nullptr) {
      if (dict_->has_hyphen_end(*word)) {
        dict_->set_hyphen_word(*word, *(dawg_args_.active_dawgs));
      } else {
        dict_->reset_hyphen_vars(true);
      }
    }
    if (blamer_bundle != nullptr) {
      blamer_bundle->set_best_choice_is_dict_and_top_choice(vse->dawg_info != nullptr &&
                                                            vse->top_choice_flags);
    }
  }
#ifndef GRAPHICS_DISABLED
  if (wordrec_display_segmentations && word_res->chopped_word != nullptr) {
    word->DisplaySegmentation(word_res->chopped_word);
  }
#endif
}
void LanguageModel::ExtractFeaturesFromPath(const ViterbiStateEntry &vse, float features[]) {
memset(features, 0, sizeof(float) * PTRAIN_NUM_FEATURE_TYPES);
// Record dictionary match info.
int len = vse.length <= kMaxSmallWordUnichars ? 0 : vse.length <= kMaxMediumWordUnichars ? 1 : 2;
if (vse.dawg_info != nullptr) {
int permuter = vse.dawg_info->permuter;
if (permuter == NUMBER_PERM || permuter == USER_PATTERN_PERM) {
if (vse.consistency_info.num_digits == vse.length) {
features[PTRAIN_DIGITS_SHORT + len] = 1.0f;
} else {
features[PTRAIN_NUM_SHORT + len] = 1.0f;
}
} else if (permuter == DOC_DAWG_PERM) {
features[PTRAIN_DOC_SHORT + len] = 1.0f;
} else if (permuter == SYSTEM_DAWG_PERM || permuter == USER_DAWG_PERM ||
permuter == COMPOUND_PERM) {
features[PTRAIN_DICT_SHORT + len] = 1.0f;
} else if (permuter == FREQ_DAWG_PERM) {
features[PTRAIN_FREQ_SHORT + len] = 1.0f;
}
}
// Record shape cost feature (normalized by path length).
features[PTRAIN_SHAPE_COST_PER_CHAR] =
vse.associate_stats.shape_cost / static_cast<float>(vse.length);
// Record ngram cost. (normalized by the path length).
features[PTRAIN_NGRAM_COST_PER_CHAR] = 0.0f;
if (vse.ngram_info != nullptr) {
features[PTRAIN_NGRAM_COST_PER_CHAR] =
vse.ngram_info->ngram_cost / static_cast<float>(vse.length);
}
// Record consistency-related features.
// Disabled this feature for due to its poor performance.
// features[PTRAIN_NUM_BAD_PUNC] = vse.consistency_info.NumInconsistentPunc();
features[PTRAIN_NUM_BAD_CASE] = vse.consistency_info.NumInconsistentCase();
features[PTRAIN_XHEIGHT_CONSISTENCY] = vse.consistency_info.xht_decision;
features[PTRAIN_NUM_BAD_CHAR_TYPE] =
vse.dawg_info == nullptr ? vse.consistency_info.NumInconsistentChartype() : 0.0f;
features[PTRAIN_NUM_BAD_SPACING] = vse.consistency_info.NumInconsistentSpaces();
// Disabled this feature for now due to its poor performance.
// features[PTRAIN_NUM_BAD_FONT] = vse.consistency_info.inconsistent_font;
// Classifier-related features.
if (vse.outline_length > 0.0f) {
features[PTRAIN_RATING_PER_CHAR] = vse.ratings_sum / vse.outline_length;
} else {
// Avoid FP division by 0.
features[PTRAIN_RATING_PER_CHAR] = 0.0f;
}
}
// Constructs a WERD_CHOICE by tracing the parent_vse pointers from vse back
// to the beginning of the word, recording one BLOB_CHOICE per unichar, and
// re-computes the width-to-height ratio variance part of the shape cost now
// that the whole-word mean is known. If truth_path is non-null it is set to
// whether this path matches blamer_bundle's correct segmentation.
// Caller owns the returned word.
WERD_CHOICE *LanguageModel::ConstructWord(ViterbiStateEntry *vse, WERD_RES *word_res,
                                          DANGERR *fixpt, BlamerBundle *blamer_bundle,
                                          bool *truth_path) {
  if (truth_path != nullptr) {
    *truth_path =
        (blamer_bundle != nullptr && vse->length == blamer_bundle->correct_segmentation_length());
  }
  BLOB_CHOICE *curr_b = vse->curr_b;
  ViterbiStateEntry *curr_vse = vse;
  int i;
  bool compound = dict_->hyphenated(); // treat hyphenated words as compound
  // Re-compute the variance of the width-to-height ratios (since we now
  // can compute the mean over the whole word).
  float full_wh_ratio_mean = 0.0f;
  if (vse->associate_stats.full_wh_ratio_var != 0.0f) {
    // Remove the incrementally accumulated variance from the shape cost;
    // it is re-added after being recomputed against the whole-word mean.
    vse->associate_stats.shape_cost -= vse->associate_stats.full_wh_ratio_var;
    full_wh_ratio_mean =
        (vse->associate_stats.full_wh_ratio_total / static_cast<float>(vse->length));
    vse->associate_stats.full_wh_ratio_var = 0.0f;
  }
  // Construct a WERD_CHOICE by tracing parent pointers.
  auto *word = new WERD_CHOICE(word_res->uch_set, vse->length);
  word->set_length(vse->length);
  int total_blobs = 0;
  // Walk from the last unichar (at vse) back to the first via parent_vse.
  for (i = (vse->length - 1); i >= 0; --i) {
    if (blamer_bundle != nullptr && truth_path != nullptr && *truth_path &&
        !blamer_bundle->MatrixPositionCorrect(i, curr_b->matrix_cell())) {
      *truth_path = false;
    }
    // The number of blobs used for this choice is row - col + 1.
    int num_blobs = curr_b->matrix_cell().row - curr_b->matrix_cell().col + 1;
    total_blobs += num_blobs;
    word->set_blob_choice(i, num_blobs, curr_b);
    // Update the width-to-height ratio variance. Useful non-space delimited
    // languages to ensure that the blobs are of uniform width.
    // Skip leading and trailing punctuation when computing the variance.
    if ((full_wh_ratio_mean != 0.0f &&
         ((curr_vse != vse && curr_vse->parent_vse != nullptr) ||
          !dict_->getUnicharset().get_ispunctuation(curr_b->unichar_id())))) {
      vse->associate_stats.full_wh_ratio_var +=
          pow(full_wh_ratio_mean - curr_vse->associate_stats.full_wh_ratio, 2);
      if (language_model_debug_level > 2) {
        tprintf("full_wh_ratio_var += (%g-%g)^2\n", full_wh_ratio_mean,
                curr_vse->associate_stats.full_wh_ratio);
      }
    }
    // Mark the word as compound if compound permuter was set for any of
    // the unichars on the path (usually this will happen for unichars
    // that are compounding operators, like "-" and "/").
    if (!compound && curr_vse->dawg_info && curr_vse->dawg_info->permuter == COMPOUND_PERM) {
      compound = true;
    }
    // Update curr_* pointers.
    curr_vse = curr_vse->parent_vse;
    if (curr_vse == nullptr) {
      break;
    }
    curr_b = curr_vse->curr_b;
  }
  ASSERT_HOST(i == 0); // check that we recorded all the unichar ids.
  ASSERT_HOST(total_blobs == word_res->ratings->dimension());
  // Re-adjust shape cost to include the updated width-to-height variance.
  if (full_wh_ratio_mean != 0.0f) {
    vse->associate_stats.shape_cost += vse->associate_stats.full_wh_ratio_var;
  }
  word->set_rating(vse->ratings_sum);
  word->set_certainty(vse->min_certainty);
  word->set_x_heights(vse->consistency_info.BodyMinXHeight(),
                      vse->consistency_info.BodyMaxXHeight());
  // Choose the permuter: dictionary info wins, then ngram, then top-choice.
  if (vse->dawg_info != nullptr) {
    word->set_permuter(compound ? COMPOUND_PERM : vse->dawg_info->permuter);
  } else if (language_model_ngram_on && !vse->ngram_info->pruned) {
    word->set_permuter(NGRAM_PERM);
  } else if (vse->top_choice_flags) {
    word->set_permuter(TOP_CHOICE_PERM);
  } else {
    word->set_permuter(NO_PERM);
  }
  word->set_dangerous_ambig_found_(!dict_->NoDangerousAmbig(word, fixpt, true, word_res->ratings));
  return word;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/language_model.cpp
|
C++
|
apache-2.0
| 65,241
|
///////////////////////////////////////////////////////////////////////
// File: language_model.h
// Description: Functions that utilize the knowledge about the properties,
// structure and statistics of the language to help segmentation
// search.
// Author: Daria Antonova
//
// (C) Copyright 2009, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_LANGUAGE_MODEL_H_
#define TESSERACT_WORDREC_LANGUAGE_MODEL_H_
#include "associate.h" // for AssociateStats (ptr only), AssociateUtils
#include "dawg.h" // for DawgPositionVector
#include "dict.h" // for DawgArgs, Dict
#include "lm_consistency.h" // for LMConsistencyInfo
#include "lm_state.h" // for ViterbiStateEntry, LanguageModelFlagsType
#include "params.h" // for DoubleParam, double_VAR_H, IntParam, Boo...
#include "params_model.h" // for ParamsModel
#include "ratngs.h" // for BLOB_CHOICE (ptr only), BLOB_CHOICE_LIST...
#include "stopper.h" // for DANGERR
#include <cmath> // for exp
namespace tesseract {
class UNICHARSET;
class WERD_RES;
struct BlamerBundle;
template <typename T>
class UnicityTable;
class LMPainPoints;
struct FontInfo;
// This class contains the data structures and functions necessary
// to represent and use the knowledge about the language.
class LanguageModel {
public:
// Masks for keeping track of top choices that should not be pruned out.
static const LanguageModelFlagsType kSmallestRatingFlag = 0x1;
static const LanguageModelFlagsType kLowerCaseFlag = 0x2;
static const LanguageModelFlagsType kUpperCaseFlag = 0x4;
static const LanguageModelFlagsType kDigitFlag = 0x8;
static const LanguageModelFlagsType kXhtConsistentFlag = 0x10;
// Denominator for normalizing per-letter ngram cost when deriving
// penalty adjustments.
static const float kMaxAvgNgramCost;
LanguageModel(const UnicityTable<FontInfo> *fontinfo_table, Dict *dict);
~LanguageModel();
// Fills the given floats array with features extracted from path represented
// by the given ViterbiStateEntry. See ccstruct/params_training_featdef.h
// for feature information.
// Note: the function assumes that features points to an array of size
// PTRAIN_NUM_FEATURE_TYPES.
static void ExtractFeaturesFromPath(const ViterbiStateEntry &vse, float features[]);
// Updates data structures that are used for the duration of the segmentation
// search on the current word.
void InitForWord(const WERD_CHOICE *prev_word, bool fixed_pitch, float max_char_wh_ratio,
float rating_cert_scale);
// Updates language model state of the given BLOB_CHOICE_LIST (from
// the ratings matrix) and its parent. Updates pain_points if new
// problematic points are found in the segmentation graph.
//
// At most language_model_viterbi_list_size are kept in each
// LanguageModelState.viterbi_state_entries list.
// At most language_model_viterbi_list_max_num_prunable of those are prunable
// (non-dictionary) paths.
// The entries that represent dictionary word paths are kept at the front
// of the list.
// The list ordered by cost that is computed collectively by several
// language model components (currently dawg and ngram components).
bool UpdateState(bool just_classified, int curr_col, int curr_row, BLOB_CHOICE_LIST *curr_list,
LanguageModelState *parent_node, LMPainPoints *pain_points, WERD_RES *word_res,
BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle);
// Returns true if an acceptable best choice was discovered.
  inline bool AcceptableChoiceFound() {
    // Flag is set by UpdateBestChoice() when an acceptable word is found.
    return acceptable_choice_found_;
  }
  // Overrides the acceptable-choice flag tracked during segmentation search.
  inline void SetAcceptableChoiceFound(bool val) {
    acceptable_choice_found_ = val;
  }
// Returns the reference to ParamsModel.
  inline ParamsModel &getParamsModel() {
    // Mutable reference; params_model_ is used by ComputeAdjustedPathCost().
    return params_model_;
  }
protected:
// Maps a (negative) classifier certainty to a positive score used in
// path cost computation.
inline float CertaintyScore(float cert) {
  if (!language_model_use_sigmoidal_certainty) {
    return -1.0f / cert;
  }
  // cert is assumed to be between 0 and -dict_->certainty_scale.
  // If you enable language_model_use_sigmoidal_certainty, you
  // need to adjust language_model_ngram_nonmatch_score as well.
  const float normalized = -cert / dict_->certainty_scale;
  return 1.0f / (1.0f + exp(10.0f * normalized));
}
// Returns the penalty for num_problems inconsistencies: zero problems cost
// nothing, one costs the base penalty, and every additional problem adds
// language_model_penalty_increment.
inline float ComputeAdjustment(int num_problems, float penalty) {
  if (num_problems == 0) {
    return 0.0f;
  }
  if (num_problems == 1) {
    return penalty;
  }
  const float extra = language_model_penalty_increment * static_cast<float>(num_problems - 1);
  return penalty + extra;
}
// Computes the adjustment to the ratings sum based on the given
// consistency_info. The paths with invalid punctuation, inconsistent
// case and character type are penalized proportionally to the number
// of inconsistencies on the path.
inline float ComputeConsistencyAdjustment(const LanguageModelDawgInfo *dawg_info,
                                          const LMConsistencyInfo &consistency_info) {
  const float script_penalty =
      consistency_info.inconsistent_script ? language_model_penalty_script : 0.0f;
  if (dawg_info != nullptr) {
    // Dictionary paths only pay for case and script inconsistencies.
    return ComputeAdjustment(consistency_info.NumInconsistentCase(),
                             language_model_penalty_case) +
           script_penalty;
  }
  const float font_penalty =
      consistency_info.inconsistent_font ? language_model_penalty_font : 0.0f;
  // Non-dictionary paths additionally pay for punctuation, character type
  // and spacing inconsistencies.
  return ComputeAdjustment(consistency_info.NumInconsistentPunc(), language_model_penalty_punc) +
         ComputeAdjustment(consistency_info.NumInconsistentCase(), language_model_penalty_case) +
         ComputeAdjustment(consistency_info.NumInconsistentChartype(),
                           language_model_penalty_chartype) +
         ComputeAdjustment(consistency_info.NumInconsistentSpaces(),
                           language_model_penalty_spacing) +
         script_penalty + font_penalty;
}
// Returns an adjusted ratings sum that includes inconsistency penalties,
// penalties for non-dictionary paths and paths with dips in ngram
// probability.
float ComputeAdjustedPathCost(ViterbiStateEntry *vse);
// Finds the first lower and upper case letter and first digit in curr_list.
// Uses the first character in the list in place of empty results.
// Returns true if both alpha and digits are found.
bool GetTopLowerUpperDigit(BLOB_CHOICE_LIST *curr_list, BLOB_CHOICE **first_lower,
BLOB_CHOICE **first_upper, BLOB_CHOICE **first_digit) const;
// Forces there to be at least one entry in the overall set of the
// viterbi_state_entries of each element of parent_node that has the
// top_choice_flag set for lower, upper and digit using the same rules as
// GetTopLowerUpperDigit, setting the flag on the first found suitable
// candidate, whether or not the flag is set on some other parent.
// Returns 1 if both alpha and digits are found among the parents, -1 if no
// parents are found at all (a legitimate case), and 0 otherwise.
int SetTopParentLowerUpperDigit(LanguageModelState *parent_node) const;
// Finds the next ViterbiStateEntry with which the given unichar_id can
// combine sensibly, taking into account any mixed alnum/mixed case
// situation, and whether this combination has been inspected before.
ViterbiStateEntry *GetNextParentVSE(bool just_classified, bool mixed_alnum, const BLOB_CHOICE *bc,
LanguageModelFlagsType blob_choice_flags,
const UNICHARSET &unicharset, WERD_RES *word_res,
ViterbiStateEntry_IT *vse_it,
LanguageModelFlagsType *top_choice_flags) const;
// Helper function that computes the cost of the path composed of the
// path in the given parent ViterbiStateEntry and the given BLOB_CHOICE.
// If the new path looks good enough, adds a new ViterbiStateEntry to the
// list of viterbi entries in the given BLOB_CHOICE and returns true.
bool AddViterbiStateEntry(LanguageModelFlagsType top_choice_flags, float denom, bool word_end,
int curr_col, int curr_row, BLOB_CHOICE *b,
LanguageModelState *curr_state, ViterbiStateEntry *parent_vse,
LMPainPoints *pain_points, WERD_RES *word_res,
BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle);
// Determines whether a potential entry is a true top choice and
// updates changed accordingly.
//
// Note: The function assumes that b, top_choice_flags and changed
// are not nullptr.
void GenerateTopChoiceInfo(ViterbiStateEntry *new_vse, const ViterbiStateEntry *parent_vse,
LanguageModelState *lms);
// Calls dict_->LetterIsOk() with DawgArgs initialized from parent_vse and
// unichar from b.unichar_id(). Constructs and returns LanguageModelDawgInfo
// with updated active dawgs, constraints and permuter.
//
// Note: the caller is responsible for deleting the returned pointer.
LanguageModelDawgInfo *GenerateDawgInfo(bool word_end, int curr_col, int curr_row,
const BLOB_CHOICE &b,
const ViterbiStateEntry *parent_vse);
// Computes p(unichar | parent context) and records it in ngram_cost.
// If b.unichar_id() is an unlikely continuation of the parent context
// sets found_small_prob to true and returns nullptr.
// Otherwise creates a new LanguageModelNgramInfo entry containing the
// updated context (that includes b.unichar_id() at the end) and returns it.
//
// Note: the caller is responsible for deleting the returned pointer.
LanguageModelNgramInfo *GenerateNgramInfo(const char *unichar, float certainty, float denom,
int curr_col, int curr_row, float outline_length,
const ViterbiStateEntry *parent_vse);
// Computes -(log(prob(classifier)) + log(prob(ngram model)))
// for the given unichar in the given context. If there are multiple
// unichars at one position - takes the average of their probabilities.
// UNICHAR::utf8_step() is used to separate out individual UTF8 characters,
// since probability_in_context() can only handle one at a time (while
// unicharset might contain ngrams and glyphs composed from multiple UTF8
// characters).
float ComputeNgramCost(const char *unichar, float certainty, float denom, const char *context,
int *unichar_step_len, bool *found_small_prob, float *ngram_prob);
// Computes the normalization factors for the classifier confidences
// (used by ComputeNgramCost()).
float ComputeDenom(BLOB_CHOICE_LIST *curr_list);
// Fills the given consistenty_info based on parent_vse.consistency_info
// and on the consistency of the given unichar_id with parent_vse.
void FillConsistencyInfo(int curr_col, bool word_end, BLOB_CHOICE *b,
ViterbiStateEntry *parent_vse, WERD_RES *word_res,
LMConsistencyInfo *consistency_info);
// Constructs WERD_CHOICE by recording unichar_ids of the BLOB_CHOICEs
// on the path represented by the given BLOB_CHOICE and language model
// state entries (lmse, dse). The path is re-constructed by following
// the parent pointers in the lang model state entries). If the
// constructed WERD_CHOICE is better than the best/raw choice recorded
// in the best_choice_bundle, this function updates the corresponding
// fields and sets best_choice_bunldle->updated to true.
void UpdateBestChoice(ViterbiStateEntry *vse, LMPainPoints *pain_points, WERD_RES *word_res,
BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle);
// Constructs a WERD_CHOICE by tracing parent pointers starting with
// the given LanguageModelStateEntry. Returns the constructed word.
// Updates best_char_choices, certainties and state if they are not
// nullptr (best_char_choices and certainties are assumed to have the
// length equal to lmse->length).
// The caller is responsible for freeing memory associated with the
// returned WERD_CHOICE.
WERD_CHOICE *ConstructWord(ViterbiStateEntry *vse, WERD_RES *word_res, DANGERR *fixpt,
BlamerBundle *blamer_bundle, bool *truth_path);
// Wrapper around AssociateUtils::ComputeStats().
inline void ComputeAssociateStats(int col, int row, float max_char_wh_ratio,
ViterbiStateEntry *parent_vse, WERD_RES *word_res,
AssociateStats *associate_stats) {
AssociateUtils::ComputeStats(
col, row, (parent_vse != nullptr) ? &(parent_vse->associate_stats) : nullptr,
(parent_vse != nullptr) ? parent_vse->length : 0, fixed_pitch_, max_char_wh_ratio, word_res,
language_model_debug_level > 2, associate_stats);
}
// Returns true if the path with such top_choice_flags and dawg_info
// could be pruned out (i.e. is neither a system/user/frequent dictionary
// nor a top choice path).
// In non-space delimited languages all paths can be "somewhat" dictionary
// words. In such languages we cannot do dictionary-driven path pruning,
// so paths with non-empty dawg_info are considered prunable.
inline bool PrunablePath(const ViterbiStateEntry &vse) {
  if (vse.top_choice_flags) {
    return false; // Top-choice paths are never pruned.
  }
  if (vse.dawg_info == nullptr) {
    return true;
  }
  // Paths through system/user/frequent-word dawgs are kept; any other
  // dawg-backed path is still prunable (see class comment about
  // non-space-delimited languages).
  switch (vse.dawg_info->permuter) {
    case SYSTEM_DAWG_PERM:
    case USER_DAWG_PERM:
    case FREQ_DAWG_PERM:
      return false;
    default:
      return true;
  }
}
// Returns true if the given ViterbiStateEntry represents an acceptable path.
inline bool AcceptablePath(const ViterbiStateEntry &vse) {
  // A path is acceptable if it is a dictionary path, is internally
  // consistent, or survived ngram-model pruning.
  if (vse.dawg_info != nullptr) {
    return true;
  }
  if (vse.Consistent()) {
    return true;
  }
  return vse.ngram_info != nullptr && !vse.ngram_info->pruned;
}
public:
// Parameters.
INT_VAR_H(language_model_debug_level);
BOOL_VAR_H(language_model_ngram_on);
INT_VAR_H(language_model_ngram_order);
INT_VAR_H(language_model_viterbi_list_max_num_prunable);
INT_VAR_H(language_model_viterbi_list_max_size);
double_VAR_H(language_model_ngram_small_prob);
double_VAR_H(language_model_ngram_nonmatch_score);
BOOL_VAR_H(language_model_ngram_use_only_first_uft8_step);
double_VAR_H(language_model_ngram_scale_factor);
double_VAR_H(language_model_ngram_rating_factor);
BOOL_VAR_H(language_model_ngram_space_delimited_language);
INT_VAR_H(language_model_min_compound_length);
// Penalties used for adjusting path costs and final word rating.
double_VAR_H(language_model_penalty_non_freq_dict_word);
double_VAR_H(language_model_penalty_non_dict_word);
double_VAR_H(language_model_penalty_punc);
double_VAR_H(language_model_penalty_case);
double_VAR_H(language_model_penalty_script);
double_VAR_H(language_model_penalty_chartype);
double_VAR_H(language_model_penalty_font);
double_VAR_H(language_model_penalty_spacing);
double_VAR_H(language_model_penalty_increment);
INT_VAR_H(wordrec_display_segmentations);
BOOL_VAR_H(language_model_use_sigmoidal_certainty);
protected:
// Member Variables.
// Temporary DawgArgs struct that is re-used across different words to
// avoid dynamic memory re-allocation (should be cleared before each use).
DawgArgs dawg_args_;
// Scaling for recovering blob outline length from rating and certainty.
float rating_cert_scale_ = 0.0f;
// The following variables are set at construction time.
// Pointer to fontinfo table (not owned by LanguageModel).
const UnicityTable<FontInfo> *fontinfo_table_ = nullptr;
// Pointer to Dict class, that is used for querying the dictionaries
// (the pointer is not owned by LanguageModel).
Dict *dict_ = nullptr;
// TODO(daria): the following variables should become LanguageModel params
// when the old code in bestfirst.cpp and heuristic.cpp is deprecated.
//
// Set to true if we are dealing with fixed pitch text
// (set to assume_fixed_pitch_char_segment).
bool fixed_pitch_ = false;
// Max char width-to-height ratio allowed
// (set to segsearch_max_char_wh_ratio).
float max_char_wh_ratio_ = 0.0f;
// The following variables are initialized with InitForWord().
// String representation of the classification of the previous word
// (since this is only used by the character ngram model component,
// only the last language_model_ngram_order of the word are stored).
std::string prev_word_str_;
int prev_word_unichar_step_len_ = 0;
// Active dawg vector.
DawgPositionVector very_beginning_active_dawgs_; // includes continuation
DawgPositionVector beginning_active_dawgs_;
// Set to true if acceptable choice was discovered.
// Note: it would be nice to use this to terminate the search once an
// acceptable choices is found. However we do not do that and once an
// acceptable choice is found we finish looking for alternative choices
// in the current segmentation graph and then exit the search (no more
// classifications are done after an acceptable choice is found).
// This is needed in order to let the search find the words very close to
// the best choice in rating (e.g. what/What, Cat/cat, etc) and log these
// choices. This way the stopper will know that the best choice is not
// ambiguous (i.e. there are best choices in the best choice list that have
// ratings close to the very best one) and will be less likely to mis-adapt.
bool acceptable_choice_found_ = false;
// Set to true if a choice representing correct segmentation was explored.
bool correct_segmentation_explored_ = false;
// Params models containing weights for computing ViterbiStateEntry costs.
ParamsModel params_model_;
};
} // namespace tesseract
#endif // TESSERACT_WORDREC_LANGUAGE_MODEL_H_
|
2301_81045437/tesseract
|
src/wordrec/language_model.h
|
C++
|
apache-2.0
| 18,663
|
///////////////////////////////////////////////////////////////////////
// File: lm_consistency.cpp
// Description: Struct for recording consistency of the paths representing
// OCR hypotheses.
// Author: Rika Antonova
// Created: Mon Jun 20 11:26:43 PST 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////
#include "lm_consistency.h"
#include "associate.h"
#include "dict.h"
#include "ratngs.h"
namespace tesseract {
// Updates the x-height consistency state of this path with the script
// position and x-height range of the next blob choice b.
// Sets xht_decision to XH_GOOD, XH_SUBNORMAL or XH_INCONSISTENT.
void LMConsistencyInfo::ComputeXheightConsistency(const BLOB_CHOICE *b, bool is_punc) {
  if (xht_decision == XH_INCONSISTENT) {
    return; // It isn't going to get any better.
  }
  // Compute xheight consistency.
  // xht_sp < 0 is the sentinel set by the constructor meaning "no parent".
  bool parent_null = xht_sp < 0;
  int parent_sp = xht_sp;
  // Classify this blob's script position (sub/normal/superscript) from its
  // baseline shift.
  if (b->yshift() > LMConsistencyInfo::kShiftThresh) {
    xht_sp = LMConsistencyInfo::kSUP;
  } else if (b->yshift() < -LMConsistencyInfo::kShiftThresh) {
    xht_sp = LMConsistencyInfo::kSUB;
  } else {
    xht_sp = LMConsistencyInfo::kNORM;
  }
  xht_count[xht_sp]++;
  if (is_punc) {
    xht_count_punc[xht_sp]++;
  }
  if (!parent_null) {
    // Each position jump (kSUB=0, kNORM=1, kSUP=2) adds its magnitude to
    // the running entropy; too much jumping is rejected below.
    xpos_entropy += abs(parent_sp - xht_sp);
  }
  // TODO(eger): Figure out a better way to account for small caps.
  // For the first character not y-shifted, we only care if it is too small.
  // Too large is common in drop caps and small caps.
  // int16_t small_xht = b->min_xheight();
  // if (parent_vse == nullptr && sp == LanguageModelConsistencyInfo::kNORM) {
  //   small_xht = 0;
  // }
  // Narrow the feasible x-height range for this script position.
  IntersectRange(b->min_xheight(), b->max_xheight(), &(xht_lo[xht_sp]), &(xht_hi[xht_sp]));
  // Compute xheight inconsistency kinds.
  if (parent_null) {
    // First character on the path: good if it sits on the normal line.
    if (xht_count[kNORM] == 1) {
      xht_decision = XH_GOOD;
    } else {
      xht_decision = XH_SUBNORMAL;
    }
    return;
  }
  // When we intersect the ranges of xheights in pixels for all characters in
  // each position (subscript, normal, superscript),
  // How much range must be left? 0? [exactly one pixel height for xheight] 1?
  // TODO(eger): Extend this code to take a prior for the rest of the line.
  const int kMinIntersectedXHeightRange = 0;
  for (int i = 0; i < kNumPos; i++) {
    if (xht_lo[i] > xht_hi[i] - kMinIntersectedXHeightRange) {
      xht_decision = XH_INCONSISTENT;
      return;
    }
  }
  // Reject as improbable anything where there's much punctuation in subscript
  // or superscript regions.
  if (xht_count_punc[kSUB] > xht_count[kSUB] * 0.4 ||
      xht_count_punc[kSUP] > xht_count[kSUP] * 0.4) {
    xht_decision = XH_INCONSISTENT;
    return;
  }
  // Now check that the subscript and superscript aren't too small relative to
  // the mainline.
  auto mainline_xht = static_cast<double>(xht_lo[kNORM]);
  double kMinSizeRatio = 0.4;
  if (mainline_xht > 0.0 && (static_cast<double>(xht_hi[kSUB]) / mainline_xht < kMinSizeRatio ||
                             static_cast<double>(xht_hi[kSUP]) / mainline_xht < kMinSizeRatio)) {
    xht_decision = XH_INCONSISTENT;
    return;
  }
  // TODO(eger): Check into inconsistency of super/subscript y offsets.
  if (xpos_entropy > kMaxEntropy) {
    xht_decision = XH_INCONSISTENT;
    return;
  }
  // No sub/superscripts at all on a consistent path: fully good.
  if (xht_count[kSUB] == 0 && xht_count[kSUP] == 0) {
    xht_decision = XH_GOOD;
    return;
  }
  xht_decision = XH_SUBNORMAL;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/lm_consistency.cpp
|
C++
|
apache-2.0
| 3,908
|
///////////////////////////////////////////////////////////////////////
// File: lm_consistency.h
// Description: Struct for recording consistency of the paths representing
// OCR hypotheses.
// Author: Rika Antonova
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_LM_CONSISTENCY_H_
#define TESSERACT_WORDREC_LM_CONSISTENCY_H_
#include <cstdint> // for INT16_MAX
#include "dawg.h" // for EDGE_REF, NO_EDGE
#include "dict.h" // for XH_GOOD, XH_INCONSISTENT, XHeightConsi...
class BLOB_CHOICE;
namespace tesseract {
// Printable names indexed by XHeightConsistencyEnum, for debug output.
// NOTE(review): static const in a header means every including TU gets its
// own copy of this array; consider inline constexpr if that matters.
static const char *const XHeightConsistencyEnumName[] = {
    "XH_GOOD",
    "XH_SUBNORMAL",
    "XH_INCONSISTENT",
};
// Struct for keeping track of the consistency of the path.
// Struct for keeping track of the consistency of the path.
struct LMConsistencyInfo {
  enum ChartypeEnum { CT_NONE, CT_ALPHA, CT_DIGIT, CT_OTHER };
  // How much do characters have to be shifted away from normal parameters
  // before we say they're not normal?
  static const int kShiftThresh = 1;
  // How much shifting from subscript to superscript and back
  // before we declare shenanigans?
  static const int kMaxEntropy = 1;
  // Script positions - order important for entropy calculation.
  static const int kSUB = 0, kNORM = 1, kSUP = 2;
  static const int kNumPos = 3;
  // Initializes a fresh state when parent_info is nullptr, otherwise copies
  // the parent's accumulated consistency state.
  explicit LMConsistencyInfo(const LMConsistencyInfo *parent_info) {
    if (parent_info == nullptr) {
      // Initialize from scratch.
      num_alphas = 0;
      num_digits = 0;
      num_punc = 0;
      num_other = 0;
      chartype = CT_NONE;
      punc_ref = NO_EDGE;
      invalid_punc = false;
      num_non_first_upper = 0;
      num_lower = 0;
      script_id = 0;
      inconsistent_script = false;
      num_inconsistent_spaces = 0;
      inconsistent_font = false;
      // Initialize XHeight stats.
      for (int i = 0; i < kNumPos; i++) {
        xht_count[i] = 0;
        xht_count_punc[i] = 0;
        xht_lo[i] = 0;
        xht_hi[i] = 256; // kBlnCellHeight
      }
      xht_sp = -1; // This invalid value indicates that there was no parent.
      xpos_entropy = 0;
      xht_decision = XH_GOOD;
    } else {
      // Copy parent info
      *this = *parent_info;
    }
  }
  // Punctuation is inconsistent only once the punctuation pattern has been
  // flagged invalid; then every punctuation character counts.
  inline int NumInconsistentPunc() const {
    return invalid_punc ? num_punc : 0;
  }
  // The minority case (upper vs lower) counts as inconsistent.
  inline int NumInconsistentCase() const {
    return (num_non_first_upper > num_lower) ? num_lower : num_non_first_upper;
  }
  // Inconsistent punctuation plus "other" characters plus the minority of
  // alphas vs digits.
  inline int NumInconsistentChartype() const {
    return (NumInconsistentPunc() + num_other +
            ((num_alphas > num_digits) ? num_digits : num_alphas));
  }
  // True iff the path has no inconsistencies of any tracked kind.
  inline bool Consistent() const {
    return (NumInconsistentPunc() == 0 && NumInconsistentCase() == 0 &&
            NumInconsistentChartype() == 0 && !inconsistent_script && !inconsistent_font &&
            !InconsistentXHeight());
  }
  inline int NumInconsistentSpaces() const {
    return num_inconsistent_spaces;
  }
  inline int InconsistentXHeight() const {
    return xht_decision == XH_INCONSISTENT;
  }
  void ComputeXheightConsistency(const BLOB_CHOICE *b, bool is_punc);
  // Lower bound of the mainline x-height; 0 if the path is inconsistent.
  float BodyMinXHeight() const {
    if (InconsistentXHeight()) {
      return 0.0f;
    }
    return xht_lo[kNORM];
  }
  // Upper bound of the mainline x-height; INT16_MAX if inconsistent.
  float BodyMaxXHeight() const {
    if (InconsistentXHeight()) {
      return static_cast<float>(INT16_MAX);
    }
    return xht_hi[kNORM];
  }
  EDGE_REF punc_ref;              // Current edge in the punctuation pattern dawg.
  int num_alphas;                 // Counts of character classes seen on the path.
  int num_digits;
  int num_punc;
  int num_other;
  ChartypeEnum chartype;          // Type of the most recent character.
  XHeightConsistencyEnum xht_decision;
  int num_non_first_upper;        // Case statistics used by NumInconsistentCase().
  int num_lower;
  int script_id;
  int num_inconsistent_spaces;
  // Metrics clumped by position.
  float xht_lo[kNumPos];          // Intersected feasible x-height range per position.
  float xht_hi[kNumPos];
  int16_t xht_count[kNumPos];     // Characters seen per script position.
  int16_t xht_count_punc[kNumPos];
  int16_t xht_sp;                 // Script position of the last character; -1 = none.
  int16_t xpos_entropy;           // Accumulated sub/superscript position jumps.
  bool invalid_punc;
  bool inconsistent_script;
  bool inconsistent_font;
};
} // namespace tesseract
#endif // TESSERACT_WORDREC_LM_CONSISTENCY_H_
|
2301_81045437/tesseract
|
src/wordrec/lm_consistency.h
|
C++
|
apache-2.0
| 4,507
|
///////////////////////////////////////////////////////////////////////
// File: pain_points.cpp
// Description: Functions that utilize the knowledge about the properties
// of the paths explored by the segmentation search in order
// to "pain points" - the locations in the ratings matrix
// which should be classified next.
// Author: Rika Antonova
// Created: Mon Jun 20 11:26:43 PST 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "lm_pain_points.h"
#include "associate.h"
#include "dict.h"
#include "genericheap.h"
#include "lm_state.h"
#include "matrix.h"
#include "pageres.h"
#include <algorithm>
namespace tesseract {
const float LMPainPoints::kDefaultPainPointPriorityAdjustment = 2.0f;
const float LMPainPoints::kLooseMaxCharWhRatio = 2.5f;
// Pops the highest-priority pain point from the most important non-empty
// heap into *pp / *priority. Returns the heap's type, or LM_PPTYPE_NUM if
// all heaps are empty.
LMPainPointsType LMPainPoints::Deque(MATRIX_COORD *pp, float *priority) {
  for (int type = 0; type < LM_PPTYPE_NUM; ++type) {
    PainPointHeap &heap = pain_points_heaps_[type];
    if (heap.empty()) {
      continue;
    }
    *priority = heap.PeekTop().key();
    *pp = heap.PeekTop().data();
    heap.Pop(nullptr);
    return static_cast<LMPainPointsType>(type);
  }
  return LM_PPTYPE_NUM;
}
// For each unclassified in-band cell of the ratings matrix that has a
// classified neighbor (one blob fewer on either side), generates an
// initial LM_PPTYPE_SHAPE pain point so the cell gets classified.
// Fix: removed the unused local AssociateStats that was declared but
// never referenced.
void LMPainPoints::GenerateInitial(WERD_RES *word_res) {
  MATRIX *ratings = word_res->ratings;
  const int dim = ratings->dimension();
  for (int col = 0; col < dim; ++col) {
    // Only cells within the matrix band can hold classifications.
    int row_end = std::min(dim, col + ratings->bandwidth() + 1);
    for (int row = col + 1; row < row_end; ++row) {
      MATRIX_COORD coord(col, row);
      if (coord.Valid(*ratings) && ratings->get(col, row) != NOT_CLASSIFIED) {
        continue; // Already classified.
      }
      // Add an initial pain point if needed.
      if (ratings->Classified(col, row - 1, dict_->WildcardID()) ||
          (col + 1 < dim && ratings->Classified(col + 1, row, dict_->WildcardID()))) {
        GeneratePainPoint(col, row, LM_PPTYPE_SHAPE, 0.0, true, max_char_wh_ratio_, word_res);
      }
    }
  }
}
// Walks the path ending at vse via parent pointers and, for each pair of
// adjacent blobs on it, generates an LM_PPTYPE_PATH pain point covering the
// merged cell, prioritized by the average per-outline rating of the rest of
// the path.
void LMPainPoints::GenerateFromPath(float rating_cert_scale, ViterbiStateEntry *vse,
                                    WERD_RES *word_res) {
  ViterbiStateEntry *curr_vse = vse;
  BLOB_CHOICE *curr_b = vse->curr_b;
  // The following pain point generation and priority calculation approaches
  // prioritize exploring paths with low average rating of the known part of
  // the path, while not relying on the ratings of the pieces to be combined.
  //
  // A pain point to combine the neighbors is generated for each pair of
  // neighboring blobs on the path (the path is represented by vse argument
  // given to GenerateFromPath()). The priority of each pain point is set to
  // the average rating (per outline length) of the path, not including the
  // ratings of the blobs to be combined.
  // The ratings of the blobs to be combined are not used to calculate the
  // priority, since it is not possible to determine from their magnitude
  // whether it will be beneficial to combine the blobs. The reason is that
  // chopped junk blobs (/ | - ') can have very good (low) ratings, however
  // combining them will be beneficial. Blobs with high ratings might be
  // over-joined pieces of characters, but also could be blobs from an unseen
  // font or chopped pieces of complex characters.
  while (curr_vse->parent_vse != nullptr) {
    ViterbiStateEntry *parent_vse = curr_vse->parent_vse;
    const MATRIX_COORD &curr_cell = curr_b->matrix_cell();
    const MATRIX_COORD &parent_cell = parent_vse->curr_b->matrix_cell();
    // The merged cell spans from the parent's first blob to the current
    // entry's last blob.
    MATRIX_COORD pain_coord(parent_cell.col, curr_cell.row);
    if (!pain_coord.Valid(*word_res->ratings) ||
        !word_res->ratings->Classified(parent_cell.col, curr_cell.row, dict_->WildcardID())) {
      // rat_subtr contains ratings sum of the two adjacent blobs to be merged.
      // rat_subtr will be subtracted from the ratings sum of the path, since
      // the blobs will be joined into a new blob, whose rating is yet unknown.
      float rat_subtr = curr_b->rating() + parent_vse->curr_b->rating();
      // ol_subtr contains the outline length of the blobs that will be joined.
      float ol_subtr =
          AssociateUtils::ComputeOutlineLength(rating_cert_scale, *curr_b) +
          AssociateUtils::ComputeOutlineLength(rating_cert_scale, *(parent_vse->curr_b));
      // ol_dif is the outline of the path without the two blobs to be joined.
      float ol_dif = vse->outline_length - ol_subtr;
      // priority is set to the average rating of the path per unit of outline,
      // not counting the ratings of the pieces to be joined.
      float priority = ol_dif > 0 ? (vse->ratings_sum - rat_subtr) / ol_dif : 0.0;
      GeneratePainPoint(pain_coord.col, pain_coord.row, LM_PPTYPE_PATH, priority, true,
                        max_char_wh_ratio_, word_res);
    } else if (debug_level_ > 3) {
      // The merged cell is already classified: just dump its choices.
      tprintf("NO pain point (Classified) for col=%d row=%d type=%s\n", pain_coord.col,
              pain_coord.row, LMPainPointsTypeName[LM_PPTYPE_PATH]);
      BLOB_CHOICE_IT b_it(word_res->ratings->get(pain_coord.col, pain_coord.row));
      for (b_it.mark_cycle_pt(); !b_it.cycled_list(); b_it.forward()) {
        BLOB_CHOICE *choice = b_it.data();
        choice->print_full();
      }
    }
    // Step to the previous blob pair on the path.
    curr_vse = parent_vse;
    curr_b = curr_vse->curr_b;
  }
}
// Generates an LM_PPTYPE_AMBIG pain point for every dangerous ambiguity
// recorded in fixpt, prioritized by the cost of the best-choice path.
void LMPainPoints::GenerateFromAmbigs(const DANGERR &fixpt, ViterbiStateEntry *vse,
                                      WERD_RES *word_res) {
  // Begins and ends in DANGERR vector now record the blob indices as used
  // by the ratings matrix.
  for (const auto &danger : fixpt) {
    // Only use dangerous ambiguities.
    if (!danger.dangerous) {
      continue;
    }
    GeneratePainPoint(danger.begin, danger.end - 1, LM_PPTYPE_AMBIG, vse->cost, true,
                      kLooseMaxCharWhRatio, word_res);
  }
}
// Adds a pain point for ratings cell (col, row) to the heap for pp_type.
// Returns true if a point was actually enqueued; returns false if the cell
// is already classified, the combined blob has a bad shape, or the heap for
// pp_type is full.
bool LMPainPoints::GeneratePainPoint(int col, int row, LMPainPointsType pp_type,
                                     float special_priority, bool ok_to_extend,
                                     float max_char_wh_ratio, WERD_RES *word_res) {
  MATRIX_COORD coord(col, row);
  // Nothing to do if the cell already holds classifications.
  if (coord.Valid(*word_res->ratings) &&
      word_res->ratings->Classified(col, row, dict_->WildcardID())) {
    return false;
  }
  if (debug_level_ > 3) {
    tprintf("Generating pain point for col=%d row=%d type=%s\n", col, row,
            LMPainPointsTypeName[pp_type]);
  }
  // Compute associate stats.
  AssociateStats associate_stats;
  AssociateUtils::ComputeStats(col, row, nullptr, 0, fixed_pitch_, max_char_wh_ratio, word_res,
                               debug_level_, &associate_stats);
  // For fixed-pitch fonts/languages: if the current combined blob overlaps
  // the next blob on the right and it is ok to extend the blob, try extending
  // the blob until there is no overlap with the next blob on the right or
  // until the width-to-height ratio becomes too large.
  // Note: row is advanced in place here; the enqueued point uses the
  // extended row, not the one passed in.
  if (ok_to_extend) {
    while (associate_stats.bad_fixed_pitch_right_gap && row + 1 < word_res->ratings->dimension() &&
           !associate_stats.bad_fixed_pitch_wh_ratio) {
      AssociateUtils::ComputeStats(col, ++row, nullptr, 0, fixed_pitch_, max_char_wh_ratio,
                                   word_res, debug_level_, &associate_stats);
    }
  }
  if (associate_stats.bad_shape) {
    if (debug_level_ > 3) {
      tprintf("Discarded pain point with a bad shape\n");
    }
    return false;
  }
  // Insert the new pain point into pain_points_heap_.
  if (pain_points_heaps_[pp_type].size() < max_heap_size_) {
    // Compute pain point priority.
    // LM_PPTYPE_PATH (and callers passing special_priority for AMBIG)
    // supply their own priority; otherwise the gap sum is used.
    float priority;
    if (pp_type == LM_PPTYPE_PATH) {
      priority = special_priority;
    } else {
      priority = associate_stats.gap_sum;
    }
    MatrixCoordPair pain_point(priority, MATRIX_COORD(col, row));
    pain_points_heaps_[pp_type].Push(&pain_point);
    if (debug_level_) {
      tprintf("Added pain point with priority %g\n", priority);
    }
    return true;
  } else {
    if (debug_level_) {
      tprintf("Pain points heap is full\n");
    }
    return false;
  }
}
/**
* Adjusts the pain point coordinates to cope with expansion of the ratings
* matrix due to a split of the blob with the given index.
*/
void LMPainPoints::RemapForSplit(int index) {
for (auto &pain_points_heap : pain_points_heaps_) {
std::vector<MatrixCoordPair> &heap = pain_points_heap.heap();
for (auto &&entry : heap) {
entry.data().MapForSplit(index);
}
}
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/lm_pain_points.cpp
|
C++
|
apache-2.0
| 9,110
|
///////////////////////////////////////////////////////////////////////
// File: lm_pain_points.h
// Description: Functions that utilize the knowledge about the properties
// of the paths explored by the segmentation search in order
// to generate "pain points" - the locations in the ratings
// matrix which should be classified next.
// Author: Rika Antonova
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_PAIN_POINTS_H_
#define TESSERACT_WORDREC_PAIN_POINTS_H_
#include "genericheap.h" // for GenericHeap
#include "matrix.h" // for MATRIX_COORD (ptr only), MatrixCoordPair
#include "stopper.h" // for DANGERR
namespace tesseract {
class Dict;
struct ViterbiStateEntry;
class WERD_RES;
// Heap of pain points used for determining where to chop/join.
using PainPointHeap = GenericHeap<MatrixCoordPair>;
// Types of pain points (ordered in the decreasing level of importance).
enum LMPainPointsType {
  LM_PPTYPE_BLAMER,
  LM_PPTYPE_AMBIG,
  LM_PPTYPE_PATH,
  LM_PPTYPE_SHAPE,
  LM_PPTYPE_NUM // Number of types; also used as the "no pain point" result.
};
// Debug names, indexed by LMPainPointsType.
// NOTE(review): "LM_PPTYPE_AMBIGS" does not exactly match the enumerator
// name LM_PPTYPE_AMBIG; harmless for debug output, but worth confirming
// the intended spelling.
static const char *const LMPainPointsTypeName[] = {
    "LM_PPTYPE_BLAMER",
    "LM_PPTYPE_AMBIGS",
    "LM_PPTYPE_PATH",
    "LM_PPTYPE_SHAPE",
};
class LMPainPoints {
public:
  static const float kDefaultPainPointPriorityAdjustment;
  // If there is a significant drop in character ngram probability or a
  // dangerous ambiguity make the thresholds on what blob combinations
  // can be classified looser.
  static const float kLooseMaxCharWhRatio;
  // Returns a description of the type of a pain point.
  static const char *PainPointDescription(LMPainPointsType type) {
    return LMPainPointsTypeName[type];
  }
  // max: maximum number of points kept per heap.
  // rat: maximum character width/height ratio.
  // fp: true if fixed pitch should be assumed.
  // d: dictionary (not owned), used for wildcard lookups in the matrix.
  // deb: debug level for print statements.
  LMPainPoints(int max, float rat, bool fp, const Dict *d, int deb)
      : max_heap_size_(max)
      , max_char_wh_ratio_(rat)
      , fixed_pitch_(fp)
      , dict_(d)
      , debug_level_(deb) {}
  ~LMPainPoints() = default;
  // Returns true if the heap of pain points of pp_type is not empty().
  inline bool HasPainPoints(LMPainPointsType pp_type) const {
    return !pain_points_heaps_[pp_type].empty();
  }
  // Dequeues the next pain point from the pain points queue and copies
  // its contents and priority to *pp and *priority.
  // Returns LM_PPTYPE_NUM if pain points queue is empty, otherwise the type.
  LMPainPointsType Deque(MATRIX_COORD *pp, float *priority);
  // Clears pain points heap.
  void Clear() {
    for (auto &pain_points_heap : pain_points_heaps_) {
      pain_points_heap.clear();
    }
  }
  // For each cell, generate a "pain point" if the cell is not classified
  // and has a left or right neighbor that was classified.
  void GenerateInitial(WERD_RES *word_res);
  // Generate pain points from the given path.
  void GenerateFromPath(float rating_cert_scale, ViterbiStateEntry *vse, WERD_RES *word_res);
  // Generate pain points from dangerous ambiguities in best choice.
  void GenerateFromAmbigs(const DANGERR &fixpt, ViterbiStateEntry *vse, WERD_RES *word_res);
  // Adds a pain point to classify chunks_record->ratings(col, row).
  // Returns true if a new pain point was added to an appropriate heap.
  // Pain point priority is set to special_priority for pain points of
  // LM_PPTYPE_AMBIG or LM_PPTYPE_PATH, for other pain points
  // AssociateStats::gap_sum is used.
  bool GeneratePainPoint(int col, int row, LMPainPointsType pp_type, float special_priority,
                         bool ok_to_extend, float max_char_wh_ratio, WERD_RES *word_res);
  // Adjusts the pain point coordinates to cope with expansion of the ratings
  // matrix due to a split of the blob with the given index.
  void RemapForSplit(int index);
private:
  // Priority queues containing pain points generated by the language model
  // The priority is set by the language model components, adjustments like
  // seam cost and width priority are factored into the priority.
  // Indexed by LMPainPointsType.
  PainPointHeap pain_points_heaps_[LM_PPTYPE_NUM];
  // Maximum number of points to keep in the heap.
  int max_heap_size_;
  // Maximum character width/height ratio.
  float max_char_wh_ratio_;
  // Set to true if fixed pitch should be assumed.
  bool fixed_pitch_;
  // Cached pointer to dictionary.
  const Dict *dict_;
  // Debug level for print statements.
  int debug_level_;
};
} // namespace tesseract
#endif // TESSERACT_WORDREC_PAIN_POINTS_H_
|
2301_81045437/tesseract
|
src/wordrec/lm_pain_points.h
|
C++
|
apache-2.0
| 4,983
|
///////////////////////////////////////////////////////////////////////
// File: lm_state.cpp
// Description: Structures and functionality for capturing the state of
// segmentation search guided by the language model.
// Author: Rika Antonova
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "lm_state.h"
namespace tesseract {
void ViterbiStateEntry::Print(const char *msg) const {
tprintf("%s ViterbiStateEntry", msg);
if (updated) {
tprintf("(NEW)");
}
if (this->debug_str != nullptr) {
tprintf(" str=%s", this->debug_str->c_str());
}
tprintf(" with ratings_sum=%.4f length=%d cost=%.6f", this->ratings_sum, this->length,
this->cost);
if (this->top_choice_flags) {
tprintf(" top_choice_flags=0x%x", this->top_choice_flags);
}
if (!this->Consistent()) {
tprintf(" inconsistent=(punc %d case %d chartype %d script %d font %d)",
this->consistency_info.NumInconsistentPunc(),
this->consistency_info.NumInconsistentCase(),
this->consistency_info.NumInconsistentChartype(),
this->consistency_info.inconsistent_script, this->consistency_info.inconsistent_font);
}
if (this->dawg_info) {
tprintf(" permuter=%d", this->dawg_info->permuter);
}
if (this->ngram_info) {
tprintf(" ngram_cl_cost=%g context=%s ngram pruned=%d",
this->ngram_info->ngram_and_classifier_cost, this->ngram_info->context.c_str(),
this->ngram_info->pruned);
}
if (this->associate_stats.shape_cost > 0.0f) {
tprintf(" shape_cost=%g", this->associate_stats.shape_cost);
}
tprintf(" %s", XHeightConsistencyEnumName[this->consistency_info.xht_decision]);
tprintf("\n");
}
/// Clears the viterbi search state back to its initial conditions.
void LanguageModelState::Clear() {
  viterbi_state_entries.clear();
  viterbi_state_entries_length = 0;
  viterbi_state_entries_prunable_length = 0;
  viterbi_state_entries_prunable_max_cost = FLT_MAX;
}
void LanguageModelState::Print(const char *msg) {
tprintf("%s VSEs (max_cost=%g prn_len=%d tot_len=%d):\n", msg,
viterbi_state_entries_prunable_max_cost, viterbi_state_entries_prunable_length,
viterbi_state_entries_length);
ViterbiStateEntry_IT vit(&viterbi_state_entries);
for (vit.mark_cycle_pt(); !vit.cycled_list(); vit.forward()) {
vit.data()->Print("");
}
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/lm_state.cpp
|
C++
|
apache-2.0
| 3,023
|
///////////////////////////////////////////////////////////////////////
// File: lm_state.h
// Description: Structures and functionality for capturing the state of
// segmentation search guided by the language model.
// Author: Rika Antonova
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_LANGUAGE_MODEL_DEFS_H_
#define TESSERACT_WORDREC_LANGUAGE_MODEL_DEFS_H_
#include <tesseract/unichar.h> // for UNICHAR_ID

#include <cfloat> // for FLT_MAX
#include <string> // for std::string

#include "associate.h"      // for AssociateStats
#include "dawg.h"           // for DawgPositionVector
#include "elst.h"           // for ELIST_ITERATOR, ELISTIZEH, ELIST_LINK
#include "lm_consistency.h" // for LMConsistencyInfo
#include "ratngs.h"         // for BLOB_CHOICE, PermuterType
#include "stopper.h"        // for DANGERR
#include "unicharset.h"     // for UNICHARSET
namespace tesseract {
/// Used for expressing various language model flags.
using LanguageModelFlagsType = unsigned char;
/// The following structs are used for storing the state of the language model
/// in the segmentation search graph. In this graph the nodes are BLOB_CHOICEs
/// and the links are the relationships between the underlying blobs (see
/// segsearch.h for a more detailed description).
///
/// Each of the BLOB_CHOICEs contains LanguageModelState struct, which has
/// a list of N best paths (list of ViterbiStateEntry) explored by the Viterbi
/// search leading up to and including this BLOB_CHOICE.
///
/// Each ViterbiStateEntry contains information from various components of the
/// language model: dawgs in which the path is found, character ngram model
/// probability of the path, script/chartype/font consistency info, state for
/// language-specific heuristics (e.g. hyphenated and compound words,
/// lower/upper case preferences, etc).
///
/// Each ViterbiStateEntry also contains the parent pointer, so that the path
/// that it represents (WERD_CHOICE) can be constructed by following these
/// parent pointers.
/// Struct for storing additional information used by Dawg language model
/// component. It stores the set of active dawgs in which the sequence of
/// letters on a path can be found.
struct LanguageModelDawgInfo {
  // Copies *a into active_dawgs (the pointer is not retained) and records
  // the permuter type of the path.
  LanguageModelDawgInfo(const DawgPositionVector *a, PermuterType pt)
      : active_dawgs(*a), permuter(pt) {}
  DawgPositionVector active_dawgs;
  PermuterType permuter;
};
/// Struct for storing additional information used by Ngram language model
/// component.
struct LanguageModelNgramInfo {
  // c: context string (copied); l: context length in unichar steps;
  // p: whether the ngram model pruned this path; nc / ncc: costs, see the
  // field comments below.
  LanguageModelNgramInfo(const char *c, int l, bool p, float nc, float ncc)
      : context(c)
      , context_unichar_step_len(l)
      , pruned(p)
      , ngram_cost(nc)
      , ngram_and_classifier_cost(ncc) {}
  std::string context; ///< context string
  /// Length of the context measured by advancing using UNICHAR::utf8_step()
  /// (should be at most the order of the character ngram model used).
  int context_unichar_step_len;
  /// The paths with pruned set are pruned out from the perspective of the
  /// character ngram model. They are explored further because they represent
  /// a dictionary match or a top choice. Thus ngram_info is still computed
  /// for them in order to calculate the combined cost.
  bool pruned;
  /// -ln(P_ngram_model(path))
  float ngram_cost;
  /// -[ ln(P_classifier(path)) + scale_factor * ln(P_ngram_model(path)) ]
  float ngram_and_classifier_cost;
};
/// Struct for storing the information about a path in the segmentation graph
/// explored by Viterbi search.
struct ViterbiStateEntry : public ELIST_LINK {
  // pe: parent entry (nullptr at the start of a path); b: the BLOB_CHOICE
  // this entry extends the path with; c: total path cost; ol: outline length
  // of b; d / n: optional dawg / ngram info (ownership is taken);
  // debug_uch: when non-null, enables accumulation of debug_str.
  ViterbiStateEntry(ViterbiStateEntry *pe, BLOB_CHOICE *b, float c, float ol,
                    const LMConsistencyInfo &ci, const AssociateStats &as,
                    LanguageModelFlagsType tcf, LanguageModelDawgInfo *d, LanguageModelNgramInfo *n,
                    const char *debug_uch)
      : curr_b(b)
      , parent_vse(pe)
      , competing_vse(nullptr)
      , dawg_info(d)
      , ngram_info(n)
      , cost(c)
      , ratings_sum(b->rating())
      , min_certainty(b->certainty())
      , adapted(b->IsAdapted())
      , length(1)
      , outline_length(ol)
      , consistency_info(ci)
      , associate_stats(as)
      , top_choice_flags(tcf)
      , updated(true) {
    debug_str = (debug_uch == nullptr) ? nullptr : new std::string();
    if (pe != nullptr) {
      // Accumulate whole-path totals from the parent entry.
      ratings_sum += pe->ratings_sum;
      if (pe->min_certainty < min_certainty) {
        min_certainty = pe->min_certainty;
      }
      adapted += pe->adapted;
      length += pe->length;
      outline_length += pe->outline_length;
      if (debug_uch != nullptr) {
        *debug_str += *(pe->debug_str);
      }
    }
    if (debug_str != nullptr && debug_uch != nullptr) {
      *debug_str += debug_uch;
    }
  }
  ~ViterbiStateEntry() {
    delete dawg_info;
    delete ngram_info;
    delete debug_str;
  }
  /// Comparator function for sorting ViterbiStateEntry_LISTs.
  /// Returns -1 when e1's cost is smaller (cheaper entries sort first),
  /// 1 when it is larger, and 0 for equal costs.  The 0 case is required:
  /// a qsort-style comparator must be symmetric (cmp(a,b) == -cmp(b,a)),
  /// and the previous version returned 1 for equal costs in both argument
  /// orders, yielding an inconsistent ordering.
  static int Compare(const void *e1, const void *e2) {
    const ViterbiStateEntry *ve1 = *static_cast<const ViterbiStateEntry *const *>(e1);
    const ViterbiStateEntry *ve2 = *static_cast<const ViterbiStateEntry *const *>(e2);
    if (ve1->cost < ve2->cost) {
      return -1;
    }
    if (ve2->cost < ve1->cost) {
      return 1;
    }
    return 0;
  }
  // Returns true if the path is consistent; case inconsistencies are
  // forgiven when the path is backed by a dawg (dictionary) match.
  inline bool Consistent() const {
    if (dawg_info != nullptr && consistency_info.NumInconsistentCase() == 0) {
      return true;
    }
    return consistency_info.Consistent();
  }
  /// Returns true if this VSE has an alphanumeric character as its classifier
  /// result.
  bool HasAlnumChoice(const UNICHARSET &unicharset) {
    if (curr_b == nullptr) {
      return false;
    }
    UNICHAR_ID unichar_id = curr_b->unichar_id();
    if (unicharset.get_isalpha(unichar_id) || unicharset.get_isdigit(unichar_id)) {
      return true;
    }
    return false;
  }
  void Print(const char *msg) const;
  /// Pointers to BLOB_CHOICE and parent ViterbiStateEntry (not owned by this).
  BLOB_CHOICE *curr_b;
  ViterbiStateEntry *parent_vse;
  /// Pointer to a case-competing ViterbiStateEntry in the same list that
  /// represents a path ending in the same letter of the opposite case.
  ViterbiStateEntry *competing_vse;
  /// Extra information maintained by Dawg language model component
  /// (owned by ViterbiStateEntry).
  LanguageModelDawgInfo *dawg_info;
  /// Extra information maintained by Ngram language model component
  /// (owned by ViterbiStateEntry).
  LanguageModelNgramInfo *ngram_info;
  /// UTF8 string representing the path corresponding to this vse.
  /// Populated only when language_model_debug_level > 0.
  std::string *debug_str;
  /// The cost is an adjusted ratings sum, that is adjusted by all the language
  /// model components that use Viterbi search.
  float cost;
  /// Various information about the characters on the path represented
  /// by this ViterbiStateEntry.
  float ratings_sum;   ///< sum of ratings of character on the path
  float min_certainty; ///< minimum certainty on the path
  int adapted;         ///< number of BLOB_CHOICES from adapted templates
  int length;          ///< number of characters on the path
  float outline_length; ///< length of the outline so far
  LMConsistencyInfo consistency_info; ///< path consistency info
  AssociateStats associate_stats;     ///< character widths/gaps/seams
  /// Flags for marking the entry as a top choice path with
  /// the smallest rating or lower/upper case letters).
  LanguageModelFlagsType top_choice_flags;
  bool updated; ///< set to true if the entry has just been created/updated
};
ELISTIZEH(ViterbiStateEntry)
/// Struct to store information maintained by various language model components.
struct LanguageModelState {
  LanguageModelState() = default;
  ~LanguageModelState() = default;
  /// Clears the viterbi search state back to its initial conditions.
  void Clear();
  void Print(const char *msg);
  /// Storage for the Viterbi state.
  ViterbiStateEntry_LIST viterbi_state_entries;
  /// Number and max cost of prunable paths in viterbi_state_entries.
  int viterbi_state_entries_prunable_length = 0;
  float viterbi_state_entries_prunable_max_cost = FLT_MAX;
  /// Total number of entries in viterbi_state_entries.
  int viterbi_state_entries_length = 0;
};
/// Bundle together all the things pertaining to the best choice/state.
struct BestChoiceBundle {
  // Allocates one (owned) LanguageModelState per row of the ratings matrix.
  explicit BestChoiceBundle(int matrix_dimension) : updated(false), best_vse(nullptr) {
    beam.reserve(matrix_dimension);
    for (int i = 0; i < matrix_dimension; ++i) {
      beam.push_back(new LanguageModelState);
    }
  }
  ~BestChoiceBundle() {
    for (auto &state : beam) {
      delete state;
    }
  }
  // The destructor deletes the raw pointers in beam, so a copy would lead
  // to a double delete; forbid copying (Rule of Five).
  BestChoiceBundle(const BestChoiceBundle &) = delete;
  BestChoiceBundle &operator=(const BestChoiceBundle &) = delete;
  /// Flag to indicate whether anything was changed.
  bool updated;
  /// Places to try to fix the word suggested by ambiguity checking.
  DANGERR fixpt;
  /// The beam. One LanguageModelState containing a list of ViterbiStateEntry
  /// per row in the ratings matrix containing all VSEs whose BLOB_CHOICE is
  /// somewhere in the corresponding row.
  std::vector<LanguageModelState *> beam;
  /// Best ViterbiStateEntry and BLOB_CHOICE.
  ViterbiStateEntry *best_vse;
};
} // namespace tesseract
#endif // TESSERACT_WORDREC_LANGUAGE_MODEL_DEFS_H_
|
2301_81045437/tesseract
|
src/wordrec/lm_state.h
|
C++
|
apache-2.0
| 10,034
|
/******************************************************************************
*
* File: outlines.cpp (Formerly outlines.c)
* Description: Combinatorial Splitter
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "outlines.h"
#include "wordrec.h"
namespace tesseract {
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**********************************************************************
 * near_point
 *
 * Find the point on a line segment that is closest to a point not on
 * the line segment. Return that point in near_pt. Returns whether
 * near_pt was newly created.
 **********************************************************************/
bool Wordrec::near_point(EDGEPT *point, EDGEPT *line_pt_0, EDGEPT *line_pt_1, EDGEPT **near_pt) {
  const float x0 = line_pt_0->pos.x;
  const float y0 = line_pt_0->pos.y;
  const float x1 = line_pt_1->pos.x;
  const float y1 = line_pt_1->pos.y;
  TPOINT proj; // projection of point onto the (extended) line
  if (x0 == x1) {
    // Vertical line: the projection keeps the line's x and the point's y.
    proj.x = static_cast<int16_t>(x0);
    proj.y = point->pos.y;
  } else {
    // General case: drop a perpendicular from point onto y = m*x + b.
    const float m = (y0 - y1) / (x0 - x1);
    const float b = y1 - x1 * m;
    proj.x = static_cast<int16_t>((point->pos.x + (point->pos.y - b) * m) / (m * m + 1));
    proj.y = static_cast<int16_t>(m * proj.x + b);
  }
  if (is_on_line(proj, line_pt_0->pos, line_pt_1->pos) && !same_point(proj, line_pt_0->pos) &&
      !same_point(proj, line_pt_1->pos)) {
    // The projection lies strictly inside the segment: split the edge there.
    *near_pt = make_edgept(proj.x, proj.y, line_pt_1, line_pt_0);
    return true;
  }
  // Otherwise the closest segment point is one of the two end points.
  *near_pt = closest(point, line_pt_0, line_pt_1);
  return false;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/outlines.cpp
|
C++
|
apache-2.0
| 2,765
|
/******************************************************************************
*
* File: outlines.h
* Description: Combinatorial Splitter
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#ifndef OUTLINES_H
#define OUTLINES_H
#include <cmath> // for abs
#include "blobs.h" // for TPOINT
#include "params.h" // for IntParam
#include "wordrec.h" // for Wordrec
/*----------------------------------------------------------------------
C o n s t a n t s
----------------------------------------------------------------------*/
#define LARGE_DISTANCE 100000 /* Used for closest dist */
#define MIN_BLOB_SIZE 10      /* Big units */
#define MAX_ASPECT_RATIO 2.5  /* Widest character */
/*----------------------------------------------------------------------
              M a c r o s
----------------------------------------------------------------------*/
/* NOTE: these are function-like macros, so each argument may be evaluated
 * more than once; do not pass expressions with side effects. */
/**********************************************************************
 * same_point
 *
 * Return true if the point values are the same. The parameters must
 * be of type POINT.
 * Uses the chop_same_distance parameter (must be in scope at the call
 * site) as the per-axis tolerance.
 **********************************************************************/
#define same_point(p1, p2) \
  ((abs(p1.x - p2.x) < chop_same_distance) && (abs(p1.y - p2.y) < chop_same_distance))
/**********************************************************************
 * dist_square
 *
 * Return the square of the distance between these two points. The
 * parameters must be of type POINT.
 **********************************************************************/
#define dist_square(p1, p2) ((p2.x - p1.x) * (p2.x - p1.x) + (p2.y - p1.y) * (p2.y - p1.y))
/**********************************************************************
 * closest
 *
 * The expression provides the EDGEPT that is closest to the point in
 * question. All three parameters must be of type EDGEPT.
 * Either of p1/p2 may be null; the other (possibly null) one is
 * returned in that case.
 **********************************************************************/
#define closest(test_p, p1, p2) \
  (p1 ? (p2 ? ((dist_square(test_p->pos, p1->pos) < dist_square(test_p->pos, p2->pos)) ? p1 : p2) \
            : p1) \
      : p2)
/**********************************************************************
 * edgept_dist
 *
 * Return the distance (squared) between the two edge points.
 **********************************************************************/
#define edgept_dist(p1, p2) (dist_square((p1)->pos, (p2)->pos))
/**********************************************************************
 * is_exterior_point
 *
 * Return true if the point supplied is an exterior projection from the
 * outline.
 **********************************************************************/
#define is_exterior_point(edge, point) \
  (same_point(edge->prev->pos, point->pos) || same_point(edge->next->pos, point->pos) || \
   (angle_change(edge->prev, edge, edge->next) - angle_change(edge->prev, edge, point) > 20))
/**********************************************************************
 * is_equal
 *
 * Return true if the POINTs are equal.
 **********************************************************************/
#define is_equal(p1, p2) (((p1).x == (p2).x) && ((p1).y == (p2).y))
/**********************************************************************
 * is_on_line
 *
 * Return true if the point is on the line segment between the two end
 * points. The two end points are included as part of the line. The
 * parameters must be of type POINT.
 **********************************************************************/
#define is_on_line(p, p0, p1) \
  (within_range((p).x, (p0).x, (p1).x) && within_range((p).y, (p0).y, (p1).y))
/**********************************************************************
 * within_range
 *
 * Return true if the first number is in between the second two numbers.
 * Return false otherwise.
 * The bounds may be given in either order (x0 <= x1 or x1 <= x0).
 **********************************************************************/
#define within_range(x, x0, x1) (((x0 <= x) && (x <= x1)) || ((x1 <= x) && (x <= x0)))
#endif
|
2301_81045437/tesseract
|
src/wordrec/outlines.h
|
C
|
apache-2.0
| 4,812
|
///////////////////////////////////////////////////////////////////////
// File: params_model.cpp
// Description: Trained language model parameters.
// Author: David Eger
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "params_model.h"
#include <cctype>
#include <cmath>
#include <cstdio>
#include "bitvector.h"
#include "helpers.h" // for ClipToRange
#include "serialis.h" // for TFile
#include "tprintf.h"
namespace tesseract {
// Scale factor to apply to params model scores.
static const float kScoreScaleFactor = 100.0f;
// Minimum cost result to return.
static const float kMinFinalCost = 0.001f;
// Maximum cost result to return.
static const float kMaxFinalCost = 100.0f;
// Dumps the per-feature weights of every pass to the debug output.
void ParamsModel::Print() {
  for (int pass = 0; pass < PTRAIN_NUM_PASSES; ++pass) {
    tprintf("ParamsModel for pass %d lang %s\n", pass, lang_.c_str());
    const auto &weights = weights_vec_[pass];
    for (unsigned i = 0; i < weights.size(); ++i) {
      tprintf("%s = %g\n", kParamsTrainingFeatureTypeName[i], weights[i]);
    }
  }
}
// Replaces this model's weights for every pass with other_model's weights.
void ParamsModel::Copy(const ParamsModel &other_model) {
  for (int pass = 0; pass < PTRAIN_NUM_PASSES; ++pass) {
    weights_vec_[pass] = other_model.weights_for_pass(static_cast<PassEnum>(pass));
  }
}
// Given a (modifiable) line, parse out a key / value pair.
// The key is terminated in place at the first ASCII whitespace character;
// the rest of the line is parsed as a float. Comment lines (leading '#')
// and lines without whitespace or a parsable value are rejected.
// Return true on success.
bool ParamsModel::ParseLine(char *line, char **key, float *val) {
  if (line[0] == '#') {
    return false; // comment line
  }
  // Scan to the first ASCII whitespace character.
  int pos = 0;
  while (line[pos] && !(isascii(line[pos]) && isspace(line[pos]))) {
    ++pos;
  }
  if (!line[pos]) {
    tprintf("ParamsModel::Incomplete line %s\n", line);
    return false;
  }
  // Split the buffer: the key becomes a NUL-terminated prefix.
  line[pos] = 0;
  *key = line;
  return sscanf(line + pos + 1, " %f", val) == 1;
}
// Applies params model weights to the given features.
// Assumes that features is an array of size PTRAIN_NUM_FEATURE_TYPES.
// The cost is set to a number that can be multiplied by the outline length,
// as with the old ratings scheme. This enables words of different length
// and combinations of words to be compared meaningfully.
float ParamsModel::ComputeCost(const float features[]) const {
  const std::vector<float> &weights = weights_vec_[pass_];
  float score = 0.0;
  for (int i = 0; i < PTRAIN_NUM_FEATURE_TYPES; ++i) {
    score += weights[i] * features[i];
  }
  return ClipToRange(-score / kScoreScaleFactor, kMinFinalCost, kMaxFinalCost);
}
// Returns true when every pass has the same number of weights and each
// weight of that differs from ours by at most a small epsilon.
bool ParamsModel::Equivalent(const ParamsModel &that) const {
  constexpr float kEpsilon = 0.0001f;
  for (int pass = 0; pass < PTRAIN_NUM_PASSES; ++pass) {
    const auto &ours = weights_vec_[pass];
    const auto &theirs = that.weights_vec_[pass];
    if (ours.size() != theirs.size()) {
      return false;
    }
    for (unsigned i = 0; i < ours.size(); ++i) {
      // Bit-identical values trivially match; otherwise compare within epsilon.
      if (ours[i] != theirs[i] && std::fabs(ours[i] - theirs[i]) > kEpsilon) {
        return false;
      }
    }
  }
  return true;
}
// Loads the weights for the current pass from fp, a text file of
// "name value" lines as written by SaveToFile. Lines with unknown names
// are skipped with a warning; on a missing feature the partially loaded
// state is discarded. Returns true iff every feature was present.
bool ParamsModel::LoadFromFp(const char *lang, TFile *fp) {
  const int kMaxLineSize = 100;
  char line[kMaxLineSize];
  // Tracks which feature indices have been seen so far.
  BitVector present;
  present.Init(PTRAIN_NUM_FEATURE_TYPES);
  lang_ = lang;
  // Load weights for passes with adaption on.
  std::vector<float> &weights = weights_vec_[pass_];
  weights.clear();
  weights.resize(PTRAIN_NUM_FEATURE_TYPES, 0.0f);
  while (fp->FGets(line, kMaxLineSize) != nullptr) {
    char *key = nullptr;
    float value;
    if (!ParseLine(line, &key, &value)) {
      continue; // comment or malformed line
    }
    int idx = ParamsTrainingFeatureByName(key);
    if (idx < 0) {
      tprintf("ParamsModel::Unknown parameter %s\n", key);
      continue;
    }
    if (!present[idx]) {
      present.SetValue(idx, true);
    }
    // Later duplicates of the same key overwrite earlier values.
    weights[idx] = value;
  }
  bool complete = (present.NumSetBits() == PTRAIN_NUM_FEATURE_TYPES);
  if (!complete) {
    for (int i = 0; i < PTRAIN_NUM_FEATURE_TYPES; i++) {
      if (!present[i]) {
        tprintf("Missing field %s.\n", kParamsTrainingFeatureTypeName[i]);
      }
    }
    // Reset to the uninitialized state rather than keep partial weights.
    lang_ = "";
    weights.clear();
  }
  return complete;
}
// Writes the current pass's weights to full_path in the textual
// "name value" format read by LoadFromFp. Returns true on success.
bool ParamsModel::SaveToFile(const char *full_path) const {
  const std::vector<float> &weights = weights_vec_[pass_];
  if (weights.size() != PTRAIN_NUM_FEATURE_TYPES) {
    tprintf("Refusing to save ParamsModel that has not been initialized.\n");
    return false;
  }
  FILE *fp = fopen(full_path, "wb");
  if (!fp) {
    tprintf("Could not open %s for writing.\n", full_path);
    return false;
  }
  bool all_good = true;
  for (unsigned i = 0; i < weights.size(); i++) {
    if (fprintf(fp, "%s %f\n", kParamsTrainingFeatureTypeName[i], weights[i]) < 0) {
      all_good = false;
    }
  }
  // fclose() can fail when buffered data cannot be flushed to disk; treat
  // that as a write failure instead of silently reporting success.
  if (fclose(fp) != 0) {
    all_good = false;
  }
  return all_good;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/params_model.cpp
|
C++
|
apache-2.0
| 5,249
|
///////////////////////////////////////////////////////////////////////
// File: params_model.h
// Description: Trained feature serialization for language parameter training.
// Author: David Eger
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_PARAMS_MODEL_H_
#define TESSERACT_WORDREC_PARAMS_MODEL_H_
#include <tesseract/export.h> // for TESS_API

#include <string> // for std::string
#include <vector> // for std::vector

#include "params_training_featdef.h" // for PTRAIN_NUM_FEATURE_TYPES
namespace tesseract {
class TFile;
// Represents the learned weights for a given language.
class TESS_API ParamsModel {
public:
  // Enum for expressing OCR pass.
  enum PassEnum {
    PTRAIN_PASS1,
    PTRAIN_PASS2,
    PTRAIN_NUM_PASSES
  };
  ParamsModel() : pass_(PTRAIN_PASS1) {}
  // Constructs a model for lang whose pass-1 weights are weights.
  ParamsModel(const char *lang, const std::vector<float> &weights)
      : lang_(lang), pass_(PTRAIN_PASS1) {
    weights_vec_[pass_] = weights;
  }
  // True when the current pass has a full set of feature weights.
  inline bool Initialized() {
    return weights_vec_[pass_].size() == PTRAIN_NUM_FEATURE_TYPES;
  }
  // Prints out feature weights.
  void Print();
  // Clears weights for all passes.
  void Clear() {
    for (auto &weights : weights_vec_) {
      weights.clear();
    }
  }
  // Copies the weights of the given params model.
  void Copy(const ParamsModel &other_model);
  // Applies params model weights to the given features.
  // Assumes that features is an array of size PTRAIN_NUM_FEATURE_TYPES.
  float ComputeCost(const float features[]) const;
  // True when the two models' weights agree within a small epsilon.
  bool Equivalent(const ParamsModel &that) const;
  // Returns true on success.
  bool SaveToFile(const char *full_path) const;
  // Returns true on success.
  bool LoadFromFp(const char *lang, TFile *fp);
  // Weights of the current pass.
  const std::vector<float> &weights() const {
    return weights_vec_[pass_];
  }
  // Weights of the given pass.
  const std::vector<float> &weights_for_pass(PassEnum pass) const {
    return weights_vec_[pass];
  }
  // Selects which pass's weights ComputeCost() etc. operate on.
  void SetPass(PassEnum pass) {
    pass_ = pass;
  }

private:
  bool ParseLine(char *line, char **key, float *val);
  std::string lang_;
  // Set to the current pass type and used to determine which set of weights
  // should be used for ComputeCost() and other functions.
  PassEnum pass_;
  // Several sets of weights for various OCR passes (e.g. pass1 with adaption,
  // pass2 without adaption, etc).
  std::vector<float> weights_vec_[PTRAIN_NUM_PASSES];
};
} // namespace tesseract
#endif // TESSERACT_WORDREC_PARAMS_MODEL_H_
|
2301_81045437/tesseract
|
src/wordrec/params_model.h
|
C++
|
apache-2.0
| 2,986
|
/******************************************************************************
*
* File: pieces.cpp
* Description:
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "blobs.h"
#include "helpers.h"
#include "matrix.h"
#include "ratngs.h"
#include "seam.h"
#include "wordrec.h"
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
using tesseract::ScoredFont;
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**********************************************************************
* classify_piece
*
* Create a larger piece from a collection of smaller ones. Classify
* it and return the results. Take the large piece apart to leave
* the collection of small pieces un modified.
**********************************************************************/
namespace tesseract {
// Temporarily joins the blobs in [start, end] into one piece, classifies
// the joined piece, tags each resulting choice with its ratings-matrix
// cell, then restores the original pieces. The caller owns the result.
BLOB_CHOICE_LIST *Wordrec::classify_piece(const std::vector<SEAM *> &seams, int16_t start,
                                          int16_t end, const char *description, TWERD *word,
                                          BlamerBundle *blamer_bundle) {
  const bool joined = end > start;
  if (joined) {
    SEAM::JoinPieces(seams, word->blobs, start, end);
  }
  BLOB_CHOICE_LIST *choices =
      classify_blob(word->blobs[start], description, ScrollView::WHITE, blamer_bundle);
  // Set the matrix_cell_ entries in all the BLOB_CHOICES.
  BLOB_CHOICE_IT it(choices);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    it.data()->set_matrix_cell(start, end);
  }
  if (joined) {
    SEAM::BreakPieces(seams, word->blobs, start, end);
  }
  return choices;
}
// qsort-style comparator sorting BLOB_CHOICEs in increasing order of
// unichar_id. Uses explicit comparisons instead of the previous
// subtraction, whose result could overflow int for extreme id values.
template <class BLOB_CHOICE>
int SortByUnicharID(const void *void1, const void *void2) {
  const BLOB_CHOICE *p1 = *static_cast<const BLOB_CHOICE *const *>(void1);
  const BLOB_CHOICE *p2 = *static_cast<const BLOB_CHOICE *const *>(void2);
  const auto id1 = p1->unichar_id();
  const auto id2 = p2->unichar_id();
  return (id1 > id2) - (id1 < id2);
}
// qsort-style comparator sorting BLOB_CHOICEs in decreasing order of
// rating. Returns 0 for equal ratings: a qsort comparator must be
// symmetric (cmp(a,b) == -cmp(b,a)), and the previous version returned
// -1 for equal ratings in both argument orders.
template <class BLOB_CHOICE>
int SortByRating(const void *void1, const void *void2) {
  const BLOB_CHOICE *p1 = *static_cast<const BLOB_CHOICE *const *>(void1);
  const BLOB_CHOICE *p2 = *static_cast<const BLOB_CHOICE *const *>(void2);
  if (p1->rating() < p2->rating()) {
    return 1;
  }
  if (p2->rating() < p1->rating()) {
    return -1;
  }
  return 0;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/pieces.cpp
|
C++
|
apache-2.0
| 3,256
|
/******************************************************************************
*
* File: plotedges.cpp (Formerly plotedges.c)
* Description: Graphics routines for "Edges" and "Outlines" windows
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "plotedges.h"
#include "render.h"
#include "split.h"
#ifndef GRAPHICS_DISABLED
namespace tesseract {
/*----------------------------------------------------------------------
V a r i a b l e s
----------------------------------------------------------------------*/
ScrollView *edge_window = nullptr;
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
/**********************************************************************
* display_edgepts
*
* Macro to display edge points in a window.
**********************************************************************/
void display_edgepts(LIST outlines) {
  // Reuse the shared window when it exists, otherwise create it now.
  if (edge_window != nullptr) {
    edge_window->Clear();
  } else {
    edge_window = new ScrollView("Edges", 750, 150, 400, 128, 800, 256, true);
  }
  // Each list node holds the head of an EDGEPT loop; draw them all in white.
  iterate(outlines) {
    auto *loop = reinterpret_cast<EDGEPT *>(outlines->first_node());
    render_edgepts(edge_window, loop, ScrollView::WHITE);
  }
}
/**********************************************************************
* draw_blob_edges
*
* Display the edges of this blob in the edges window.
**********************************************************************/
void draw_blob_edges(TBLOB *blob) {
  // Only draw when split display is enabled.
  if (!wordrec_display_splits) {
    return;
  }
  // Gather each outline's edge-point loop into a temporary list.
  LIST edge_list = NIL_LIST;
  for (TESSLINE *outline = blob->outlines; outline != nullptr; outline = outline->next) {
    edge_list = push(edge_list, outline->loop);
  }
  display_edgepts(edge_list);
  destroy(edge_list); // Reclaim the temporary list.
}
/**********************************************************************
* mark_outline
*
* Make a mark on the edges window at a particular location.
**********************************************************************/
void mark_outline(EDGEPT *edgept) { /* Start of point list */
  auto *window = edge_window;
  float x = edgept->pos.x;
  float y = edgept->pos.y;
  window->Pen(ScrollView::RED);
  window->SetCursor(x, y);
  // Trace a small marker glyph as a fixed sequence of relative strokes.
  static const float kStrokes[][2] = {{-4, -12}, {-2, 4}, {-4, 2}, {10, 6}};
  for (const auto &stroke : kStrokes) {
    x += stroke[0];
    y += stroke[1];
    window->DrawTo(x, y);
  }
  window->Update();
}
} // namespace tesseract
#endif // !GRAPHICS_DISABLED
|
2301_81045437/tesseract
|
src/wordrec/plotedges.cpp
|
C++
|
apache-2.0
| 3,413
|
/******************************************************************************
*
* File: plotedges.h
* Description: Convert the various data type into line lists
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#ifndef PLOTEDGES_H
#define PLOTEDGES_H
#include "oldlist.h" // for LIST
namespace tesseract {
class ScrollView;
struct EDGEPT;
struct TBLOB;
/*----------------------------------------------------------------------
V a r i a b l e s
----------------------------------------------------------------------*/
extern ScrollView *edge_window; /* Window for edges */
/*----------------------------------------------------------------------
F u n c t i o n s
---------------------------------------------------------------------*/
void display_edgepts(LIST outlines);
void draw_blob_edges(TBLOB *blob);
void mark_outline(EDGEPT *edgept);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/wordrec/plotedges.h
|
C++
|
apache-2.0
| 1,610
|
/******************************************************************************
*
* File: render.cpp (Formerly render.c)
* Description: Convert the various data type into line lists
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "render.h"
#include "blobs.h"
#include <cmath>
namespace tesseract {
/*----------------------------------------------------------------------
V a r i a b l e s
----------------------------------------------------------------------*/
// Shared window used by display_blob(); lazily created on first use.
ScrollView *blob_window = nullptr;
// Colors for outlines (see render.h); NUM_COLORS entries.
ScrollView::Color color_list[] = {ScrollView::RED, ScrollView::CYAN, ScrollView::YELLOW,
                                  ScrollView::BLUE, ScrollView::GREEN, ScrollView::WHITE};
BOOL_VAR(wordrec_display_all_blobs, 0, "Display Blobs");
BOOL_VAR(wordrec_blob_pause, 0, "Blob pause");
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
/**********************************************************************
* display_blob
*
* Macro to display blob in a window.
**********************************************************************/
void display_blob(TBLOB *blob, ScrollView::Color color) {
  // Reuse the shared "Blobs" window when it exists, otherwise create it.
  if (blob_window != nullptr) {
    blob_window->Clear();
  } else {
    blob_window = new ScrollView("Blobs", 520, 10, 500, 256, 2000, 256, true);
  }
  render_blob(blob_window, blob, color);
}
/**********************************************************************
* render_blob
*
* Create a list of line segments that represent the expanded outline
* that was supplied as input.
**********************************************************************/
void render_blob(ScrollView *window, TBLOB *blob, ScrollView::Color color) {
  // A null blob draws nothing.
  if (blob != nullptr) {
    render_outline(window, blob->outlines, color);
  }
}
/**********************************************************************
* render_edgepts
*
* Create a list of line segments that represent the expanded outline
* that was supplied as input.
**********************************************************************/
void render_edgepts(ScrollView *window, EDGEPT *edgept, ScrollView::Color color) {
  if (edgept == nullptr) {
    return;
  }
  window->Pen(color);
  window->SetCursor(edgept->pos.x, edgept->pos.y);
  // Walk the circular point list once, drawing a segment to each point;
  // the final segment closes the loop back at the starting point.
  EDGEPT *pt = edgept;
  do {
    pt = pt->next;
    window->DrawTo(pt->pos.x, pt->pos.y);
  } while (pt != edgept);
}
/**********************************************************************
* render_outline
*
* Create a list of line segments that represent the expanded outline
* that was supplied as input.
**********************************************************************/
void render_outline(ScrollView *window, TESSLINE *outline, ScrollView::Color color) {
  // Iteratively draw this outline's loop (if any) and all its successors.
  // (Equivalent to the former tail recursion on outline->next.)
  for (; outline != nullptr; outline = outline->next) {
    if (outline->loop != nullptr) {
      render_edgepts(window, outline->loop, color);
    }
  }
}
#endif // !GRAPHICS_DISABLED
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/render.cpp
|
C++
|
apache-2.0
| 4,094
|
/******************************************************************************
*
* File: render.h
* Description: Convert the various data type into line lists
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1989, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#ifndef RENDER_H
#define RENDER_H
#include "params.h" // for BOOL_VAR_H, BoolParam
#include "scrollview.h" // ScrollView
namespace tesseract {
struct EDGEPT;
struct TBLOB;
struct TESSLINE;
/*----------------------------------------------------------------------
V a r i a b l e s
----------------------------------------------------------------------*/
extern ScrollView *blob_window; // Window for blobs
extern ScrollView::Color color_list[]; // Colors for outlines
extern BOOL_VAR_H(wordrec_display_all_blobs);
extern BOOL_VAR_H(wordrec_blob_pause);
#define NUM_COLORS 6
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
void display_blob(TBLOB *blob, ScrollView::Color color);
void render_blob(ScrollView *window, TBLOB *blob, ScrollView::Color color);
void render_edgepts(ScrollView *window, EDGEPT *edgept, ScrollView::Color color);
void render_outline(ScrollView *window, TESSLINE *outline, ScrollView::Color color);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/wordrec/render.h
|
C++
|
apache-2.0
| 2,026
|
///////////////////////////////////////////////////////////////////////
// File: segsearch.cpp
// Description: Segmentation search functions.
// Author: Daria Antonova
//
// (C) Copyright 2009, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include <cstdint> // for INT32_MAX
#include "blamer.h" // for BlamerBundle
#include "errcode.h" // for ASSERT_HOST
#include "lm_pain_points.h" // for LMPainPoints, LM_PPTYPE_SHAPE, LMPainPoi...
#include "lm_state.h" // for BestChoiceBundle, ViterbiStateEntry
#include "matrix.h" // for MATRIX_COORD, MATRIX
#include "pageres.h" // for WERD_RES
#include "params.h" // for BoolParam, IntParam, DoubleParam
#include "ratngs.h" // for BLOB_CHOICE_LIST, BLOB_CHOICE_IT
#include "tprintf.h" // for tprintf
#include "wordrec.h" // for Wordrec, SegSearchPending (ptr only)
namespace tesseract {
// Runs the full segmentation search on word_res: finds an initial best path
// through the ratings matrix, optionally improves it by chopping, then
// repeatedly dequeues "pain points" (promising unclassified matrix cells),
// classifies them, and re-runs the Viterbi update until the search is done
// or too many futile classifications accumulate. blamer_bundle, when
// non-null, records where to place blame for incorrect results.
void Wordrec::SegSearch(WERD_RES *word_res, BestChoiceBundle *best_choice_bundle,
                        BlamerBundle *blamer_bundle) {
  // Priority queue of matrix cells still worth classifying.
  LMPainPoints pain_points(segsearch_max_pain_points, segsearch_max_char_wh_ratio,
                           assume_fixed_pitch_char_segment, &getDict(), segsearch_debug_level);
  // Compute scaling factor that will help us recover blob outline length
  // from classifier rating and certainty for the blob.
  float rating_cert_scale = -1.0 * getDict().certainty_scale / rating_scale;
  std::vector<SegSearchPending> pending;
  InitialSegSearch(word_res, &pain_points, &pending, best_choice_bundle, blamer_bundle);
  if (!SegSearchDone(0)) { // find a better choice
    if (chop_enable && word_res->chopped_word != nullptr) {
      improve_by_chopping(rating_cert_scale, word_res, best_choice_bundle, blamer_bundle,
                          &pain_points, &pending);
    }
    if (chop_debug) {
      SEAM::PrintSeams("Final seam list:", word_res->seam_array);
    }
    if (blamer_bundle != nullptr && !blamer_bundle->ChoiceIsCorrect(word_res->best_choice)) {
      blamer_bundle->SetChopperBlame(word_res, wordrec_debug_blamer);
    }
  }
  // Keep trying to find a better path by fixing the "pain points".
  MATRIX_COORD pain_point;
  float pain_point_priority;
  int num_futile_classifications = 0;
  std::string blamer_debug;
  while (wordrec_enable_assoc &&
         (!SegSearchDone(num_futile_classifications) ||
          (blamer_bundle != nullptr && blamer_bundle->GuidedSegsearchStillGoing()))) {
    // Get the next valid "pain point".
    bool found_nothing = true;
    LMPainPointsType pp_type;
    while ((pp_type = pain_points.Deque(&pain_point, &pain_point_priority)) != LM_PPTYPE_NUM) {
      // Widen the matrix band if the dequeued cell falls outside it.
      if (!pain_point.Valid(*word_res->ratings)) {
        word_res->ratings->IncreaseBandSize(pain_point.row - pain_point.col + 1);
      }
      // Accept only cells not yet classified (wildcard entries don't count).
      if (pain_point.Valid(*word_res->ratings) &&
          !word_res->ratings->Classified(pain_point.col, pain_point.row, getDict().WildcardID())) {
        found_nothing = false;
        break;
      }
    }
    if (found_nothing) {
      if (segsearch_debug_level > 0) {
        tprintf("Pain points queue is empty\n");
      }
      break;
    }
    ProcessSegSearchPainPoint(pain_point_priority, pain_point,
                              LMPainPoints::PainPointDescription(pp_type), &pending, word_res,
                              &pain_points, blamer_bundle);
    // Re-run the Viterbi update from the classified column onwards.
    UpdateSegSearchNodes(rating_cert_scale, pain_point.col, &pending, word_res, &pain_points,
                         best_choice_bundle, blamer_bundle);
    // Count classifications that did not improve the best choice.
    if (!best_choice_bundle->updated) {
      ++num_futile_classifications;
    }
    if (segsearch_debug_level > 0) {
      tprintf("num_futile_classifications %d\n", num_futile_classifications);
    }
    best_choice_bundle->updated = false; // reset updated
    // See if it's time to terminate SegSearch or time for starting a guided
    // search for the true path to find the blame for the incorrect best_choice.
    if (SegSearchDone(num_futile_classifications) && blamer_bundle != nullptr &&
        blamer_bundle->GuidedSegsearchNeeded(word_res->best_choice)) {
      InitBlamerForSegSearch(word_res, &pain_points, blamer_bundle, blamer_debug);
    }
  } // end while loop exploring alternative paths
  if (blamer_bundle != nullptr) {
    blamer_bundle->FinishSegSearch(word_res->best_choice, wordrec_debug_blamer, blamer_debug);
  }
  if (segsearch_debug_level > 0) {
    tprintf("Done with SegSearch (AcceptableChoiceFound: %d)\n",
            language_model_->AcceptableChoiceFound());
  }
}
// Setup and run just the initial segsearch on an established matrix,
// without doing any additional chopping or joining.
// (Internal factored version that can be used as part of the main SegSearch.)
void Wordrec::InitialSegSearch(WERD_RES *word_res, LMPainPoints *pain_points,
                               std::vector<SegSearchPending> *pending,
                               BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle) {
  if (segsearch_debug_level > 0) {
    tprintf("Starting SegSearch on ratings matrix%s:\n",
            wordrec_enable_assoc ? " (with assoc)" : "");
    word_res->ratings->print(getDict().getUnicharset());
  }
  // Seed the pain-points queue from the word's initial state.
  pain_points->GenerateInitial(word_res);
  // Compute scaling factor that will help us recover blob outline length
  // from classifier rating and certainty for the blob.
  float rating_cert_scale = -1.0 * getDict().certainty_scale / rating_scale;
  language_model_->InitForWord(prev_word_best_choice_, assume_fixed_pitch_char_segment,
                               segsearch_max_char_wh_ratio, rating_cert_scale);
  // Initialize blamer-related information: map character boxes recorded in
  // blamer_bundle->norm_truth_word to the corresponding i,j indices in the
  // ratings matrix. We expect this step to succeed, since when running the
  // chopper we checked that the correct chops are present.
  if (blamer_bundle != nullptr) {
    blamer_bundle->SetupCorrectSegmentation(word_res->chopped_word, wordrec_debug_blamer);
  }
  // pending[col] tells whether there is update work to do to combine
  // best_choice_bundle->beam[col - 1] with some BLOB_CHOICEs in matrix[col, *].
  // As the language model state is updated, pending entries are modified to
  // minimize duplication of work. It is important that during the update the
  // children are considered in the non-decreasing order of their column, since
  // this guarantees that all the parents would be up to date before an update
  // of a child is done.
  pending->clear();
  pending->resize(word_res->ratings->dimension(), SegSearchPending());
  // Search the ratings matrix for the initial best path.
  (*pending)[0].SetColumnClassified();
  UpdateSegSearchNodes(rating_cert_scale, 0, pending, word_res, pain_points, best_choice_bundle,
                       blamer_bundle);
}
// Runs one sweep of the Viterbi update over the ratings matrix, starting at
// starting_col and propagating language-model state changes rightwards
// through the pending columns. Afterwards generates new pain points from
// the (possibly updated) best path and clears all updated/pending flags.
void Wordrec::UpdateSegSearchNodes(float rating_cert_scale, int starting_col,
                                   std::vector<SegSearchPending> *pending, WERD_RES *word_res,
                                   LMPainPoints *pain_points, BestChoiceBundle *best_choice_bundle,
                                   BlamerBundle *blamer_bundle) {
  MATRIX *ratings = word_res->ratings;
  ASSERT_HOST(static_cast<unsigned>(ratings->dimension()) == pending->size());
  ASSERT_HOST(static_cast<unsigned>(ratings->dimension()) == best_choice_bundle->beam.size());
  for (int col = starting_col; col < ratings->dimension(); ++col) {
    if (!(*pending)[col].WorkToDo()) {
      continue;
    }
    // By default process all rows of this column that fit in the band.
    int first_row = col;
    int last_row = std::min(ratings->dimension() - 1, col + ratings->bandwidth() - 1);
    // If only a single row was classified, restrict the update to it.
    if ((*pending)[col].SingleRow() >= 0) {
      first_row = last_row = (*pending)[col].SingleRow();
    }
    if (segsearch_debug_level > 0) {
      tprintf("\n\nUpdateSegSearchNodes: col=%d, rows=[%d,%d], alljust=%d\n", col, first_row,
              last_row, (*pending)[col].IsRowJustClassified(INT32_MAX));
    }
    // Iterate over the pending list for this column.
    for (int row = first_row; row <= last_row; ++row) {
      // Update language model state of this child+parent pair.
      BLOB_CHOICE_LIST *current_node = ratings->get(col, row);
      LanguageModelState *parent_node = col == 0 ? nullptr : best_choice_bundle->beam[col - 1];
      if (current_node != nullptr &&
          language_model_->UpdateState((*pending)[col].IsRowJustClassified(row), col, row,
                                       current_node, parent_node, pain_points, word_res,
                                       best_choice_bundle, blamer_bundle) &&
          row + 1 < ratings->dimension()) {
        // Since the language model state of this entry changed, process all
        // the child column.
        (*pending)[row + 1].RevisitWholeColumn();
        if (segsearch_debug_level > 0) {
          tprintf("Added child col=%d to pending\n", row + 1);
        }
      } // end if UpdateState.
    }   // end for row.
  }     // end for col.
  if (best_choice_bundle->best_vse != nullptr) {
    ASSERT_HOST(word_res->StatesAllValid());
    if (best_choice_bundle->best_vse->updated) {
      // Queue new pain points along the updated best path, and from any
      // recorded ambiguities on it.
      pain_points->GenerateFromPath(rating_cert_scale, best_choice_bundle->best_vse, word_res);
      if (!best_choice_bundle->fixpt.empty()) {
        pain_points->GenerateFromAmbigs(best_choice_bundle->fixpt, best_choice_bundle->best_vse,
                                        word_res);
      }
    }
  }
  // The segsearch is completed. Reset all updated flags on all VSEs and reset
  // all pendings.
  for (unsigned col = 0; col < pending->size(); ++col) {
    (*pending)[col].Clear();
    ViterbiStateEntry_IT vse_it(&best_choice_bundle->beam[col]->viterbi_state_entries);
    for (vse_it.mark_cycle_pt(); !vse_it.cycled_list(); vse_it.forward()) {
      vse_it.data()->updated = false;
    }
  }
}
// Classifies the blob span covered by the given matrix cell (pain point),
// merges the resulting choices into the ratings matrix, queues follow-up
// pain points joining the new cell with its left/right neighbors, and marks
// the cell's column pending so the next Viterbi sweep picks it up.
void Wordrec::ProcessSegSearchPainPoint(float pain_point_priority, const MATRIX_COORD &pain_point,
                                        const char *pain_point_type,
                                        std::vector<SegSearchPending> *pending,
                                        WERD_RES *word_res, LMPainPoints *pain_points,
                                        BlamerBundle *blamer_bundle) {
  if (segsearch_debug_level > 0) {
    tprintf("Classifying pain point %s priority=%.4f, col=%d, row=%d\n", pain_point_type,
            pain_point_priority, pain_point.col, pain_point.row);
  }
  ASSERT_HOST(pain_points != nullptr);
  MATRIX *ratings = word_res->ratings;
  // Classify blob [pain_point.col pain_point.row]
  // Widen the matrix band first if the cell lies outside it.
  if (!pain_point.Valid(*ratings)) {
    ratings->IncreaseBandSize(pain_point.row + 1 - pain_point.col);
  }
  ASSERT_HOST(pain_point.Valid(*ratings));
  BLOB_CHOICE_LIST *classified =
      classify_piece(word_res->seam_array, pain_point.col, pain_point.row, pain_point_type,
                     word_res->chopped_word, blamer_bundle);
  BLOB_CHOICE_LIST *lst = ratings->get(pain_point.col, pain_point.row);
  if (lst == nullptr) {
    ratings->put(pain_point.col, pain_point.row, classified);
  } else {
    // We cannot delete old BLOB_CHOICEs, since they might contain
    // ViterbiStateEntries that are parents of other "active" entries.
    // Thus if the matrix cell already contains classifications we add
    // the new ones to the beginning of the list.
    BLOB_CHOICE_IT it(lst);
    it.add_list_before(classified);
    delete classified; // safe to delete, since empty after add_list_before()
    classified = nullptr;
  }
  if (segsearch_debug_level > 0) {
    print_ratings_list("Updated ratings matrix with a new entry:",
                       ratings->get(pain_point.col, pain_point.row), getDict().getUnicharset());
    ratings->print(getDict().getUnicharset());
  }
  // Insert initial "pain points" to join the newly classified blob
  // with its left and right neighbors.
  // NOTE: classified is non-null here only on the fresh-cell path above.
  if (classified != nullptr && !classified->empty()) {
    if (pain_point.col > 0) {
      pain_points->GeneratePainPoint(pain_point.col - 1, pain_point.row, LM_PPTYPE_SHAPE, 0.0, true,
                                     segsearch_max_char_wh_ratio, word_res);
    }
    if (pain_point.row + 1 < ratings->dimension()) {
      pain_points->GeneratePainPoint(pain_point.col, pain_point.row + 1, LM_PPTYPE_SHAPE, 0.0, true,
                                     segsearch_max_char_wh_ratio, word_res);
    }
  }
  // Flag the column so UpdateSegSearchNodes revisits this row.
  (*pending)[pain_point.col].SetBlobClassified(pain_point.row);
}
// Resets enough of the results so that the Viterbi search is re-run.
// Needed when the n-gram model is enabled, as the multi-length comparison
// implementation will re-value existing paths to worse values.
void Wordrec::ResetNGramSearch(WERD_RES *word_res, BestChoiceBundle *best_choice_bundle,
                               std::vector<SegSearchPending> &pending) {
  // TODO(rays) More refactoring required here.
  // Delete existing viterbi states.
  for (auto &col : best_choice_bundle->beam) {
    col->Clear();
  }
  // Reset best_choice_bundle.
  word_res->ClearWordChoices();
  best_choice_bundle->best_vse = nullptr;
  // Clear out all existing pendings, THEN seed the first column.
  // Fix: the previous code called SetColumnClassified() before the clearing
  // loop, so the flag on column 0 was immediately wiped again and the
  // restarted search was left with no work to do.
  for (auto &data : pending) {
    data.Clear();
  }
  if (!pending.empty()) {
    pending[0].SetColumnClassified();
  }
}
void Wordrec::InitBlamerForSegSearch(WERD_RES *word_res, LMPainPoints *pain_points,
                                     BlamerBundle *blamer_bundle, std::string &blamer_debug) {
  // Start from an empty pain-points heap, then let the blamer seed it with
  // the cells along the correct segmentation path.
  pain_points->Clear();
  blamer_bundle->InitForSegSearch(word_res->best_choice, word_res->ratings,
                                  getDict().WildcardID(), wordrec_debug_blamer, blamer_debug,
                                  pain_points, segsearch_max_char_wh_ratio, word_res);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/segsearch.cpp
|
C++
|
apache-2.0
| 14,344
|
/**********************************************************************
* File: tface.cpp (Formerly tface.c)
* Description: C side of the Tess/tessedit C/C++ interface.
* Author: Ray Smith
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include <cmath>
#include "wordrec.h"
#ifndef DISABLED_LEGACY_ENGINE
# include "chop.h"
# include "featdefs.h"
# include "pageres.h"
# include "params_model.h"
#endif
namespace tesseract {
/**
* @name program_editup
*
* Initialize all the things in the program that need to be initialized.
* init_permute determines whether to initialize the permute functions
* and Dawg models.
*/
void Wordrec::program_editup(const std::string &textbase, TessdataManager *init_classifier,
                             TessdataManager *init_dict) {
  // Remember the image base name when one was given.
  if (!textbase.empty()) {
    imagefile = textbase;
  }
#ifndef DISABLED_LEGACY_ENGINE
  InitFeatureDefs(&feature_defs_);
  InitAdaptiveClassifier(init_classifier);
  // Load the dictionary (Dawg models) only when a dict manager is supplied.
  if (init_dict) {
    getDict().SetupForLoad(Dict::GlobalDawgCache());
    getDict().Load(lang, init_dict);
    getDict().FinishLoad();
  }
  // Remember the default chop limit so set_pass2() can restore it after
  // set_pass1() tightens it.
  pass2_ok_split = chop_ok_split;
#endif // ndef DISABLED_LEGACY_ENGINE
}
/**
* @name end_recog
*
* Cleanup and exit the recog program.
*/
int Wordrec::end_recog() {
  // Tear down recognition state; always reports success.
  program_editdown(0);
  return 0;
}
/**
* @name program_editdown
*
* This function holds any necessary post processing for the Wise Owl
* program.
*/
// Shuts down the classifier (legacy builds) and the dictionary.
// elapsed_time is accepted for interface compatibility but unused here.
void Wordrec::program_editdown(int32_t elapsed_time) {
#ifndef DISABLED_LEGACY_ENGINE
  EndAdaptiveClassifier();
#endif // ndef DISABLED_LEGACY_ENGINE
  getDict().End();
}
/**
* @name dict_word()
*
* Test the dictionaries, returning NO_PERM (0) if not found, or one
* of the PermuterType values if found, according to the dictionary.
*/
int Wordrec::dict_word(const WERD_CHOICE &word) {
  // Delegate straight to the dictionary lookup.
  const int permuter = getDict().valid_word(word);
  return permuter;
}
#ifndef DISABLED_LEGACY_ENGINE
/**
* @name set_pass1
*
* Get ready to do some pass 1 stuff.
*/
void Wordrec::set_pass1() {
  // Switch the params model to pass 1 and tighten the chop limit.
  language_model_->getParamsModel().SetPass(ParamsModel::PTRAIN_PASS1);
  chop_ok_split.set_value(70.0);
  SettupPass1();
}
/**
* @name set_pass2
*
* Get ready to do some pass 2 stuff.
*/
void Wordrec::set_pass2() {
  // Switch the params model to pass 2 and restore the saved chop limit.
  language_model_->getParamsModel().SetPass(ParamsModel::PTRAIN_PASS2);
  chop_ok_split.set_value(pass2_ok_split);
  SettupPass2();
}
/**
* @name cc_recog
*
* Recognize a word.
*/
void Wordrec::cc_recog(WERD_RES *word) {
  auto &dict = getDict();
  // Reset hyphen tracking when this word ends a line.
  dict.reset_hyphen_vars(word->word->flag(W_EOL));
  chop_word_main(word);
  word->DebugWordChoices(dict.stopper_debug_level >= 1, dict.word_to_debug.c_str());
  ASSERT_HOST(word->StatesAllValid());
}
/**
* @name call_matcher
*
* Called from Tess with a blob in tess form.
* The blob may need rotating to the correct orientation for classification.
*/
BLOB_CHOICE_LIST *Wordrec::call_matcher(TBLOB *tessblob) {
  // Classify a normalized copy when rotation is required for
  // classification; otherwise classify the original blob directly.
  TBLOB *blob_to_classify = tessblob->ClassifyNormalizeIfNeeded();
  const bool owns_copy = blob_to_classify != nullptr;
  if (!owns_copy) {
    blob_to_classify = tessblob;
  }
  auto *ratings = new BLOB_CHOICE_LIST(); // matcher result
  AdaptiveClassifier(blob_to_classify, ratings);
  if (owns_copy) {
    delete blob_to_classify; // Only the rotated copy belongs to us.
  }
  return ratings;
}
#endif // ndef DISABLED_LEGACY_ENGINE
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/tface.cpp
|
C++
|
apache-2.0
| 3,944
|
/******************************************************************************
*
* File: wordclass.cpp (Formerly wordclass.c)
* Description: Word classifier
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1990, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
/*----------------------------------------------------------------------
I N C L U D E S
----------------------------------------------------------------------*/
#include "blamer.h" // for blamer_bundle
#include "params.h" // for BoolParam
#include "render.h" // for display_blob, blob_window, wordrec_blob_pause
#include "wordrec.h" // for Wordrec
struct TBLOB;
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
namespace tesseract {
/**
* @name classify_blob
*
* Classify the this blob if it is not already recorded in the match
* table. Attempt to recognize this blob as a character. The recognition
* rating for this blob will be stored as a part of the blob. This value
* will also be returned to the caller.
* @param blob Current blob
* @param string The string to display in ScrollView
* @param color The colour to use when displayed with ScrollView
*/
BLOB_CHOICE_LIST *Wordrec::classify_blob(TBLOB *blob, const char *string, ScrollView::Color color,
                                         BlamerBundle *blamer_bundle) {
#ifndef GRAPHICS_DISABLED
  if (wordrec_display_all_blobs) {
    display_blob(blob, color);
  }
#endif
  // TODO(rays) collapse with call_matcher and move all to wordrec.cpp.
  BLOB_CHOICE_LIST *ratings = call_matcher(blob);
  // If a blob with the same bounding box as one of the truth character
  // bounding boxes is not classified as the corresponding truth character
  // blame character classifier for incorrect answer.
  if (blamer_bundle != nullptr) {
    blamer_bundle->BlameClassifier(getDict().getUnicharset(), blob->bounding_box(), *ratings,
                                   wordrec_debug_blamer);
  }
#ifndef GRAPHICS_DISABLED
  if (classify_debug_level && string) {
    print_ratings_list(string, ratings, getDict().getUnicharset());
  }
  if (wordrec_blob_pause) {
    blob_window->Wait();
  }
#endif
  return ratings;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/wordrec/wordclass.cpp
|
C++
|
apache-2.0
| 3,082
|
///////////////////////////////////////////////////////////////////////
// File: wordrec.cpp
// Description: wordrec class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "wordrec.h"
#include <memory>
#ifdef DISABLED_LEGACY_ENGINE
# include "params.h"
namespace tesseract {
// Minimal constructor for DISABLED_LEGACY_ENGINE builds: only the two
// blamer control flags are registered; everything else is unavailable.
Wordrec::Wordrec()
    : // control parameters
    BOOL_MEMBER(wordrec_debug_blamer, false, "Print blamer debug messages", params())
    ,
    BOOL_MEMBER(wordrec_run_blamer, false, "Try to set the blame for errors", params()) {
  prev_word_best_choice_ = nullptr;
}
} // namespace tesseract
#else // DISABLED_LEGACY_ENGINE not defined
# include "language_model.h"
# include "params.h"
namespace tesseract {
// Full constructor for the legacy engine build: registers every chopper and
// segmentation-search control parameter with the parameter system, then sets
// up the language model used by the segmentation search.
Wordrec::Wordrec()
    : // control parameters
      BOOL_MEMBER(merge_fragments_in_matrix, true,
                  "Merge the fragments in the ratings matrix and delete them"
                  " after merging",
                  params())
    , BOOL_MEMBER(wordrec_enable_assoc, true, "Associator Enable", params())
    , BOOL_MEMBER(force_word_assoc, false,
                  "force associator to run regardless of what enable_assoc is."
                  " This is used for CJK where component grouping is necessary.",
                  // NOTE(review): every other member uses plain params();
                  // CCUtil::params() here is presumably the same instance via
                  // inheritance -- confirm before unifying.
                  CCUtil::params())
    , INT_MEMBER(repair_unchopped_blobs, 1, "Fix blobs that aren't chopped", params())
    , double_MEMBER(tessedit_certainty_threshold, -2.25, "Good blob limit", params())
    , INT_MEMBER(chop_debug, 0, "Chop debug", params())
    , BOOL_MEMBER(chop_enable, 1, "Chop enable", params())
    , BOOL_MEMBER(chop_vertical_creep, 0, "Vertical creep", params())
    , INT_MEMBER(chop_split_length, 10000, "Split Length", params())
    , INT_MEMBER(chop_same_distance, 2, "Same distance", params())
    , INT_MEMBER(chop_min_outline_points, 6, "Min Number of Points on Outline", params())
    , INT_MEMBER(chop_seam_pile_size, 150, "Max number of seams in seam_pile", params())
    , BOOL_MEMBER(chop_new_seam_pile, 1, "Use new seam_pile", params())
    , INT_MEMBER(chop_inside_angle, -50, "Min Inside Angle Bend", params())
    , INT_MEMBER(chop_min_outline_area, 2000, "Min Outline Area", params())
    , double_MEMBER(chop_split_dist_knob, 0.5, "Split length adjustment", params())
    , double_MEMBER(chop_overlap_knob, 0.9, "Split overlap adjustment", params())
    , double_MEMBER(chop_center_knob, 0.15, "Split center adjustment", params())
    , INT_MEMBER(chop_centered_maxwidth, 90,
                 "Width of (smaller) chopped blobs "
                 "above which we don't care that a chop is not near the center.",
                 params())
    , double_MEMBER(chop_sharpness_knob, 0.06, "Split sharpness adjustment", params())
    , double_MEMBER(chop_width_change_knob, 5.0, "Width change adjustment", params())
    , double_MEMBER(chop_ok_split, 100.0, "OK split limit", params())
    , double_MEMBER(chop_good_split, 50.0, "Good split limit", params())
    , INT_MEMBER(chop_x_y_weight, 3, "X / Y length weight", params())
    , BOOL_MEMBER(assume_fixed_pitch_char_segment, false,
                  "include fixed-pitch heuristics in char segmentation", params())
    , INT_MEMBER(wordrec_debug_level, 0, "Debug level for wordrec", params())
    , INT_MEMBER(wordrec_max_join_chunks, 4, "Max number of broken pieces to associate", params())
    , BOOL_MEMBER(wordrec_skip_no_truth_words, false,
                  "Only run OCR for words that had truth recorded in BlamerBundle", params())
    , BOOL_MEMBER(wordrec_debug_blamer, false, "Print blamer debug messages", params())
    , BOOL_MEMBER(wordrec_run_blamer, false, "Try to set the blame for errors", params())
    , INT_MEMBER(segsearch_debug_level, 0, "SegSearch debug level", params())
    , INT_MEMBER(segsearch_max_pain_points, 2000,
                 "Maximum number of pain points stored in the queue", params())
    , INT_MEMBER(segsearch_max_futile_classifications, 20,
                 "Maximum number of pain point classifications per chunk that"
                 " did not result in finding a better word choice.",
                 params())
    , double_MEMBER(segsearch_max_char_wh_ratio, 2.0, "Maximum character width-to-height ratio",
                    params())
    , BOOL_MEMBER(save_alt_choices, true,
                  "Save alternative paths found during chopping"
                  " and segmentation search",
                  params())
    , pass2_ok_split(0.0f) {
  // Set by the page iterator as recognition proceeds; not owned here.
  prev_word_best_choice_ = nullptr;
  // The language model drives the segmentation search scoring.
  language_model_ = std::make_unique<LanguageModel>(&get_fontinfo_table(), &(getDict()));
  // Installed later (e.g. for blamer lattice dumping); may legitimately stay null.
  fill_lattice_ = nullptr;
}
} // namespace tesseract
#endif // DISABLED_LEGACY_ENGINE
|
2301_81045437/tesseract
|
src/wordrec/wordrec.cpp
|
C++
|
apache-2.0
| 5,280
|
///////////////////////////////////////////////////////////////////////
// File: wordrec.h
// Description: wordrec class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_WORDREC_H_
#define TESSERACT_WORDREC_WORDREC_H_
#ifdef HAVE_CONFIG_H
# include "config_auto.h" // DISABLED_LEGACY_ENGINE
#endif
#ifdef DISABLED_LEGACY_ENGINE
# include <cstdint> // for int16_t, int32_t
# include "classify.h" // for Classify
# include "params.h" // for INT_VAR_H, IntParam, BOOL_VAR_H, BoolP...
# include "ratngs.h" // for WERD_CHOICE
namespace tesseract {
class TessdataManager;
}
namespace tesseract {
/* ccmain/tstruct.cpp */
// Stub Wordrec used when the legacy engine is compiled out
// (DISABLED_LEGACY_ENGINE): keeps only the members that the rest of the code
// base still references, so callers compile and link unchanged.
class TESS_API Wordrec : public Classify {
public:
  // config parameters
  BOOL_VAR_H(wordrec_debug_blamer);
  BOOL_VAR_H(wordrec_run_blamer);

  // methods
  Wordrec();
  virtual ~Wordrec() = default;

  // tface.cpp
  // Initializes the classifier and dictionary from the given tessdata
  // managers (see tface.cpp for the implementation).
  void program_editup(const std::string &textbase, TessdataManager *init_classifier,
                      TessdataManager *init_dict);
  void program_editdown(int32_t elapsed_time);
  int end_recog();
  // Returns the dictionary's verdict for the given word choice.
  int dict_word(const WERD_CHOICE &word);

  // Member variables
  // Best choice for the previous word in the paragraph; maintained by the
  // page iterator, not owned by Wordrec.
  WERD_CHOICE *prev_word_best_choice_;
};
} // namespace tesseract
#else // DISABLED_LEGACY_ENGINE not defined
# include <memory>
# include "associate.h"
# include "chop.h" // for PointHeap, MAX_NUM_POINTS
# include "classify.h" // for Classify
# include "dict.h"
# include "elst.h" // for ELIST_ITERATOR, ELISTIZEH, ELIST_LINK
# include "findseam.h" // for SeamQueue, SeamPile
# include "language_model.h"
# include "matrix.h"
# include "oldlist.h" // for LIST
# include "params.h" // for INT_VAR_H, IntParam, BOOL_VAR_H, BoolP...
# include "points.h" // for ICOORD
# include "ratngs.h" // for BLOB_CHOICE_LIST (ptr only), BLOB_CHOI...
# include "seam.h" // for SEAM (ptr only), PRIORITY
# include "stopper.h" // for DANGERR
# include <cstdint> // for int16_t, int32_t
namespace tesseract {
class EDGEPT_CLIST;
class MATRIX;
class TBOX;
class UNICHARSET;
class WERD_RES;
class LMPainPoints;
class TessdataManager;
struct BestChoiceBundle;
struct BlamerBundle;
struct EDGEPT;
struct MATRIX_COORD;
struct SPLIT;
struct TBLOB;
struct TESSLINE;
struct TWERD;
// A class for storing which nodes are to be processed by the segmentation
// search. There is a single SegSearchPending for each column in the ratings
// matrix, and it indicates whether the segsearch should combine all
// BLOB_CHOICES in the column, or just the given row with the parents
// corresponding to *this SegSearchPending, and whether only updated parent
// ViterbiStateEntries should be combined, or all, with the BLOB_CHOICEs.
// Bookkeeping for one column of the ratings matrix during the segmentation
// search. It records whether the whole column, or just a single freshly
// classified row, still has to be combined with parent states, and whether
// only updated parent ViterbiStateEntries or all of them should take part.
class SegSearchPending {
public:
  SegSearchPending()
      : just_classified_row_(-1), revisit_all_rows_(false), whole_column_just_classified_(false) {}

  // Flags the entire column as newly classified. Used to kick off a search
  // on a freshly initialized ratings matrix.
  void SetColumnClassified() {
    whole_column_just_classified_ = true;
  }
  // Flags a single matrix cell (row) as newly classified, e.g. after filling
  // in a new cell. Adds to, rather than replaces, an earlier
  // RevisitWholeColumn().
  void SetBlobClassified(int row) {
    just_classified_row_ = row;
  }
  // Requests reprocessing of the whole column without marking anything as
  // newly classified (used when the parent vse list changes). Adds to,
  // rather than replaces, an earlier SetBlobClassified().
  void RevisitWholeColumn() {
    revisit_all_rows_ = true;
  }
  // Forgets all pending work for this column.
  void Clear() {
    whole_column_just_classified_ = false;
    revisit_all_rows_ = false;
    just_classified_row_ = -1;
  }
  // True if anything in this column still needs processing.
  bool WorkToDo() const {
    if (just_classified_row_ >= 0) {
      return true;
    }
    return revisit_all_rows_ || whole_column_just_classified_;
  }
  // True if the given row (or the whole column) was just classified.
  bool IsRowJustClassified(int row) const {
    return whole_column_just_classified_ || row == just_classified_row_;
  }
  // The lone row to process, or -1 when the whole column is pending.
  int SingleRow() const {
    if (revisit_all_rows_ || whole_column_just_classified_) {
      return -1;
    }
    return just_classified_row_;
  }

private:
  // Row that was just classified and must be combined with all parents in
  // this column, or -1. Operates independently of revisit_all_rows_.
  int just_classified_row_;
  // When set, every BLOB_CHOICE in the column is processed (with
  // just_classified_row_ possibly marking one row as new). Overridden by
  // whole_column_just_classified_.
  bool revisit_all_rows_;
  // When set, parent vses are processed with all rows regardless of update
  // status, overriding the other two flags.
  bool whole_column_just_classified_;
};
/* ccmain/tstruct.cpp *********************************************************/
// A piece of outline between two edge points, stored both as coordinates and
// as pointers to the EDGEPTs themselves.
// NOTE(review): semantics inferred from field names and the tstruct.cpp
// reference above -- confirm against ccmain/tstruct.cpp.
class FRAGMENT : public ELIST_LINK {
public:
  FRAGMENT() { // constructor (members deliberately left uninitialized)
  }
  FRAGMENT(EDGEPT *head_pt, // start
           EDGEPT *tail_pt); // end

  ICOORD head;    // coords of start
  ICOORD tail;    // coords of end
  EDGEPT *headpt; // start point
  EDGEPT *tailpt; // end point
};
ELISTIZEH(FRAGMENT)
// The legacy word recognizer. Wordrec extends Classify with:
//  * blob chopping (chop.cpp, chopper.cpp, findseam.cpp, gradechop.cpp),
//  * the segmentation (beam) search over the ratings matrix (segsearch.cpp),
//  * top-level per-word recognition control (tface.cpp, wordclass.cpp).
class TESS_API Wordrec : public Classify {
public:
  // config parameters *******************************************************
  BOOL_VAR_H(merge_fragments_in_matrix);
  BOOL_VAR_H(wordrec_enable_assoc);
  BOOL_VAR_H(force_word_assoc);
  INT_VAR_H(repair_unchopped_blobs);
  double_VAR_H(tessedit_certainty_threshold);
  INT_VAR_H(chop_debug);
  BOOL_VAR_H(chop_enable);
  BOOL_VAR_H(chop_vertical_creep);
  INT_VAR_H(chop_split_length);
  INT_VAR_H(chop_same_distance);
  INT_VAR_H(chop_min_outline_points);
  INT_VAR_H(chop_seam_pile_size);
  BOOL_VAR_H(chop_new_seam_pile);
  INT_VAR_H(chop_inside_angle);
  INT_VAR_H(chop_min_outline_area);
  double_VAR_H(chop_split_dist_knob);
  double_VAR_H(chop_overlap_knob);
  double_VAR_H(chop_center_knob);
  INT_VAR_H(chop_centered_maxwidth);
  double_VAR_H(chop_sharpness_knob);
  double_VAR_H(chop_width_change_knob);
  double_VAR_H(chop_ok_split);
  double_VAR_H(chop_good_split);
  INT_VAR_H(chop_x_y_weight);
  BOOL_VAR_H(assume_fixed_pitch_char_segment);
  INT_VAR_H(wordrec_debug_level);
  INT_VAR_H(wordrec_max_join_chunks);
  BOOL_VAR_H(wordrec_skip_no_truth_words);
  BOOL_VAR_H(wordrec_debug_blamer);
  BOOL_VAR_H(wordrec_run_blamer);
  INT_VAR_H(segsearch_debug_level);
  INT_VAR_H(segsearch_max_pain_points);
  INT_VAR_H(segsearch_max_futile_classifications);
  double_VAR_H(segsearch_max_char_wh_ratio);
  BOOL_VAR_H(save_alt_choices);

  // methods from wordrec/*.cpp ***********************************************
  Wordrec();
  ~Wordrec() override = default;

  // Fills word->alt_choices with alternative paths found during
  // chopping/segmentation search that are kept in best_choices.
  void SaveAltChoices(const LIST &best_choices, WERD_RES *word);

  // Fills character choice lattice in the given BlamerBundle
  // using the given ratings matrix and best choice list.
  void FillLattice(const MATRIX &ratings, const WERD_CHOICE_LIST &best_choices,
                   const UNICHARSET &unicharset, BlamerBundle *blamer_bundle);

  // Calls fill_lattice_ member function
  // (assumes that fill_lattice_ is not nullptr).
  void CallFillLattice(const MATRIX &ratings, const WERD_CHOICE_LIST &best_choices,
                       const UNICHARSET &unicharset, BlamerBundle *blamer_bundle) {
    (this->*fill_lattice_)(ratings, best_choices, unicharset, blamer_bundle);
  }

  // tface.cpp
  // Initializes the classifier and dictionary from the given tessdata
  // managers before recognition starts.
  void program_editup(const std::string &textbase, TessdataManager *init_classifier,
                      TessdataManager *init_dict);
  // Recognizes one word (see tface.cpp for the exact pipeline).
  void cc_recog(WERD_RES *word);
  void program_editdown(int32_t elapsed_time);
  // Switch recognition parameters between pass 1 and pass 2 of a page.
  void set_pass1();
  void set_pass2();
  int end_recog();
  // Runs the shape classifier on a single blob and returns its choices.
  BLOB_CHOICE_LIST *call_matcher(TBLOB *blob);
  // Returns the dictionary's verdict for the given word choice.
  int dict_word(const WERD_CHOICE &word);

  // wordclass.cpp
  // Classifies a blob (via call_matcher) and, when a blamer bundle is
  // supplied, lets it assign blame for misclassifications.
  BLOB_CHOICE_LIST *classify_blob(TBLOB *blob, const char *string, ScrollView::Color color,
                                  BlamerBundle *blamer_bundle);

  // segsearch.cpp
  // SegSearch works on the lower diagonal matrix of BLOB_CHOICE_LISTs.
  // Each entry in the matrix represents the classification choice
  // for a chunk, i.e. an entry in row 2, column 1 represents the list
  // of ratings for the chunks 1 and 2 classified as a single blob.
  // The entries on the diagonal of the matrix are classifier choice lists
  // for a single chunk from the maximal segmentation.
  //
  // The ratings matrix given to SegSearch represents the segmentation
  // graph / trellis for the current word. The nodes in the graph are the
  // individual BLOB_CHOICEs in each of the BLOB_CHOICE_LISTs in the ratings
  // matrix. The children of each node (nodes connected by outgoing links)
  // are the entries in the column that is equal to node's row+1. The parents
  // (nodes connected by the incoming links) are the entries in the row that
  // is equal to the node's column-1. Here is an example ratings matrix:
  //
  //    0    1    2   3   4
  //  -------------------------
  // 0| c,(   |
  // 1| d    l,1 |
  // 2|           o   |
  // 3|               c,(   |
  // 4|               g,y  l,1 |
  //  -------------------------
  //
  // In the example above node "o" has children (outgoing connection to nodes)
  // "c","(","g","y" and parents (incoming connections from nodes) "l","1","d".
  //
  // The objective of the search is to find the least cost path, where the cost
  // is determined by the language model components and the properties of the
  // cut between the blobs on the path. SegSearch starts by populating the
  // matrix with the all the entries that were classified by the chopper and
  // finding the initial best path. Based on the classifier ratings, language
  // model scores and the properties of each cut, a list of "pain points" is
  // constructed - those are the points on the path where the choices do not
  // look consistent with the neighboring choices, the cuts look particularly
  // problematic, or the certainties of the blobs are low. The most troublesome
  // "pain point" is picked from the list and the new entry in the ratings
  // matrix corresponding to this "pain point" is filled in. Then the language
  // model state is updated to reflect the new classification and the new
  // "pain points" are added to the list and the next most troublesome
  // "pain point" is determined. This continues until either the word choice
  // composed from the best paths in the segmentation graph is "good enough"
  // (e.g. above a certain certainty threshold, is an unambiguous dictionary
  // word, etc) or there are no more "pain points" to explore.
  //
  // If associate_blobs is set to false no new classifications will be done
  // to combine blobs. Segmentation search will run only one "iteration"
  // on the classifications already recorded in chunks_record.ratings.
  //
  // Note: this function assumes that word_res, best_choice_bundle arguments
  // are not nullptr.
  void SegSearch(WERD_RES *word_res, BestChoiceBundle *best_choice_bundle,
                 BlamerBundle *blamer_bundle);

  // Setup and run just the initial segsearch on an established matrix,
  // without doing any additional chopping or joining.
  // (Internal factored version that can be used as part of the main SegSearch.)
  void InitialSegSearch(WERD_RES *word_res, LMPainPoints *pain_points,
                        std::vector<SegSearchPending> *pending,
                        BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle);

  // chop.cpp
  // Computes the priority (goodness) of chopping at the given point.
  PRIORITY point_priority(EDGEPT *point);
  void add_point_to_list(PointHeap *point_heap, EDGEPT *point);
  // Returns true if the edgept supplied as input is an inside angle. This
  // is determined by the angular change of the vectors from point to point.
  bool is_inside_angle(EDGEPT *pt);
  int angle_change(EDGEPT *point1, EDGEPT *point2, EDGEPT *point3);
  EDGEPT *pick_close_point(EDGEPT *critical_point, EDGEPT *vertical_point, int *best_dist);
  void prioritize_points(TESSLINE *outline, PointHeap *points);
  void new_min_point(EDGEPT *local_min, PointHeap *points);
  void new_max_point(EDGEPT *local_max, PointHeap *points);
  void vertical_projection_point(EDGEPT *split_point, EDGEPT *target_point, EDGEPT **best_point,
                                 EDGEPT_CLIST *new_points);

  // chopper.cpp
  // Attempts to split one blob; returns the seam used, or nullptr on failure
  // (see chopper.cpp for the exact contract of each variant).
  SEAM *attempt_blob_chop(TWERD *word, TBLOB *blob, int32_t blob_number, bool italic_blob,
                          const std::vector<SEAM *> &seams);
  SEAM *chop_numbered_blob(TWERD *word, int32_t blob_number, bool italic_blob,
                           const std::vector<SEAM *> &seams);
  SEAM *chop_overlapping_blob(const std::vector<TBOX> &boxes, bool italic_blob, WERD_RES *word_res,
                              unsigned *blob_number);
  SEAM *improve_one_blob(const std::vector<BLOB_CHOICE *> &blob_choices, DANGERR *fixpt,
                         bool split_next_to_fragment, bool italic_blob, WERD_RES *word,
                         unsigned *blob_number);
  SEAM *chop_one_blob(const std::vector<TBOX> &boxes,
                      const std::vector<BLOB_CHOICE *> &blob_choices, WERD_RES *word_res,
                      unsigned *blob_number);
  // Top-level chopping loop for one word.
  void chop_word_main(WERD_RES *word);
  void improve_by_chopping(float rating_cert_scale, WERD_RES *word,
                           BestChoiceBundle *best_choice_bundle, BlamerBundle *blamer_bundle,
                           LMPainPoints *pain_points, std::vector<SegSearchPending> *pending);
  int select_blob_to_split(const std::vector<BLOB_CHOICE *> &blob_choices, float rating_ceiling,
                           bool split_next_to_fragment);
  int select_blob_to_split_from_fixpt(DANGERR *fixpt);

  // findseam.cpp
  void add_seam_to_queue(float new_priority, SEAM *new_seam, SeamQueue *seams);
  void choose_best_seam(SeamQueue *seam_queue, const SPLIT *split, PRIORITY priority,
                        SEAM **seam_result, TBLOB *blob, SeamPile *seam_pile);
  void combine_seam(const SeamPile &seam_pile, const SEAM *seam, SeamQueue *seam_queue);
  SEAM *pick_good_seam(TBLOB *blob);
  void try_point_pairs(EDGEPT *points[MAX_NUM_POINTS], int16_t num_points, SeamQueue *seam_queue,
                       SeamPile *seam_pile, SEAM **seam, TBLOB *blob);
  void try_vertical_splits(EDGEPT *points[MAX_NUM_POINTS], int16_t num_points,
                           EDGEPT_CLIST *new_points, SeamQueue *seam_queue, SeamPile *seam_pile,
                           SEAM **seam, TBLOB *blob);

  // gradechop.cpp
  // Scoring components for candidate splits.
  PRIORITY grade_split_length(SPLIT *split);
  PRIORITY grade_sharpness(SPLIT *split);

  // outlines.cpp
  bool near_point(EDGEPT *point, EDGEPT *line_pt_0, EDGEPT *line_pt_1, EDGEPT **near_pt);

  // pieces.cpp
  // Classifies the blob formed by joining chunks [start, end]; virtual so
  // tests/training code can override the classifier.
  virtual BLOB_CHOICE_LIST *classify_piece(const std::vector<SEAM *> &seams, int16_t start,
                                           int16_t end, const char *description, TWERD *word,
                                           BlamerBundle *blamer_bundle);

  // Member variables.

  std::unique_ptr<LanguageModel> language_model_;
  PRIORITY pass2_ok_split;
  // Stores the best choice for the previous word in the paragraph.
  // This variable is modified by PAGE_RES_IT when iterating over
  // words to OCR on the page.
  WERD_CHOICE *prev_word_best_choice_;
  // Function used to fill char choice lattices.
  void (Wordrec::*fill_lattice_)(const MATRIX &ratings, const WERD_CHOICE_LIST &best_choices,
                                 const UNICHARSET &unicharset, BlamerBundle *blamer_bundle);

protected:
  // True when the segmentation search can stop: either an acceptable choice
  // was found or too many classifications failed to improve the best word.
  inline bool SegSearchDone(int num_futile_classifications) {
    return (language_model_->AcceptableChoiceFound() ||
            num_futile_classifications >= segsearch_max_futile_classifications);
  }

  // Updates the language model state recorded for the child entries specified
  // in pending[starting_col]. Enqueues the children of the updated entries
  // into pending and proceeds to update (and remove from pending) all the
  // remaining entries in pending[col] (col >= starting_col). Upon termination
  // of this function all the pending[col] lists will be empty.
  //
  // The arguments:
  //
  // starting_col: index of the column in chunks_record->ratings from
  // which the update should be started
  //
  // pending: list of entries listing chunks_record->ratings entries
  // that should be updated
  //
  // pain_points: priority heap listing the pain points generated by
  // the language model
  //
  // temp_pain_points: temporary storage for tentative pain points generated
  // by the language model after a single call to LanguageModel::UpdateState()
  // (the argument is passed in rather than created before each
  // LanguageModel::UpdateState() call to avoid dynamic memory re-allocation)
  //
  // best_choice_bundle: a collection of variables that should be updated
  // if a new best choice is found
  //
  void UpdateSegSearchNodes(float rating_cert_scale, int starting_col,
                            std::vector<SegSearchPending> *pending, WERD_RES *word_res,
                            LMPainPoints *pain_points, BestChoiceBundle *best_choice_bundle,
                            BlamerBundle *blamer_bundle);

  // Process the given pain point: classify the corresponding blob, enqueue
  // new pain points to join the newly classified blob with its neighbors.
  void ProcessSegSearchPainPoint(float pain_point_priority, const MATRIX_COORD &pain_point,
                                 const char *pain_point_type,
                                 std::vector<SegSearchPending> *pending, WERD_RES *word_res,
                                 LMPainPoints *pain_points, BlamerBundle *blamer_bundle);
  // Resets enough of the results so that the Viterbi search is re-run.
  // Needed when the n-gram model is enabled, as the multi-length comparison
  // implementation will re-value existing paths to worse values.
  void ResetNGramSearch(WERD_RES *word_res, BestChoiceBundle *best_choice_bundle,
                        std::vector<SegSearchPending> &pending);

  // Add pain points for classifying blobs on the correct segmentation path
  // (so that we can evaluate correct segmentation path and discover the reason
  // for incorrect result).
  void InitBlamerForSegSearch(WERD_RES *word_res, LMPainPoints *pain_points,
                              BlamerBundle *blamer_bundle, std::string &blamer_debug);
};
} // namespace tesseract
#endif // DISABLED_LEGACY_ENGINE
#endif // TESSERACT_WORDREC_WORDREC_H_
|
2301_81045437/tesseract
|
src/wordrec/wordrec.h
|
C++
|
apache-2.0
| 19,445
|
// sw (Software Network) build script entry point for tesseract.
// Defines the libtesseract library, the tesseract/svpaint executables, the
// training libraries and tools, and (behind the "with-tests" variable) the
// unit-test targets.
void build(Solution &s)
{
    auto &tess = s.addProject("google.tesseract", "main");
    tess += Git("https://github.com/tesseract-ocr/tesseract", "", "{v}");

    auto cppstd = cpp17;

    auto &libtesseract = tess.addTarget<LibraryTarget>("libtesseract");
    {
        libtesseract.setChecks("libtesseract");
        libtesseract.PackageDefinitions = true;
        libtesseract += cppstd;
        libtesseract += "TESS_API"_api;
        // All of include/ and src/, minus the LSTM .cc files and training code
        // (training is built as separate targets below).
        libtesseract += "include/.*"_rr;
        libtesseract += "src/.+/.*"_rr;
        libtesseract -= "src/lstm/.*\\.cc"_rr;
        libtesseract -= "src/training/.*"_rr;
        libtesseract.Public += "include"_idir;
        libtesseract.Protected +=
            "src/ccmain"_id,
            "src/api"_id,
            "src/dict"_id,
            "src/viewer"_id,
            "src/wordrec"_id,
            "src/ccstruct"_id,
            "src/cutil"_id,
            "src/textord"_id,
            "src/ccutil"_id,
            "src/lstm"_id,
            "src/classify"_id,
            "src/arch"_id,
            "src/training"_id;
        if (libtesseract.getCompilerType() == CompilerType::MSVC ||
            libtesseract.getCompilerType() == CompilerType::ClangCl)
        {
            libtesseract += "__SSE4_1__"_def;
            libtesseract.CompileOptions.push_back("-arch:AVX2");
            // openmp
            // NOTE(review): OpenMP support is intentionally disabled (if (0));
            // the commented option check shows how it was meant to be toggled.
            //if (libtesseract.getOptions()["openmp"] == "true")
            if (0)
            {
                if (libtesseract.getCompilerType() == CompilerType::MSVC)
                    libtesseract.CompileOptions.push_back("-openmp");
                else
                    libtesseract.CompileOptions.push_back("-fopenmp");
                libtesseract += "_OPENMP=201107"_def;
                if (libtesseract.getBuildSettings().Native.ConfigurationType == ConfigurationType::Debug)
                    libtesseract += "vcompd.lib"_slib;
                else
                    libtesseract += "vcomp.lib"_slib;
            }
        }
        auto win_or_mingw =
            libtesseract.getBuildSettings().TargetOS.Type == OSType::Windows ||
            libtesseract.getBuildSettings().TargetOS.Type == OSType::Mingw
            ;
        // check fma flags
        libtesseract -= "src/arch/dotproductfma.cpp";
        // check arch (arm)
        libtesseract -= "src/arch/dotproductneon.cpp";
        // Per-file SIMD flags for non-Windows, non-aarch64 builds.
        if (libtesseract.getBuildSettings().TargetOS.Type != OSType::Windows &&
            libtesseract.getBuildSettings().TargetOS.Arch != ArchType::aarch64)
        {
            libtesseract["src/arch/dotproductavx.cpp"].args.push_back("-mavx");
            libtesseract["src/arch/dotproductavx512.cpp"].args.push_back("-mavx512f");
            libtesseract["src/arch/dotproductsse.cpp"].args.push_back("-msse4.1");
            libtesseract["src/arch/intsimdmatrixsse.cpp"].args.push_back("-msse4.1");
            libtesseract["src/arch/intsimdmatrixavx2.cpp"].args.push_back("-mavx2");
        }
        if (!win_or_mingw)
        {
#if SW_MODULE_ABI_VERSION > 29
            // Android provides pthread functionality in libc; no -lpthread.
            if (!libtesseract.getBuildSettings().TargetOS.Android)
#endif
                libtesseract += "pthread"_slib;
        }
        if (libtesseract.getBuildSettings().TargetOS.Arch == ArchType::aarch64)
        {
            // Re-add the NEON dot product that was excluded above.
            libtesseract += "src/arch/dotproductneon.cpp";
        }
        libtesseract.Public += "HAVE_CONFIG_H"_d;
        libtesseract.Public += "_SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS=1"_d;
        libtesseract.Public += "HAVE_LIBARCHIVE"_d;
        libtesseract.Public += "org.sw.demo.danbloomberg.leptonica"_dep;
        libtesseract.Public += "org.sw.demo.libarchive.libarchive"_dep;
        if (win_or_mingw)
        {
            libtesseract.Public += "ws2_32.lib"_slib;
            libtesseract.Protected += "NOMINMAX"_def;
        }
        if (libtesseract.getCompilerType() == CompilerType::MSVC)
            libtesseract.Protected.CompileOptions.push_back("-utf-8");
        // Version variables consumed by include/tesseract/version.h.in.
        libtesseract.Variables["TESSERACT_MAJOR_VERSION"] = libtesseract.Variables["PACKAGE_MAJOR_VERSION"];
        libtesseract.Variables["TESSERACT_MINOR_VERSION"] = libtesseract.Variables["PACKAGE_MINOR_VERSION"];
        libtesseract.Variables["TESSERACT_MICRO_VERSION"] = libtesseract.Variables["PACKAGE_PATCH_VERSION"];
        libtesseract.Variables["TESSERACT_VERSION_STR"] = "master";
        libtesseract.configureFile("include/tesseract/version.h.in", "tesseract/version.h");
    }

    // Command-line executables.
    auto &tesseract = tess.addExecutable("tesseract");
    {
        tesseract += cppstd;
        tesseract += "src/tesseract.cpp";
        tesseract += libtesseract;
    }

    auto &svpaint = tess.addExecutable("svpaint");
    {
        svpaint += cppstd;
        svpaint += "src/svpaint.cpp";
        svpaint += libtesseract;
    }

    auto &training = tess.addDirectory("training");

    // Shared code for the training tools.
    auto &common_training = training.addLibrary("common_training");
    {
        common_training += "TESS_COMMON_TRAINING_API"_api;
        common_training += cppstd;
        common_training += "src/training/common/.*"_rr;
        common_training.Public += "src/training/common"_idir;
        common_training.Public += libtesseract;
    }

    // Unicharset handling for the training tools (needs ICU).
    auto &unicharset_training = training.addLibrary("unicharset_training");
    {
        unicharset_training += "TESS_UNICHARSET_TRAINING_API"_api;
        unicharset_training += cppstd;
        unicharset_training += "src/training/unicharset/.*"_rr;
        unicharset_training.Public += "src/training/unicharset"_idir;
        unicharset_training.Public += common_training;
        unicharset_training.Public += "org.sw.demo.unicode.icu.i18n"_dep;
        auto win_or_mingw =
            unicharset_training.getBuildSettings().TargetOS.Type == OSType::Windows ||
            unicharset_training.getBuildSettings().TargetOS.Type == OSType::Mingw
            ;
        if (!win_or_mingw)
            unicharset_training += "pthread"_slib;
    }

    // Helper macro: declare a training executable built from
    // src/training/<name>.* that links the given dependency target(s).
#define ADD_EXE(n, ...)                   \
    auto &n = training.addExecutable(#n); \
    n += cppstd;                          \
    n += "src/training/" #n ".*"_rr;      \
    n.Public += __VA_ARGS__;              \
    n
    ADD_EXE(ambiguous_words, common_training);
    ADD_EXE(classifier_tester, common_training);
    ADD_EXE(combine_lang_model, unicharset_training);
    ADD_EXE(combine_tessdata, common_training);
    ADD_EXE(cntraining, common_training);
    ADD_EXE(dawg2wordlist, common_training);
    ADD_EXE(mftraining, common_training) += "src/training/mergenf.*"_rr;
    ADD_EXE(shapeclustering, common_training);
    ADD_EXE(unicharset_extractor, unicharset_training);
    ADD_EXE(wordlist2dawg, common_training);
    ADD_EXE(lstmeval, unicharset_training);
    ADD_EXE(lstmtraining, unicharset_training);
    ADD_EXE(set_unicharset_properties, unicharset_training);
    ADD_EXE(merge_unicharsets, common_training);

    // Rendering-based training tools (need pango/cairo).
    auto &pango_training = training.addLibrary("pango_training");
    {
        pango_training += "TESS_PANGO_TRAINING_API"_api;
        pango_training += cppstd;
        pango_training += "src/training/pango/.*"_rr;
        pango_training.Public += "src/training/pango"_idir;
        pango_training.Public += unicharset_training;
        pango_training.Public += "org.sw.demo.gnome.pango.pangocairo"_dep;
    }
    ADD_EXE(text2image, pango_training);
    {
        text2image += cppstd;
        text2image +=
            "src/training/degradeimage.cpp",
            "src/training/degradeimage.h",
            "src/training/text2image.cpp"
            ;
    }

    // Everything below is test-only; skip unless explicitly requested.
    if (!s.getExternalVariables()["with-tests"])
        return;

    // tests
    {
        auto &test = tess.addDirectory("test");
        test.Scope = TargetScope::Test;

        // "skip-tests" is a comma-separated list of regexes matching test
        // names to register but mark as skipped.
        String skipped_tests_str;
        if (s.getExternalVariables()["skip-tests"])
            skipped_tests_str = s.getExternalVariables()["skip-tests"].getValue();
        auto skipped_tests = split_string(skipped_tests_str, ",");

        // Registers one gtest executable built from unittest/<name>_test.*.
        auto add_test = [&test, &s, &cppstd, &libtesseract, &pango_training, &skipped_tests](const String &name) -> decltype(auto)
        {
            auto &t = test.addTarget<ExecutableTarget>(name);
            t += cppstd;
            t += FileRegex("unittest", name + "_test.*", false);
            t += "unittest"_idir;
            t += "SW_TESTING"_def;
            // Test data location: defaults to the in-tree tessdata_unittest,
            // overridable via the "test-data-dir" external variable.
            auto datadir = test.SourceDir / "tessdata_unittest";
            if (s.getExternalVariables()["test-data-dir"])
                datadir = fs::current_path() / s.getExternalVariables()["test-data-dir"].getValue();
            t += Definition("TESSBIN_DIR=\"" + ""s + "\"");
            t += Definition("TESTING_DIR=\"" + to_printable_string(normalize_path(test.SourceDir / "test/testing")) + "\"");
            t += Definition("TESTDATA_DIR=\"" + to_printable_string(normalize_path(test.SourceDir / "test/testdata")) + "\"");
            t += Definition("LANGDATA_DIR=\"" + to_printable_string(normalize_path(datadir / "langdata_lstm")) + "\"");
            t += Definition("TESSDATA_DIR=\"" + to_printable_string(normalize_path(datadir / "tessdata")) + "\"");
            t += Definition("TESSDATA_BEST_DIR=\"" + to_printable_string(normalize_path(datadir / "tessdata_best")) + "\"");
            // we push all deps to all tests simplify things
            t += pango_training;
            t += "org.sw.demo.google.googletest.gmock.main"_dep;
            t += "org.sw.demo.google.googletest.gtest.main"_dep;
            if (t.getCompilerType() == CompilerType::MSVC)
                t.CompileOptions.push_back("-utf-8");
            auto win_or_mingw =
                t.getBuildSettings().TargetOS.Type == OSType::Windows ||
                t.getBuildSettings().TargetOS.Type == OSType::Mingw
                ;
            if (!win_or_mingw)
                t += "pthread"_slib;
            auto tst = libtesseract.addTest(t, name);
            // Mark as skipped if the name matches any skip-tests regex.
            for (auto &st : skipped_tests)
            {
                std::regex r(st);
                if (std::regex_match(name, r))
                {
                    tst.skip(true);
                    break;
                }
            }
            return t;
        };

        Strings tests
        {
            "apiexample",
            "applybox",
            "baseapi",
            "baseapi_thread",
            "bitvector",
            "capiexample",
            "capiexample_c",
            "cleanapi",
            "colpartition",
            "commandlineflags",
            "denorm",
            "equationdetect",
            "fileio",
            "heap",
            "imagedata",
            "indexmapbidi",
            "intfeaturemap",
            "intsimdmatrix",
            "lang_model",
            "layout",
            "ligature_table",
            "linlsq",
            "list",
            "lstm_recode",
            "lstm_squashed",
            "lstm",
            "lstmtrainer",
            "loadlang",
            "mastertrainer",
            "matrix",
            "networkio",
            "normstrngs",
            "nthitem",
            "osd",
            "pagesegmode",
            "pango_font_info",
            "paragraphs",
            "params_model",
            "progress",
            "qrsequence",
            "recodebeam",
            "rect",
            "resultiterator",
            "scanutils",
            "shapetable",
            "stats",
            "stringrenderer",
            "stridemap",
            "tablefind",
            "tablerecog",
            "tabvector",
            "textlineprojection",
            "tfile",
            "unichar",
            "unicharcompress",
            "unicharset",
            "validate_grapheme",
            "validate_indic",
            "validate_khmer",
            "validate_myanmar",
            "validator",
        };
        for (auto t : tests)
            add_test(t);

        // Tests that need extra sources or generated-tool paths.
        auto &dt = add_test("dawg");
        dt += Definition("wordlist2dawg_prog=\"" + to_printable_string(normalize_path(wordlist2dawg.getOutputFile())) + "\"");
        dt += Definition("dawg2wordlist_prog=\"" + to_printable_string(normalize_path(dawg2wordlist.getOutputFile())) + "\"");

        auto &tw = add_test("tatweel");
        tw += "unittest/util/.*"_rr;
        tw += "unittest/third_party/.*"_rr;
        tw -= "unittest/third_party/googletest/.*"_rr;
    }
}
// Configure-time feature probes for libtesseract (the check set is bound to
// the library via setChecks("libtesseract") in build()); results feed the
// HAVE_*/SIZEOF_* values in config_auto.h.
void check(Checker &c)
{
    auto &s = c.addSet("libtesseract");
    s.checkFunctionExists("getline");
    s.checkIncludeExists("dlfcn.h");
    s.checkIncludeExists("inttypes.h");
    s.checkIncludeExists("memory.h");
    s.checkIncludeExists("stdint.h");
    s.checkIncludeExists("stdlib.h");
    s.checkIncludeExists("string.h");
    s.checkIncludeExists("sys/stat.h");
    s.checkIncludeExists("sys/types.h");
    s.checkIncludeExists("tiffio.h");
    s.checkIncludeExists("unistd.h");
    s.checkTypeSize("long long int");
    s.checkTypeSize("size_t");
    s.checkTypeSize("void *");
    s.checkTypeSize("wchar_t");
    {
        // snprintf needs stdio.h to be visible for the symbol probe.
        // NOTE(review): the inner 'c' shadows the Checker parameter; harmless
        // here, but renaming it would read better.
        auto &c = s.checkSymbolExists("snprintf");
        c.Parameters.Includes.push_back("stdio.h");
    }
}
|
2301_81045437/tesseract
|
sw.cpp
|
C++
|
apache-2.0
| 13,061
|
# Install pdf.ttf into $(datadir)/tessdata and recurse into the config
# subdirectories.
datadir = @datadir@/tessdata
data_DATA = pdf.ttf
EXTRA_DIST = $(data_DATA)
SUBDIRS = configs tessconfigs
# Intentionally empty: language .traineddata files are not shipped from
# this directory, but "make uninstall" still removes any listed here.
langdata =
uninstall-local:
	cd $(DESTDIR)$(datadir); \
	rm -f $(langdata)
|
2301_81045437/tesseract
|
tessdata/Makefile.am
|
Makefile
|
apache-2.0
| 184
|
# Stock Tesseract config files, installed into $(datadir)/tessdata/configs.
datadir = @datadir@/tessdata/configs
data_DATA = inter makebox box.train unlv ambigs.train lstm.train lstmdebug
data_DATA += api_config kannada box.train.stderr quiet logfile digits get.images
data_DATA += lstmbox wordstrbox
# Configurations for OCR output.
data_DATA += alto hocr page pdf tsv txt
# Remaining distributed config files.
data_DATA += linebox rebox strokewidth bigram
EXTRA_DIST = $(data_DATA)
|
2301_81045437/tesseract
|
tessdata/configs/Makefile.am
|
Makefile
|
apache-2.0
| 370
|
# Demo/segmentation config files, installed into
# $(datadir)/tessdata/tessconfigs.
datadir = @datadir@/tessdata/tessconfigs
data_DATA = batch batch.nochop nobatch matdemo segdemo msdemo
# Distribute exactly the installed files; reference data_DATA instead of
# duplicating the list, matching tessdata/Makefile.am and
# tessdata/configs/Makefile.am.
EXTRA_DIST = $(data_DATA)
|
2301_81045437/tesseract
|
tessdata/tessconfigs/Makefile.am
|
Makefile
|
apache-2.0
| 166
|
# pkg-config metadata template for libtesseract; the @VAR@ placeholders
# are substituted by CMake's configure_file() at build time.
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=@CMAKE_INSTALL_PREFIX@
libdir=@CMAKE_INSTALL_FULL_LIBDIR@
includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@
Name: @tesseract_NAME@
Description: An OCR Engine that was developed at HP Labs between 1985 and 1995... and now at Google.
URL: https://github.com/tesseract-ocr/tesseract
Version: @tesseract_VERSION@
Requires.private: lept
Libs: -L${libdir} -l@tesseract_OUTPUT_NAME@ @libarchive_LIBS@ @libcurl_LIBS@ @TENSORFLOW_LIBS@
Libs.private:
Cflags: -I${includedir}
|
2301_81045437/tesseract
|
tesseract.pc.cmake
|
CMake
|
apache-2.0
| 501
|
///////////////////////////////////////////////////////////////////////
// File: apiexample_test.cc
// Description: Api Test for Tesseract using text fixtures and parameters.
// Tests for Devanagari, Latin and Arabic scripts are disabled by default.
// Disabled tests can be run when required by using the
// --gtest_also_run_disabled_tests argument.
// ./unittest/apiexample_test --gtest_also_run_disabled_tests
//
// Author: ShreeDevi Kumar
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
// expects clone of tessdata_fast repo in ../../tessdata_fast
//#include "log.h"
#include <allheaders.h>
#include <tesseract/baseapi.h>
#include <time.h>
#include <fstream>
#include <iostream>
#include <locale>
#include <memory> // std::unique_ptr
#include <string>
#include "include_gunit.h"
#include "image.h"
namespace tesseract {
// Fixture that records wall-clock time in SetUp() and fails the test in
// TearDown() when it exceeded the per-build time budget.
class QuickTest : public testing::Test {
protected:
void SetUp() override {
start_time_ = time(nullptr);
}
void TearDown() override {
#ifndef NDEBUG
// Debug builds can be very slow, so allow 4 min for OCR of a test image.
// apitest_example including disabled tests takes about 18 min on ARMv7.
const time_t MAX_SECONDS_FOR_TEST = 240;
#else
// Release builds typically need less than 10 s for OCR of a test image,
// apitest_example including disabled tests takes about 90 s on ARMv7.
const time_t MAX_SECONDS_FOR_TEST = 55;
#endif
const time_t end_time = time(nullptr);
EXPECT_TRUE(end_time - start_time_ <= MAX_SECONDS_FOR_TEST)
<< "The test took too long - " << ::testing::PrintToString(end_time - start_time_);
}
// Wall-clock time when the test started, set in SetUp().
time_t start_time_;
};
void OCRTester(const char *imgname, const char *groundtruth, const char *tessdatadir,
const char *lang) {
// log.info() << tessdatadir << " for language: " << lang << std::endl;
char *outText;
std::locale loc("C"); // You can also use "" for the default system locale
std::ifstream file(groundtruth);
file.imbue(loc); // Use it for file input
std::string gtText((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
auto api = std::make_unique<tesseract::TessBaseAPI>();
ASSERT_FALSE(api->Init(tessdatadir, lang)) << "Could not initialize tesseract.";
Image image = pixRead(imgname);
ASSERT_TRUE(image != nullptr) << "Failed to read test image.";
api->SetImage(image);
outText = api->GetUTF8Text();
EXPECT_EQ(gtText, outText) << "Phototest.tif OCR does not match ground truth for "
<< ::testing::PrintToString(lang);
api->End();
api->ClearPersistentCache();
delete[] outText;
image.destroy();
}
// Parameterized over the language / script name passed to Init().
class MatchGroundTruth : public QuickTest, public ::testing::WithParamInterface<const char *> {};
// OCR phototest.tif against the "fast" traineddata set.
TEST_P(MatchGroundTruth, FastPhototestOCR) {
OCRTester(TESTING_DIR "/phototest.tif", TESTING_DIR "/phototest.txt", TESSDATA_DIR "_fast",
GetParam());
}
// OCR phototest.tif against the "best" traineddata set.
TEST_P(MatchGroundTruth, BestPhototestOCR) {
OCRTester(TESTING_DIR "/phototest.tif", TESTING_DIR "/phototest.txt", TESSDATA_DIR "_best",
GetParam());
}
// OCR phototest.tif against the default traineddata set.
TEST_P(MatchGroundTruth, TessPhototestOCR) {
OCRTester(TESTING_DIR "/phototest.tif", TESTING_DIR "/phototest.txt", TESSDATA_DIR, GetParam());
}
// Non-English scripts are DISABLED_ by default; run them with
// --gtest_also_run_disabled_tests when the script data is available.
INSTANTIATE_TEST_SUITE_P(Eng, MatchGroundTruth, ::testing::Values("eng"));
INSTANTIATE_TEST_SUITE_P(DISABLED_Latin, MatchGroundTruth, ::testing::Values("script/Latin"));
INSTANTIATE_TEST_SUITE_P(DISABLED_Deva, MatchGroundTruth, ::testing::Values("script/Devanagari"));
INSTANTIATE_TEST_SUITE_P(DISABLED_Arabic, MatchGroundTruth, ::testing::Values("script/Arabic"));
// Fixture for the multi-language eurotext sample.
class EuroText : public QuickTest {};
// OCR eurotext.tif with the fast Latin script model.
TEST_F(EuroText, FastLatinOCR) {
OCRTester(TESTING_DIR "/eurotext.tif", TESTING_DIR "/eurotext.txt", TESSDATA_DIR "_fast",
"script/Latin");
}
// script/Latin for eurotext.tif does not match groundtruth
// for tessdata & tessdata_best.
// so do not test these here.
} // namespace tesseract
|
2301_81045437/tesseract
|
unittest/apiexample_test.cc
|
C++
|
apache-2.0
| 4,513
|
// (C) Copyright 2017, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <allheaders.h>
#include <tesseract/baseapi.h>
#include <tesseract/resultiterator.h>
#include <string>
#include "boxread.h"
#include "rect.h"
#include "include_gunit.h"
namespace tesseract {
// Expected OCR text when character-level boxes are applied (with spaces).
const char *kTruthTextWords = "To simple burn running of goods lately.\n";
// Expected OCR text in line-box mode (note: no spaces).
const char *kTruthTextLine = "Tosimpleburnrunningofgoodslately.\n";
// The fixture for testing Tesseract.
// Fixture that runs Tesseract with box files applied to the input image
// and verifies both the recognized text and the per-symbol bounding boxes.
class ApplyBoxTest : public testing::Test {
protected:
  // Returns the absolute path of a file in the test data directory.
  std::string TestDataNameToPath(const std::string &name) {
    return file::JoinPath(TESTING_DIR, name);
  }
  std::string TessdataPath() {
    return TESSDATA_DIR;
  }
  ApplyBoxTest() {
    src_pix_ = nullptr;
  }
  ~ApplyBoxTest() override {
    src_pix_.destroy();
  }
  // Loads `filename` and initializes the API for box-driven resegmentation.
  // Returns false if eng.traineddata could not be loaded.
  bool SetImage(const char *filename) {
    bool found = false;
    src_pix_.destroy();
    src_pix_ = pixRead(TestDataNameToPath(filename).c_str());
    if (api_.Init(TessdataPath().c_str(), "eng", tesseract::OEM_TESSERACT_ONLY) != -1) {
      api_.SetPageSegMode(tesseract::PSM_SINGLE_BLOCK);
      api_.SetImage(src_pix_);
      api_.SetVariable("tessedit_make_boxes_from_boxes", "1");
      api_.SetInputName(TestDataNameToPath(filename).c_str());
      found = true;
    }
    return found;
  }
  // Runs ApplyBoxes (via setting the appropriate variables and Recognize)
  // and checks that the output ocr text matches the truth_str, and that
  // the boxes match the given box file well enough.
  // If line_mode is true, ApplyBoxes is run in line segmentation mode,
  // otherwise the input box file is assumed to have character-level boxes.
  void VerifyBoxesAndText(const char *imagefile, const char *truth_str, const char *target_box_file,
                          bool line_mode) {
    if (!SetImage(imagefile)) {
      // eng.traineddata not found or other problem during Init.
      GTEST_SKIP();
    }
    if (line_mode) {
      api_.SetVariable("tessedit_resegment_from_line_boxes", "1");
    } else {
      api_.SetVariable("tessedit_resegment_from_boxes", "1");
    }
    api_.Recognize(nullptr);
    char *ocr_text = api_.GetUTF8Text();
    EXPECT_STREQ(truth_str, ocr_text);
    delete[] ocr_text;
    // Test the boxes by reading the target box file in parallel with the
    // bounding boxes in the ocr output.
    std::string box_filename = TestDataNameToPath(target_box_file);
    FILE *box_file = OpenBoxFile(box_filename.c_str());
    ASSERT_TRUE(box_file != nullptr);
    int height = pixGetHeight(src_pix_);
    ResultIterator *it = api_.GetIterator();
    do {
      int left, top, right, bottom;
      EXPECT_TRUE(it->BoundingBox(tesseract::RIL_SYMBOL, &left, &top, &right, &bottom));
      // Box files are bottom-left origin; flip to Tesseract's coordinates.
      TBOX ocr_box(ICOORD(left, height - bottom), ICOORD(right, height - top));
      int line_number = 0;
      TBOX truth_box;
      std::string box_text;
      EXPECT_TRUE(ReadNextBox(0, &line_number, box_file, box_text, &truth_box));
      // Testing for major overlap is a bit weak, but if they all
      // major overlap successfully, then it has to be fairly close.
      EXPECT_TRUE(ocr_box.major_overlap(truth_box));
      // Also check that the symbol text matches the box text.
      char *symbol_text = it->GetUTF8Text(tesseract::RIL_SYMBOL);
      EXPECT_STREQ(box_text.c_str(), symbol_text);
      delete[] symbol_text;
    } while (it->Next(tesseract::RIL_SYMBOL));
    delete it;
    // Fix: the box file handle was previously leaked.
    fclose(box_file);
  }
  Image src_pix_;  // owned; released in the destructor
  std::string ocr_text_;
  tesseract::TessBaseAPI api_;
};
// Tests character-level applyboxes on normal Times New Roman.
TEST_F(ApplyBoxTest, TimesCharLevel) {
VerifyBoxesAndText("trainingtimes.tif", kTruthTextWords, "trainingtimes.box", false);
}
// Tests character-level applyboxes on italic Times New Roman.
TEST_F(ApplyBoxTest, ItalicCharLevel) {
VerifyBoxesAndText("trainingital.tif", kTruthTextWords, "trainingital.box", false);
}
// Tests line-level applyboxes on normal Times New Roman.
// (The character-level box file is reused as the expected-box source.)
TEST_F(ApplyBoxTest, TimesLineLevel) {
VerifyBoxesAndText("trainingtimesline.tif", kTruthTextLine, "trainingtimes.box", true);
}
// Tests line-level applyboxes on italic Times New Roman.
TEST_F(ApplyBoxTest, ItalLineLevel) {
VerifyBoxesAndText("trainingitalline.tif", kTruthTextLine, "trainingital.box", true);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
unittest/applybox_test.cc
|
C++
|
apache-2.0
| 4,778
|
// (C) Copyright 2017, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "include_gunit.h"
#include "cycletimer.h" // for CycleTimer
#include "log.h" // for LOG
#include "ocrblock.h" // for class BLOCK
#include "pageres.h"
#include <tesseract/baseapi.h>
#include <allheaders.h>
#include "gmock/gmock-matchers.h"
#include <memory>
#include <regex>
#include <string>
#include <vector>
namespace tesseract {
using ::testing::ContainsRegex;
using ::testing::HasSubstr;
// Languages, matching test images and ground truth used by the
// multi-instance tests; the three arrays are parallel and
// nullptr-terminated.
static const char *langs[] = {"eng", "vie", "hin", "ara", nullptr};
static const char *image_files[] = {"HelloGoogle.tif", "viet.tif", "raaj.tif", "arabic.tif",
nullptr};
// Ground truth as UTF-8 byte escapes: Vietnamese "tiếng", Hindi "राज",
// Arabic "العربي".
static const char *gt_text[] = {"Hello Google", "\x74\x69\xe1\xba\xbf\x6e\x67",
"\xe0\xa4\xb0\xe0\xa4\xbe\xe0\xa4\x9c",
"\xd8\xa7\xd9\x84\xd8\xb9\xd8\xb1\xd8\xa8\xd9\x8a", nullptr};
// Grants LSTMGeometryTest access to protected TessBaseAPI members
// (e.g. GetPageRes) via FRIEND_TEST.
class FriendlyTessBaseAPI : public tesseract::TessBaseAPI {
FRIEND_TEST(TesseractTest, LSTMGeometryTest);
};
// Runs OCR on `pix` with the given API instance and returns the
// recognized text with surrounding whitespace trimmed.
std::string GetCleanedTextResult(tesseract::TessBaseAPI *tess, Image pix) {
  tess->SetImage(pix);
  char *raw = tess->GetUTF8Text();
  std::string cleaned{raw};
  delete[] raw;
  trim(cleaned);
  return cleaned;
}
// The fixture for testing Tesseract.
class TesseractTest : public testing::Test {
protected:
// Returns the absolute path of a file in the test data directory.
static std::string TestDataNameToPath(const std::string &name) {
return file::JoinPath(TESTING_DIR, name);
}
// Root directory containing the .traineddata files.
static std::string TessdataPath() {
return TESSDATA_DIR;
}
};
// Test static TessBaseAPI (like it is used by tesserocr).
TEST_F(TesseractTest, StaticTessBaseAPI) {
// The instance is destroyed at program exit; End() must work before that.
static tesseract::TessBaseAPI api;
api.End();
}
// Tests that Tesseract gets exactly the right answer on phototest.
TEST_F(TesseractTest, BasicTesseractTest) {
tesseract::TessBaseAPI api;
std::string truth_text;
std::string ocr_text;
if (api.Init(TessdataPath().c_str(), "eng", tesseract::OEM_TESSERACT_ONLY) != -1) {
Image src_pix = pixRead(TestDataNameToPath("phototest.tif").c_str());
CHECK(src_pix);
ocr_text = GetCleanedTextResult(&api, src_pix);
// Compare against the checked-in gold transcription, whitespace-trimmed
// on both sides.
CHECK_OK(
file::GetContents(TestDataNameToPath("phototest.gold.txt"), &truth_text, file::Defaults()));
trim(truth_text);
EXPECT_STREQ(truth_text.c_str(), ocr_text.c_str());
src_pix.destroy();
} else {
// eng.traineddata not found.
GTEST_SKIP();
}
}
// Test that api.GetComponentImages() will return a set of images for
// paragraphs even if text recognition was not run.
TEST_F(TesseractTest, IteratesParagraphsEvenIfNotDetected) {
  tesseract::TessBaseAPI api;
  if (api.Init(TessdataPath().c_str(), "eng", tesseract::OEM_TESSERACT_ONLY) != -1) {
    api.SetPageSegMode(tesseract::PSM_SINGLE_BLOCK);
    api.SetVariable("paragraph_debug_level", "3");
#if 0 // TODO: b622.png is missing
    Pix* src_pix = pixRead(TestDataNameToPath("b622.png").c_str());
    CHECK(src_pix);
    api.SetImage(src_pix);
    Boxa* para_boxes =
        api.GetComponentImages(tesseract::RIL_PARA, true, nullptr, nullptr);
    EXPECT_TRUE(para_boxes != nullptr);
    Boxa* block_boxes =
        api.GetComponentImages(tesseract::RIL_BLOCK, true, nullptr, nullptr);
    EXPECT_TRUE(block_boxes != nullptr);
    // TODO(eger): Get paragraphs out of this page pre-text.
    EXPECT_GE(boxaGetCount(para_boxes), boxaGetCount(block_boxes));
    boxaDestroy(&block_boxes);
    // Fix: "&para_boxes" had been mangled into an HTML pilcrow entity.
    boxaDestroy(&para_boxes);
    // Fix: src_pix is a raw Pix*, so use pixDestroy, not Image::destroy().
    pixDestroy(&src_pix);
#endif
  } else {
    // eng.traineddata not found.
    GTEST_SKIP();
  }
}
// We should get hOCR output and not seg fault, even if the api caller doesn't
// call SetInputName().
TEST_F(TesseractTest, HOCRWorksWithoutSetInputName) {
tesseract::TessBaseAPI api;
if (api.Init(TessdataPath().c_str(), "eng", tesseract::OEM_TESSERACT_ONLY) == -1) {
// eng.traineddata not found.
GTEST_SKIP();
}
Image src_pix = pixRead(TestDataNameToPath("HelloGoogle.tif").c_str());
CHECK(src_pix);
// Deliberately no SetInputName() before requesting hOCR output.
api.SetImage(src_pix);
char *result = api.GetHOCRText(0);
EXPECT_TRUE(result != nullptr);
EXPECT_THAT(result, HasSubstr("Hello"));
EXPECT_THAT(result, HasSubstr("<div class='ocr_page'"));
delete[] result;
src_pix.destroy();
}
// hOCR output should contain baseline info for upright textlines.
TEST_F(TesseractTest, HOCRContainsBaseline) {
tesseract::TessBaseAPI api;
if (api.Init(TessdataPath().c_str(), "eng", tesseract::OEM_TESSERACT_ONLY) == -1) {
// eng.traineddata not found.
GTEST_SKIP();
}
Image src_pix = pixRead(TestDataNameToPath("HelloGoogle.tif").c_str());
CHECK(src_pix);
api.SetInputName("HelloGoogle.tif");
api.SetImage(src_pix);
char *result = api.GetHOCRText(0);
EXPECT_TRUE(result != nullptr);
EXPECT_THAT(result, HasSubstr("Hello"));
// The ocr_line span must carry "baseline <slope> <offset>" numbers.
EXPECT_TRUE(std::regex_search(
result, std::regex{"<span class='ocr_line'[^>]* baseline [-.0-9]+ [-.0-9]+"}));
delete[] result;
src_pix.destroy();
}
// Tests that Tesseract gets exactly the right answer on some page numbers
// after adapting to a set of word images with known transcriptions.
TEST_F(TesseractTest, AdaptToWordStrTest) {
#ifdef DISABLED_LEGACY_ENGINE
  // Skip test because TessBaseAPI::AdaptToWordStr is missing.
  GTEST_SKIP();
#else
  // Word images used for adaption and their transcriptions (parallel,
  // nullptr-terminated arrays).
  static const char *kTrainingPages[] = {"136.tif", "256.tif", "410.tif", "432.tif", "540.tif",
                                         "692.tif", "779.tif", "793.tif", "808.tif", "815.tif",
                                         "12.tif", "12.tif", nullptr};
  static const char *kTrainingText[] = {"1 3 6", "2 5 6", "4 1 0", "4 3 2", "5 4 0",
                                        "6 9 2", "7 7 9", "7 9 3", "8 0 8", "8 1 5",
                                        "1 2", "1 2", nullptr};
  // Held-out pages that must be recognized correctly after adaption.
  static const char *kTestPages[] = {"324.tif", "433.tif", "12.tif", nullptr};
  static const char *kTestText[] = {"324", "433", "12", nullptr};
  tesseract::TessBaseAPI api;
  std::string ocr_text;
  if (api.Init(TessdataPath().c_str(), "eng", tesseract::OEM_TESSERACT_ONLY) == -1) {
    // eng.traineddata not found.
    GTEST_SKIP();
  }
  api.SetVariable("matcher_sufficient_examples_for_prototyping", "1");
  api.SetVariable("classify_class_pruner_threshold", "220");
  // Train on the training text.
  for (int i = 0; kTrainingPages[i] != nullptr; ++i) {
    std::string image_file = TestDataNameToPath(kTrainingPages[i]);
    Image src_pix = pixRead(image_file.c_str());
    CHECK(src_pix);
    api.SetImage(src_pix);
    EXPECT_TRUE(api.AdaptToWordStr(tesseract::PSM_SINGLE_WORD, kTrainingText[i]))
        << "Failed to adapt to text \"" << kTrainingText[i] << "\" on image " << image_file;
    src_pix.destroy();
  }
  // Test the test text.
  api.SetVariable("tess_bn_matching", "1");
  api.SetPageSegMode(tesseract::PSM_SINGLE_WORD);
  for (int i = 0; kTestPages[i] != nullptr; ++i) {
    Image src_pix = pixRead(TestDataNameToPath(kTestPages[i]).c_str());
    CHECK(src_pix);
    ocr_text = GetCleanedTextResult(&api, src_pix);
    // Fix: removed a leftover trim() on a never-populated truth_text
    // string; the expected value comes directly from kTestText.
    EXPECT_STREQ(kTestText[i], ocr_text.c_str());
    src_pix.destroy();
  }
#endif
}
// Tests that LSTM gets exactly the right answer on phototest.
TEST_F(TesseractTest, BasicLSTMTest) {
tesseract::TessBaseAPI api;
std::string truth_text;
std::string ocr_text;
if (api.Init(TessdataPath().c_str(), "eng", tesseract::OEM_LSTM_ONLY) == -1) {
// eng.traineddata not found.
GTEST_SKIP();
}
// phototest_2.tif is compared against the same gold text as phototest.tif.
Image src_pix = pixRead(TestDataNameToPath("phototest_2.tif").c_str());
CHECK(src_pix);
ocr_text = GetCleanedTextResult(&api, src_pix);
CHECK_OK(
file::GetContents(TestDataNameToPath("phototest.gold.txt"), &truth_text, file::Defaults()));
trim(truth_text);
EXPECT_STREQ(truth_text.c_str(), ocr_text.c_str());
src_pix.destroy();
}
// Test that LSTM's character bounding boxes are properly converted to
// Tesseract structures. Note that we can't guarantee that LSTM's
// character boxes fall completely within Tesseract's word box because
// the baseline denormalization/normalization transforms may introduce
// errors due to float/int conversions (e.g., see OUTLINE::move() in
// ccstruct/poutline.h) Instead, we do a loose check.
TEST_F(TesseractTest, LSTMGeometryTest) {
  FriendlyTessBaseAPI api;
  if (api.Init(TessdataPath().c_str(), "eng", tesseract::OEM_LSTM_ONLY) == -1) {
    // eng.traineddata not found.
    GTEST_SKIP();
  }
  // Fix: read the image only after a successful Init so the GTEST_SKIP
  // path does not leak the Pix; also CHECK it like the sibling tests do.
  Image src_pix = pixRead(TestDataNameToPath("deslant.tif").c_str());
  CHECK(src_pix);
  api.SetImage(src_pix);
  ASSERT_EQ(api.Recognize(nullptr), 0);
  const PAGE_RES *page_res = api.GetPageRes();
  PAGE_RES_IT page_res_it(const_cast<PAGE_RES *>(page_res));
  page_res_it.restart_page();
  BLOCK *block = page_res_it.block()->block;
  CHECK(block);
  // extract word and character boxes for each word
  for (page_res_it.restart_page(); page_res_it.word() != nullptr; page_res_it.forward()) {
    WERD_RES *word = page_res_it.word();
    CHECK(word);
    CHECK(word->best_choice);
    CHECK_GT(word->best_choice->length(), 0);
    CHECK(word->word);
    CHECK(word->box_word);
    // tesseract's word box
    TBOX tess_blob_box;
    tess_blob_box = word->word->bounding_box();
    tess_blob_box.rotate(block->re_rotation());
    // verify that each of LSTM's character boxes lies close to within
    // tesseract's word box
    for (int i = 0; i < word->box_word->length(); ++i) {
      TBOX lstm_blob_box = word->box_word->BlobBox(i);
      // LSTM character box should not spill out of tesseract word box
      // by more than a few pixels in any direction
      EXPECT_LT(tess_blob_box.left() - lstm_blob_box.left(), 5);
      EXPECT_LT(lstm_blob_box.right() - tess_blob_box.right(), 5);
      EXPECT_LT(tess_blob_box.bottom() - lstm_blob_box.bottom(), 5);
      EXPECT_LT(lstm_blob_box.top() - tess_blob_box.top(), 5);
    }
  }
  src_pix.destroy();
}
// Compares regular Init() timing against config-only initialization
// (tessedit_init_config_only=1) for several languages.
TEST_F(TesseractTest, InitConfigOnlyTest) {
// Languages for testing initialization.
const char *langs[] = {"eng", "chi_tra", "jpn", "vie"};
std::unique_ptr<tesseract::TessBaseAPI> api;
CycleTimer timer;
for (auto &lang : langs) {
api = std::make_unique<tesseract::TessBaseAPI>();
timer.Restart();
EXPECT_EQ(0, api->Init(TessdataPath().c_str(), lang, tesseract::OEM_TESSERACT_ONLY));
timer.Stop();
LOG(INFO) << "Lang " << lang << " took " << timer.GetInMs() << "ms in regular init";
}
// Init variables to set for config-only initialization.
std::vector<std::string> vars_vec, vars_values;
vars_vec.emplace_back("tessedit_init_config_only");
vars_values.emplace_back("1");
LOG(INFO) << "Switching to config only initialization:";
for (auto &lang : langs) {
api = std::make_unique<tesseract::TessBaseAPI>();
timer.Restart();
EXPECT_EQ(0, api->Init(TessdataPath().c_str(), lang, tesseract::OEM_TESSERACT_ONLY, nullptr, 0,
&vars_vec, &vars_values, false));
timer.Stop();
LOG(INFO) << "Lang " << lang << " took " << timer.GetInMs() << "ms in config-only init";
}
}
// Tests if two instances of Tesseract/LSTM can co-exist in the same thread.
// NOTE: This is not an exhaustive test and current support for multiple
// instances in Tesseract is fragile. This test is intended largely as a means
// of detecting and guarding against the existing support being possibly broken
// by future CLs. TessBaseAPI instances are initialized using the default
// OEM_DEFAULT mode.
TEST(TesseractInstanceTest, TestMultipleTessInstances) {
// Count the nullptr-terminated langs array.
int num_langs = 0;
while (langs[num_langs] != nullptr) {
++num_langs;
}
const std::string kTessdataPath = TESSDATA_DIR;
// Preload images and verify that OCR is correct on them individually.
std::vector<Image > pix(num_langs);
for (int i = 0; i < num_langs; ++i) {
std::string tracestring = "Single instance test with lang = ";
tracestring += langs[i];
SCOPED_TRACE(tracestring);
std::string path = file::JoinPath(TESTING_DIR, image_files[i]);
pix[i] = pixRead(path.c_str());
QCHECK(pix[i] != nullptr) << "Could not read " << path;
tesseract::TessBaseAPI tess;
EXPECT_EQ(0, tess.Init(kTessdataPath.c_str(), langs[i]));
std::string ocr_result = GetCleanedTextResult(&tess, pix[i]);
EXPECT_STREQ(gt_text[i], ocr_result.c_str());
}
// Process the images in all pairwise combinations of associated languages.
std::string ocr_result[2];
for (int i = 0; i < num_langs; ++i) {
for (int j = i + 1; j < num_langs; ++j) {
// Two live instances with different languages at the same time.
tesseract::TessBaseAPI tess1, tess2;
tess1.Init(kTessdataPath.c_str(), langs[i]);
tess2.Init(kTessdataPath.c_str(), langs[j]);
ocr_result[0] = GetCleanedTextResult(&tess1, pix[i]);
ocr_result[1] = GetCleanedTextResult(&tess2, pix[j]);
EXPECT_FALSE(strcmp(gt_text[i], ocr_result[0].c_str()) ||
strcmp(gt_text[j], ocr_result[1].c_str()))
<< "OCR failed on language pair " << langs[i] << "-" << langs[j];
}
}
// Release the preloaded images.
for (int i = 0; i < num_langs; ++i) {
pix[i].destroy();
}
}
// Tests whether Tesseract parameters are correctly set for the two instances.
TEST(TesseractInstanceTest, TestMultipleTessInstanceVariables) {
// One int, bool, string and double parameter, set to a different value
// per instance; the illegal name must be rejected by both.
std::string illegal_name = "an_illegal_name";
std::string langs[2] = {"eng", "hin"};
std::string int_param_name = "tessedit_pageseg_mode";
int int_param[2] = {1, 2};
std::string int_param_str[2] = {"1", "2"};
std::string bool_param_name = "tessedit_ambigs_training";
bool bool_param[2] = {false, true};
std::string bool_param_str[2] = {"F", "T"};
std::string str_param_name = "tessedit_char_blacklist";
std::string str_param[2] = {"abc", "def"};
std::string double_param_name = "segment_penalty_dict_frequent_word";
std::string double_param_str[2] = {"0.01", "2"};
double double_param[2] = {0.01, 2};
const std::string kTessdataPath = TESSDATA_DIR;
tesseract::TessBaseAPI tess1, tess2;
// Set each variable on its own instance...
for (int i = 0; i < 2; ++i) {
tesseract::TessBaseAPI *api = (i == 0) ? &tess1 : &tess2;
api->Init(kTessdataPath.c_str(), langs[i].c_str());
api->SetVariable(illegal_name.c_str(), "none");
api->SetVariable(int_param_name.c_str(), int_param_str[i].c_str());
api->SetVariable(bool_param_name.c_str(), bool_param_str[i].c_str());
api->SetVariable(str_param_name.c_str(), str_param[i].c_str());
api->SetVariable(double_param_name.c_str(), double_param_str[i].c_str());
}
// ...then verify the values did not leak across instances.
for (int i = 0; i < 2; ++i) {
tesseract::TessBaseAPI *api = (i == 0) ? &tess1 : &tess2;
EXPECT_FALSE(api->GetStringVariable(illegal_name.c_str()));
int intvar;
EXPECT_TRUE(api->GetIntVariable(int_param_name.c_str(), &intvar));
EXPECT_EQ(int_param[i], intvar);
bool boolvar;
EXPECT_TRUE(api->GetBoolVariable(bool_param_name.c_str(), &boolvar));
EXPECT_EQ(bool_param[i], boolvar);
EXPECT_STREQ(str_param[i].c_str(), api->GetStringVariable(str_param_name.c_str()));
double doublevar;
EXPECT_TRUE(api->GetDoubleVariable(double_param_name.c_str(), &doublevar));
EXPECT_EQ(double_param[i], doublevar);
}
}
} // namespace tesseract
|
2301_81045437/tesseract
|
unittest/baseapi_test.cc
|
C++
|
apache-2.0
| 15,476
|